from lxml import etree
import requests
import os
# Scrape the first three pages of images from the pic.netbian.com "4K beauty" gallery.
if __name__ == '__main__':
    # Listing pages to crawl: first three pages of the 4kmeinv category.
    url = [
        'http://pic.netbian.com/4kmeinv/',
        'http://pic.netbian.com/4kmeinv/index_2.html',
        'http://pic.netbian.com/4kmeinv/index_3.html',
    ]
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36 Edg/88.0.705.63'
    }
    # Create the output folder if it does not exist yet
    # (exist_ok avoids the exists()/mkdir() race of the original).
    os.makedirs('./picture', exist_ok=True)
    for i in url:
        response = requests.get(url=i, headers=headers)
        # The site serves GBK-encoded HTML; declare it explicitly instead of
        # relying on requests' ISO-8859-1 fallback plus a per-string
        # encode('iso-8859-1').decode('gbk') round-trip.
        response.encoding = 'gbk'
        page_text = response.text
        # Parse the listing page and pick up every thumbnail <li>.
        tree = etree.HTML(page_text)
        li_list = tree.xpath('//div[@class="slist"]/ul/li')
        for li in li_list:
            # Each <li> links to a detail page holding the full-size image.
            img_href = 'http://pic.netbian.com' + li.xpath('./a//@href')[0]
            detail_response = requests.get(url=img_href, headers=headers)
            detail_response.encoding = 'gbk'  # detail pages are GBK too
            detail_tree = etree.HTML(detail_response.text)
            img_detail_url = 'http://pic.netbian.com' + detail_tree.xpath('//div[@class="photo-pic"]/a/img/@src')[0]
            # Download the raw image bytes.
            img_detail_data = requests.get(url=img_detail_url, headers=headers).content
            # The alt text (decoded correctly above) is the image title.
            img_name = li.xpath('./a/img/@alt')[0] + '.jpg'
            img_path = os.path.join('picture', img_name)
            with open(img_path, 'wb') as fp:
                fp.write(img_detail_data)
            print(img_name, "下载成功")
# For study and reference only.