import os

import requests
from lxml import etree

def headers(referer):
    # Image downloads may be tied to the Referer header, so build a fresh
    # Referer for each request to get around the site's anti-scraping check.
    return {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
        'Referer': referer,
    }
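A quick sanity check of the helper (the gallery URL below is just the example URL from the comments further down, used as a placeholder):

print(headers('http://www.mzitu.com/146823'))
# {'User-Agent': 'Mozilla/5.0 ...', 'Referer': 'http://www.mzitu.com/146823'}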
def Tuji(pag):  # Collect the gallery URLs listed on one index page
    fullurl = 'http://www.mzitu.com/page/{}/'.format(pag)
    shouye_html = requests.get(fullurl, headers=headers(fullurl))
    shouye_ele = etree.HTML(shouye_html.text)
    # Each <li> under #pins links to one gallery
    return shouye_ele.xpath('//*[@id="pins"]/li/a/@href')
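To see what that #pins XPath pulls out, here is a standalone illustration; the HTML fragment is invented for demonstration and only mimics the structure the scraper assumes the index page to have:

from lxml import etree

sample = '''
<ul id="pins">
  <li><a href="http://www.mzitu.com/146823"><img src="c1.jpg"/></a></li>
  <li><a href="http://www.mzitu.com/146900"><img src="c2.jpg"/></a></li>
</ul>
'''
ele = etree.HTML(sample)
print(ele.xpath('//*[@id="pins"]/li/a/@href'))
# ['http://www.mzitu.com/146823', 'http://www.mzitu.com/146900']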
def gettuji_info(tj_url_list):  # For each gallery URL, collect its info and download every image
    for tj_url_1 in tj_url_list:  # tj_url_1 ---> http://www.mzitu.com/146823
        tj_html = requests.get(tj_url_1, headers=headers(tj_url_1))
        tj_ele = etree.HTML(tj_html.text)
        img_title = tj_ele.xpath('//h2[@class="main-title"]/text()')[0]  # gallery title
        # Last page number shown in the pagination bar (site-specific XPath)
        max_pag_list = int(tj_ele.xpath('/html/body/div[2]/div[1]/div[4]/a[5]/span/text()')[0])
        os.makedirs(img_title, exist_ok=True)  # one folder per gallery
        for i in range(1, max_pag_list + 1):
            tj_url_2 = tj_url_1 + '/' + str(i)
            # tj_url_2 ---> http://www.mzitu.com/146823 + page number
            tj_html = requests.get(tj_url_2, headers=headers(tj_url_1))
            tj_ele = etree.HTML(tj_html.text)
            # Each per-page URL exposes exactly one full-size image
            img_url = tj_ele.xpath('//div[@class="main-image"]/p/a/img/@src')[0]
            print('Downloading image {} of "{}"'.format(i, img_title))
            with open(img_title + '/' + str(i) + '.jpg', 'wb') as jpg:
                jpg.write(requests.get(img_url, headers=headers(tj_url_2)).content)
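The download line above has no timeout and no status check, so a stalled connection hangs the script and an error page gets written out as a .jpg. A minimal, more defensive sketch (the save_image name and the delay value are my own additions, not from the original):

import time

def save_image(img_url, path, referer, delay=0.5):
    # Fail fast on stalled connections and non-200 responses,
    # then pause briefly between downloads to stay polite.
    resp = requests.get(img_url, headers=headers(referer), timeout=10)
    resp.raise_for_status()
    with open(path, 'wb') as jpg:
        jpg.write(resp.content)
    time.sleep(delay)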
if __name__ == '__main__':
    pags = int(input('How many index pages do you want to scrape? '))
    for pag in range(1, pags + 1):
        gettuji_info(Tuji(pag))
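Entering 2 at the prompt, for example, walks index pages 1 and 2 and downloads every gallery linked from them, one folder per gallery.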