# Scrape images from the site page by page and save them into a folder.
import requests
import re
import os
# Regex that captures the protocol-relative image URL inside each
# <div class="thumb"> ... <img src="..."> ... </div> listing entry.
IMG_SRC_PATTERN = r'<div class="thumb">.*?<img src="(.*?)" alt.*?</div>'

# Example listing pages: https://www.qiushibaike.com/imgrank/page/2/
BASE_URL = 'https://www.qiushibaike.com/imgrank/page/%d/'  # %d is the page number

HEADERS = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 "
                  "(KHTML, like Gecko) Chrome/69.0.3947.100 Safari/537.36",
}


def extract_img_srcs(page_text):
    """Return all protocol-relative image src values ("//host/path.jpg")
    found in *page_text*, in document order.

    Uses re.S so '.' also matches the newlines inside each thumb block.
    """
    return re.findall(IMG_SRC_PATTERN, page_text, re.S)


def download_all(pages=range(1, 6), out_dir='./qiutuLibs/'):
    """Fetch each listing page in *pages*, extract every image URL and save
    each image into *out_dir* (created if missing).

    Raises requests.HTTPError when a listing page returns an error status.
    """
    # exist_ok avoids the check-then-create race of os.path.exists + mkdir.
    os.makedirs(out_dir, exist_ok=True)
    for page_num in pages:
        url = BASE_URL % page_num
        resp = requests.get(url=url, headers=HEADERS, timeout=10)
        resp.raise_for_status()  # fail loudly instead of scraping an error page
        img_src_list = extract_img_srcs(resp.text)
        print(img_src_list)
        for src in img_src_list:
            src_url = "https:" + src  # src is protocol-relative ("//...")
            img_data = requests.get(url=src_url, headers=HEADERS, timeout=10).content
            img_name = src_url.split('/')[-1]  # last path segment is the file name
            img_path = os.path.join(out_dir, img_name)
            with open(img_path, 'wb') as fp:
                fp.write(img_data)
            print(img_name, "Downloaded!!")


if __name__ == "__main__":
    download_all()
# Source: adapted from "Python crawler study notes (part 4)",
# blog.csdn.net/Kaaaakaki/article/details/109104052