数据在网页源代码中,用正则表达式提取图片链接
案例一(饭前点心)
# Case 1: fetch a Baidu Tieba post page with urllib and decode it as UTF-8.
import urllib.request
url = 'https://tieba.baidu.com/p/5734214281'
# with-statement closes the HTTP response when done (the original never closed it,
# leaking the connection)
with urllib.request.urlopen(url) as response:
    html = response.read().decode('utf-8')
html  # no-op echo left over from an interactive (REPL/notebook) session
运行截图如下:
# Case 1 continued: pull every .jpg URL out of the page source and download each one.
import re
reg=r'src="(.+?\.jpg)" pic_ext' #raw string: backslashes in the pattern need no extra escaping
imgre=re.compile(reg)#compile the regular expression once
imglist=re.findall(imgre,html)  # `html` is the page source fetched by the snippet above
x=1  # running image counter, used as the output filename
for i in imglist:
    print(x)
    # NOTE(review): save path is hard-coded to one user's desktop — adjust before running
    urllib.request.urlretrieve(i,'C:\\Users\\18487\\Desktop\\picture\\%s.jpg'%x)
    x+=1
print("done")
案例二:爬取任何你想要的有关图片(大餐来啦)
# Case 2: search Tieba for a keyword and bulk-download the images it finds.
import requests
import re
import os
import time
x = 1 #global picture counter (incremented in requests_download_image)
def getHtml(url):
    """Fetch *url* with a desktop browser User-Agent and return its text.

    On any request failure the error is printed and the single-space string
    ' ' is returned, which downstream regex scans treat as "no matches".
    A 4-second pause follows every successful request to stay polite.
    """
    request_headers = {'Accept-Encoding':'gzip,deflate',
                       'Accept-Language':'zh-CN',
                       'Cache-Control':'no-cache',
                       'Connection':'keep-alive',
                       'Pragma':'no-cache',
                       'User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Maxthon/4.4.6.2000 Chrome/30.0.1599.101 Safari/537.36'}
    try:
        resp = requests.get(url,headers=request_headers,timeout=6 )  # 6-second timeout
    except Exception as err:
        print('咋这么慢呢?!链接错误(在获取pageImage页失败):',str(err) )
        return ' '
    time.sleep(4)  # throttle between page fetches
    page_text = resp.text
    print('真正请求的网址是: ',resp.request.url)  # show the URL after any redirects
    return page_text
def getImagesList(html):
    """Return every imgsa.baidu.com .jpg URL found in *html* as a list."""
    # `.{3}` matches the '://' after 'https'; a trailing space is required
    # after the closing quote, mirroring Tieba's markup.
    pattern = re.compile(r'src="(https.{3}imgsa.baidu.com.*?\.jpg)" ')
    return pattern.findall(html)
def get_desktop(): #get the desktop path
    """Return the current user's Desktop path from the Windows registry."""
    import winreg  # Windows-only module; imported lazily so import is deferred to first use
    # PyHKEY supports the context-manager protocol: the registry handle is now
    # closed on exit (the original leaked it).
    with winreg.OpenKey(winreg.HKEY_CURRENT_USER,
                        r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders') as key:
        return winreg.QueryValueEx(key, "Desktop")[0]
def setDir():
    """Ensure a '下载的图片' folder exists on the desktop.

    Returns the folder path with a trailing backslash so callers can
    concatenate filenames directly.
    """
    target = get_desktop() + '\\下载的图片'
    if not os.path.isdir(target):
        os.makedirs(target)  # create it on first use
    return target + '\\'
def requests_download_image(imgList,paths):
    """Download every image URL in *imgList* into directory *paths*.

    Each file is saved as '图片_PICS_<timestamp>_form python Jie no. <x>.jpg',
    its URL is appended to the progress log via saveImgsUrlToTxtFile(), and
    the list of URLs is returned. Sleeps 2s between downloads.
    """
    global x  # picture counter shared with the rest of the script
    # Loop-invariant data hoisted out of the loop (originally rebuilt per image):
    geshiTime="%Y-%m-%d %H-%M-%S"  # timestamp format used in filenames
    headers={
    'Accept':'image/webp,*/*;q=0.8',
    'Accept-Encoding':'gzip,deflate',
    'Accept-Language':'zh-CN',
    'Cache-Control':'no-cache',
    'Connection':'keep-alive',
    'Cookie':'BAIDUID=4640920B96BB6974BA8866841700E60F:FG=1',
    'DNT':'1',
    'Host':'imgsa.baidu.com',
    'Pragma':'no-cache',
    'Referer':'https://tieba.baidu.com',
    'User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Maxthon/4.4.6.2000 Chrome/30.0.1599.101 Safari/537.36',
    }
    mkdir(paths)  # ensure the target directory exists once, not once per image
    for imgurl in imgList:
        if not imgurl.startswith('http') :  # prepend the scheme if the link lacks one
            imgurl = 'http://{}'.format(imgurl)
        tips = "\n第"+str(x)+"张照片的地址是:"+imgurl+"\n"  # progress message
        print(tips)
        # Timestamp suffix keeps re-runs from overwriting earlier files;
        # must stay inside the loop so successive files get fresh timestamps.
        namePlus = time.strftime(geshiTime, time.localtime())
        opener = requests.get(imgurl, headers=headers, stream=True)  # stream the body to disk
        creatPath = '{}{}.jpg'.format(paths,"图片_PICS_"+namePlus+'_form python Jie no. '+str(x))
        with open(creatPath, 'wb') as fd:
            for chunk in opener.iter_content(128):  # write 128-byte chunks until exhausted
                fd.write(chunk)
        time.sleep(2)  # throttle between image downloads
        x = x + 1
        saveImgsUrlToTxtFile(tips)  # log the image URL to the local text file
    print("\n\n总共下载了:" + str(len(imgList)) + " 张图片到本地\n\n")
    return imgList  # hand back all the image URLs that were processed
def saveImgsUrlToTxtFile(tips):
    """Append the progress line *tips* to 'tupian List.txt' in the download folder."""
    path = setDir()+'tupian List.txt'  # log file lives next to the downloaded images
    # with-statement guarantees the handle is closed even if write() raises
    # (the original's explicit close() was skipped on an exception).
    with open(path,'a+') as f:  # append mode; file is created if missing
        f.write(tips)
def mkdir(youPath):
    """Create directory *youPath* (and parents) if it does not already exist.

    Surrounding whitespace and a trailing backslash are stripped first.
    """
    # BUG FIX: the original assigned path=youPath.strip() and then immediately
    # overwrote it with path=youPath.rstrip("\\"), discarding the strip; it also
    # tested os.path.exists on the raw youPath while creating the cleaned path.
    # Chain both clean-ups and use the same cleaned path for test and create.
    path = youPath.strip().rstrip("\\")
    if not os.path.exists(path):
        os.makedirs(path)  # create intermediate directories as needed
def getImgPagesList(html): #for searchSoft()
    """Extract every thread link (/p/...) from a Tieba search page and
    return them as absolute URLs."""
    relative_links = re.compile(r'noreferrer" href="(/p/.+?)"').findall(html)
    print(relative_links)  # show the raw relative paths that matched
    absolute_links = []
    for rel in relative_links:
        full = 'https://tieba.baidu.com'+rel
        absolute_links.append(full)
        print('\n发现新的图片页: ',full)
    return absolute_links
def mainSoft(TieBaUrl):
    """Fetch one Tieba post page and download every matching image from it."""
    page_html = getHtml(TieBaUrl)
    image_urls = getImagesList(page_html)
    requests_download_image(image_urls,setDir())
def searchSoft():
    """Prompt for a keyword, search Tieba for it, and download images from
    every result page found."""
    searWord = input("输入关键字,为你下载一些图片,输入后按enter键继续:\n")
    # Build the search URL with the keyword percent-encoded.
    # FIX: quote() officially lives in urllib.parse; the original used
    # urllib.request.quote, which only works via an undocumented re-export.
    from urllib.parse import quote
    searWord_encode = quote(searWord)  # percent-encode the keyword
    url='https://tieba.baidu.com/f?ie=utf-8&kw={}&fr=search'.format(searWord_encode) # https://tieba.baidu.com/f?kw={your keyword}
    for TieZiUrl in getImgPagesList(getHtml(url)): #download images page by page
        mainSoft(TieZiUrl)
#getHtml('https://tieba.baidu.com/p/6209253803')  # single-page smoke test, kept disabled
#run this file from cmd with: python XXX.py
searchSoft()  # entry point: interactive keyword search + bulk download
print('\n\n 已经 ok !')
运行结果如下: