先记录一下,普通的糗事百科爬虫:
import urllib.request
import re
import time
import urllib.error
# Spoof a desktop Firefox User-Agent so the site does not reject the default
# urllib client string.
headers=('User-Agent','Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:63.0) Gecko/20100101 Firefox/63.0')
opener=urllib.request.build_opener()
opener.addheaders=[headers]
# Install globally so every subsequent urllib.request.urlopen() call sends
# the header above without passing an opener around.
urllib.request.install_opener(opener)
# Scrape pages 1-2 of qiushibaike and print every joke found on each page.
# (Indentation restored: the loop bodies below are required by Python syntax.)
for i in range(1, 3):
    url = 'https://www.qiushibaike.com/8hr/page/' + str(i)
    # 'ignore' drops undecodable bytes instead of raising UnicodeDecodeError.
    pagedata = urllib.request.urlopen(url).read().decode('utf-8', 'ignore')
    # Non-greedy capture of the joke text inside each content <div>;
    # re.S lets '.' match newlines so multi-line jokes are captured.
    pat = '<div class="content">.*?<span>(.*?)</span>.*?</div>'
    datalist = re.compile(pat, re.S).findall(pagedata)
    for j, joke in enumerate(datalist):
        # Fixed message: original said '第...几个段子' (ungrammatical and
        # inconsistent with the second script, which uses '个段子').
        print('第' + str(i) + '页第' + str(j) + '个段子的内容是:')
        print(joke)
多线程爬虫可以实例化多个不同的爬虫类,让它们同时并行地进行抓取处理。
import urllib.request
import re
import time
import urllib.error
import threading
# Same header spoofing as the single-threaded version: present a desktop
# Firefox User-Agent on every request.
headers=('User-Agent','Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:63.0) Gecko/20100101 Firefox/63.0')
opener=urllib.request.build_opener()
opener.addheaders=[headers]
# Installed globally, so both worker threads share this opener.
urllib.request.install_opener(opener)
class One(threading.Thread):
    """Worker thread that scrapes the odd-numbered pages (1, 3, ..., 35)."""

    def __init__(self):
        # Bug fix: the original misspelled this as __int__, so the intended
        # override never ran (Thread's default constructor ran instead).
        threading.Thread.__init__(self)

    def run(self):
        for i in range(1, 36, 2):
            url = 'https://www.qiushibaike.com/8hr/page/' + str(i)
            # 'ignore' drops undecodable bytes instead of raising.
            pagedata = urllib.request.urlopen(url).read().decode('utf-8', 'ignore')
            # Non-greedy capture of joke text; re.S lets '.' span newlines.
            pat = '<div class="content">.*?<span>(.*?)</span>.*?</div>'
            datalist = re.compile(pat, re.S).findall(pagedata)
            for j, joke in enumerate(datalist):
                # Fixed message: original said '几个段子', inconsistent with
                # the Two class which prints '个段子'.
                print('第' + str(i) + '页第' + str(j) + '个段子的内容是:')
                print(joke)
class Two(threading.Thread):
    """Worker thread that scrapes the even-numbered pages (2, 4, ..., 34)."""

    def __init__(self):
        # Bug fix: the original misspelled this as __int__, so the intended
        # override never ran (Thread's default constructor ran instead).
        threading.Thread.__init__(self)

    def run(self):
        # Bug fix: original started at 0, but there is no page 0 on the
        # site; even pages start at 2.
        for i in range(2, 36, 2):
            url = 'https://www.qiushibaike.com/8hr/page/' + str(i)
            # 'ignore' drops undecodable bytes instead of raising.
            pagedata = urllib.request.urlopen(url).read().decode('utf-8', 'ignore')
            # Non-greedy capture of joke text; re.S lets '.' span newlines.
            pat = '<div class="content">.*?<span>(.*?)</span>.*?</div>'
            datalist = re.compile(pat, re.S).findall(pagedata)
            for j, joke in enumerate(datalist):
                print('第' + str(i) + '页第' + str(j) + '个段子的内容是:')
                print(joke)
# Launch both workers; each .start() spawns a thread running its run()
# method, so odd and even pages are fetched concurrently.
one = One()
one.start()
two=Two()
two.start()