import threading
import re
import urllib.request
import urllib.error
# Browser-like User-Agent so the site does not reject urllib's default agent.
headers = ("User-Agent", "Mozilla/5.0 (Windows NT 10.0; …) Gecko/20100101 Firefox/61.0")
opener = urllib.request.build_opener()
# BUG FIX: the real attribute is `addheaders` (plural). Assigning to
# `addheader` silently created an unused attribute, so the custom
# User-Agent was never actually sent with requests.
opener.addheaders = [headers]
# Install globally so every urllib.request.urlopen() call uses this opener.
urllib.request.install_opener(opener)
class one(threading.Thread):
    """Worker thread that scrapes the odd-numbered joke pages (1, 3, ..., 35)."""

    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        # Compile once; the pattern captures the <span> text of each joke.
        # NOTE(review): the trailing '.*?</>' looks like a typo for '</div>' — confirm
        pattern = re.compile('<div class="content">.*?<span>(.*?)</span>.*?</>', re.S)
        for page in range(1, 36, 2):
            try:
                page_url = "http://www.qiushibaike.com/8hr/page/" + str(page)
                html = use_proxy(page_url, IP)
                print(len(html))
                jokes = pattern.findall(html)
                print(len(jokes))
                for idx, joke in enumerate(jokes):
                    print("第" + str(page) + "页第" + str(idx) + "个段子的内容是: ")
                    print(joke)
            except urllib.error.HTTPError as err:
                # Report HTTP failures for this page and keep going.
                if hasattr(err, "code"):
                    print(err.code)
                if hasattr(err, "reason"):
                    print(err.reason)
class two(threading.Thread):
    """Worker thread that scrapes the even-numbered joke pages (2, 4, ..., 34)."""

    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        # BUG FIX: was range(0, 36, 2) — page 0 does not exist on the site.
        # Thread `one` covers odd pages 1..35, so this thread takes the
        # even pages 2..34.
        for i in range(2, 36, 2):
            try:
                url = "http://www.qiushibaike.com/8hr/page/" + str(i)
                pagedata = use_proxy(url, IP)
                print(len(pagedata))
                # NOTE(review): the trailing '.*?</>' looks like a typo for '</div>' — confirm
                pat = '<div class="content">.*?<span>(.*?)</span>.*?</>'
                datalist = re.compile(pat, re.S).findall(pagedata)
                print(len(datalist))
                for j in range(0, len(datalist)):
                    print("第" + str(i) + "页第" + str(j) + "个段子的内容是: ")
                    print(datalist[j])
            except urllib.error.HTTPError as e:
                # Report HTTP failures for this page and keep going.
                if hasattr(e, "code"):
                    print(e.code)
                if hasattr(e, "reason"):
                    print(e.reason)
def use_proxy(url, IP):
    """Fetch *url* through the HTTP proxy *IP* ("host:port") and return the page text.

    Installs the proxy opener globally, so subsequent plain urlopen() calls
    also route through the proxy (the original comment's stated intent).
    Decodes the body as UTF-8, ignoring undecodable bytes.
    Raises urllib.error.HTTPError / URLError on network failure.
    """
    proxy = urllib.request.ProxyHandler({"http": IP})
    opener = urllib.request.build_opener(proxy, urllib.request.HTTPHandler)
    # Install as the global opener (was: 添加为全局 — "make it global").
    urllib.request.install_opener(opener)
    # BUG FIX: the response object was never closed — use a context manager
    # so the socket is released even if read() raises.
    with urllib.request.urlopen(url) as resp:
        data = resp.read()
    return data.decode("utf-8", "ignore")
# HTTP proxy endpoint shared by both worker threads.
IP = "61.135.217.7:80"

# Launch both scraper threads concurrently.
t1 = one()
t2 = two()
for worker in (t1, t2):
    worker.start()