这套程序应用了正则表达式和requests库,爬取猫眼电影榜单前100名。
import requests
import re
import json
from requests.exceptions import RequestException
def get_one_page(url):
    """Fetch *url* and return the response body, or None on any failure.

    Returns None both on network errors and on non-200 status codes so
    the caller can simply skip the page instead of crashing.
    """
    # Maoyan blocks requests that carry the default requests User-Agent,
    # and a missing timeout can hang forever on a stalled connection.
    headers = {
        'User-Agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                       'AppleWebKit/537.36 (KHTML, like Gecko) '
                       'Chrome/120.0 Safari/537.36'),
    }
    try:
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        return None
def parse_one_page(html):
    """Yield one dict per movie entry found in a Maoyan board page.

    Each dict has the string keys 'index', 'title', 'star' and
    'releasetime', all with string values.
    """
    # Raw string avoids the invalid-escape warning on \d; re.S makes '.'
    # also match newlines so the pattern can span the whole <dd> element.
    pattern = re.compile(
        r'<dd>.*?board-index.*?>(\d+)</i>.*?title="(.*?)"'
        r'.*?star">(.*?)</p>.*?releasetime">(.*?)</p>.*?</dd>',
        re.S)
    for index, title, star, releasetime in pattern.findall(html):
        yield {
            'index': index,
            'title': title,
            # strip() removes the surrounding whitespace/newlines; the
            # slices drop the page's fixed labels that precede the values
            # ("主演:" is 3 characters, "上映时间:" is 5 characters).
            'star': star.strip()[3:],
            'releasetime': releasetime.strip()[5:],
        }
def write_to_file(content):
    """Append *content* to D:\\movies.txt as a single JSON line."""
    # ensure_ascii=False together with the utf-8 file encoding keeps
    # Chinese text human-readable instead of \uXXXX escapes; json.dumps
    # turns the movie dict into its string form.
    line = json.dumps(content, ensure_ascii=False)
    with open('D:\\movies.txt', 'a', encoding='utf-8') as out:
        out.write(line + '\n')
def main(offset):
    """Crawl one board page at the given *offset* and persist each movie."""
    url = 'http://maoyan.com/board/4?offset=' + str(offset)
    html = get_one_page(url)
    # get_one_page returns None on failure; skip the page instead of
    # letting parse_one_page crash on a None argument.
    if html is None:
        return
    for item in parse_one_page(html):
        write_to_file(item)
# Sequential crawl: ten pages of 10 entries each -> the top 100.
# Guarded so merely importing this module does not trigger network I/O.
if __name__ == '__main__':
    for i in range(10):
        main(i * 10)
虽然数据量不大,但这里还是提供一个多线程抓取的方法(利用 multiprocessing.dummy 提供的线程池,适合这种 I/O 密集型任务),代码如下:
from multiprocessing.dummy import Pool as ThreadPool
if __name__ == '__main__':
    # Threaded variant: multiprocessing.dummy exposes the Pool API backed
    # by threads, which suits this I/O-bound crawl.
    worker_pool = ThreadPool(4)
    offsets = [page * 10 for page in range(10)]
    # map() hands each offset to main() on one of the pool's threads.
    worker_pool.map(main, offsets)
    worker_pool.close()
    worker_pool.join()