版权声明:本文为博主原创文章,未经博主允许不得转载。 https://blog.csdn.net/Kimidake/article/details/85054957
#! python3
# 获取豆瓣前250数据
import requests,bs4,pprint
def geturls():
    """Return the URL of every page of the Douban Top 250 list.

    Fetches the first page, reads the pagination links, and returns the
    first page followed by each subsequent page URL, deduplicated and in
    page order.

    Returns:
        list[str]: full URLs, one per results page.

    Raises:
        requests.HTTPError: if the first page cannot be fetched.
    """
    base = 'https://movie.douban.com/top250'
    # The paginator only links pages 2 and up, so seed the list with
    # page 1 itself — otherwise the top 25 films are never scraped.
    urls = [base]
    req = requests.get(base)
    req.raise_for_status()  # fail loudly instead of parsing an error page
    # Name the parser explicitly so bs4 doesn't warn and results don't
    # depend on which parser happens to be installed.
    soup = bs4.BeautifulSoup(req.text, 'html.parser')
    paginator = soup.find(name='div', attrs={'class': 'paginator'})
    for link in paginator.find_all(name='a'):
        url = base + link['href']
        # The trailing "next page" anchor duplicates the page-2 link.
        if url not in urls:
            urls.append(url)
    return urls
# Scrape every ranked film from each results page into one dict per film,
# then write a human-readable report to the desktop.
movies = []  # renamed from `list`/`dict` to stop shadowing the builtins
rank = 1
for url in geturls():
    req = requests.get(url)
    req.raise_for_status()  # don't silently parse an error page
    soup = bs4.BeautifulSoup(req.text, 'html.parser')
    for item in soup.find_all('div', attrs={'class': 'item'}):
        record = {
            'num': rank,
            'title': item.find(name='span', attrs={'class': 'title'}).string,
            # Collapse all whitespace in the credits/metadata paragraph.
            'actors': ''.join(item.select_one('div p').text.split()),
            'star': item.find(name='span', attrs={'class': 'rating_num'}).string,
            # The ratings-count text sits right after <span property="v:best">.
            'comment_nums': item.find(name='span', attrs={'property': 'v:best'})
                                .find_next_sibling().string,
        }
        # Some films have no one-line quote; store an empty string then.
        quote_span = item.find(name='span', attrs={'class': 'inq'})
        record['quote'] = quote_span.string if quote_span is not None else ''
        movies.append(record)
        rank += 1

# 输出到桌面 — `with` guarantees the file is closed even if a write fails.
with open(r'C:\Users\Administrator\Desktop\movies_top_250.txt', 'w',
          encoding='utf-8') as file:
    for ele in movies:
        file.write('名次:' + str(ele['num']) + '\n')
        file.write('标题:' + ele['title'] + '\n')
        file.write(ele['actors'] + '\n')
        file.write('评分:' + ele['star'] + '\n')
        file.write('评论数:' + ele['comment_nums'] + '\n')
        file.write('--------------------------------\n')

print(pprint.pformat(movies))