# Crawling workflow:
# 1. Determine the URL
# 2. Request the URL
# 3. Extract the data with XPath
# 4. Save the data
import time
import json

import requests
from lxml import etree


class GuoKe(object):
    """Scrape guokr.com's 'hottest questions' listing pages, follow each
    question to its detail page, and append title/answer records to
    guoke.json as JSON lines."""

    def __init__(self):
        self.base_url = 'https://www.guokr.com/ask/hottest/'
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.108 Safari/537.36",
            "Referer": "https://www.guokr.com/"
        }

    def get_data(self, url, page=None, is_detail=False):
        """Fetch *url* and return the response body as text.

        Listing pages (is_detail=False) send the page number as a ``?page=``
        query parameter; detail pages are requested without extra params.
        ``page`` now defaults to None (backward-compatible) so detail
        fetches can omit it.
        """
        if not is_detail:
            param = {
                "page": page
            }
            return requests.get(url, headers=self.headers, params=param).text
        return requests.get(url, headers=self.headers).text

    def parse_text(self, text):
        """Parse an HTML string into an lxml element tree."""
        dom = etree.HTML(text)
        return dom

    def detail_dom(self, dom):
        """Extract the question description (first <p> text) from a detail-page DOM."""
        answer = dom.xpath('//div[@id="questionDesc"]/p/text()')[0]
        return answer

    def parse(self, dom):
        """Yield {'title', 'answer'} dicts for each question on a listing page.

        Follows every question link to its detail page to grab the answer.
        NOTE(review): assumes the href attribute is an absolute URL — confirm
        against the live site markup.
        """
        nodes = dom.xpath('//div[@class="ask-list-detials"]')
        for node in nodes:
            item = {}
            item['title'] = node.xpath('./h2/a/text()')[0]
            detail_url = node.xpath('./h2/a/@href')[0]
            print(detail_url)
            # BUG FIX: the original passed True positionally, binding it to
            # `page` (so the request went through the listing branch with
            # params={"page": True}) instead of setting is_detail.
            detail_text = self.get_data(detail_url, is_detail=True)
            # Renamed local (was `detail_dom`) to stop shadowing the method.
            d_dom = etree.HTML(detail_text)
            answer = self.detail_dom(d_dom)
            print(answer)
            item['answer'] = answer
            print(item)
            yield item

    def save(self, f, item):
        """Append one item to the open file as a JSON line (with trailing comma)."""
        f.write(json.dumps(item, ensure_ascii=False) + ',\n')

    def run(self):
        """Prompt for a page count, crawl that many listing pages, save results."""
        # Explicit utf-8 so the non-ASCII JSON (ensure_ascii=False) is written
        # correctly regardless of the platform's default encoding.
        with open('guoke.json', 'w', encoding='utf-8') as f:
            page = int(input('请输入页码:'))
            for i in range(page):
                text = self.get_data(self.base_url, i + 1)
                dom = self.parse_text(text)
                my_generator = self.parse(dom)
                while True:
                    try:
                        item = next(my_generator)
                        self.save(f, item)
                        time.sleep(0.4)  # throttle so we don't hammer the site
                    except StopIteration:
                        # Normal end of this page's items.
                        break
                    except Exception as e:
                        # Original used a bare `except: break`, silently hiding
                        # network/parse errors; keep the best-effort behavior
                        # but report what actually went wrong.
                        print(f'error while scraping page {i + 1}: {e}')
                        break
                print(f'第{i + 1}页保存完成')


if __name__ == '__main__':
    guoke = GuoKe()
    guoke.run()