Basic Crawler

[Figure: basic crawler architecture. The scheduler drives the URL manager, HTML downloader, HTML parser, and data store.]

URL Manager

class UrlManager():
    """Tracks URLs waiting to be crawled and URLs already crawled."""
    def __init__(self):
        self.new_urls = set()  # URLs queued for crawling
        self.old_urls = set()  # URLs already crawled

    def has_new_url(self):
        return self.new_url_size() != 0

    def get_new_url(self):
        # Pop an uncrawled URL and mark it as crawled.
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url

    def add_new_url(self, url):
        if url is None:
            return
        # Skip URLs that are already queued or already crawled.
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def new_url_size(self):
        return len(self.new_urls)

    def old_url_size(self):
        return len(self.old_urls)
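
As a quick sanity check, the manager deduplicates against both sets; a minimal sketch (the URL below is a made-up example):

url_manager = UrlManager()
url_manager.add_new_url('https://baike.baidu.com/item/example')  # hypothetical URL
url_manager.add_new_url('https://baike.baidu.com/item/example')  # duplicate, ignored
print(url_manager.new_url_size())  # 1
url = url_manager.get_new_url()    # pops the URL and marks it crawled
url_manager.add_new_url(url)       # already crawled, ignored
print(url_manager.has_new_url())   # False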

HTML Downloader

import requests

class HtmlDownloader():
    def download(self, url):
        if url is None:
            return None
        # Send browser-like headers so the site does not reject the request.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.90 Safari/537.36 2345Explorer/9.3.2.17331',
            'Referer': r'https://baike.baidu.com',
            'Connection': 'keep-alive'
        }
        r = requests.get(url, headers=headers)
        if r.status_code == 200:
            r.encoding = 'utf-8'
            return r.text
        # Signal a failed download to the caller instead of returning error pages.
        return None
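
A minimal smoke test for the downloader (requires network access; note that a non-200 response now returns None):

downloader = HtmlDownloader()
html = downloader.download('https://baike.baidu.com')
print(len(html) if html else 'download failed')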

HTML Parser

import re
from urllib.parse import urljoin
from bs4 import BeautifulSoup

class HtmlParser():
    def parser(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return None, None
        # html_cont is already a decoded str, so no from_encoding is needed.
        soup = BeautifulSoup(html_cont, 'html.parser')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # Entry pages on Baidu Baike live under /item/.
        links = soup.find_all('a', href=re.compile(r'/item/'))
        for link in links:
            new_url = link['href']
            # Resolve relative hrefs against the current page URL.
            new_full_url = urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        data = {}
        data['url'] = page_url
        title = soup.find('dd', 'lemmaWgt-lemmaTitle-title').find('h1')
        data['title'] = title.get_text()
        summary = soup.find('div', 'lemma-summary')
        data['summary'] = summary.get_text()
        return data
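
Wiring the downloader and parser together gives a quick end-to-end check; a sketch, assuming Baidu Baike still serves the lemmaWgt-lemmaTitle-title and lemma-summary markup this parser expects (if the markup has changed, _get_new_data will raise on the missing nodes):

downloader = HtmlDownloader()
parser = HtmlParser()
seed = 'https://baike.baidu.com/item/%E7%BD%91%E7%BB%9C%E7%88%AC%E8%99%AB/5162711?fr=aladdin'
html = downloader.download(seed)
if html:
    urls, data = parser.parser(seed, html)
    print(len(urls), data['title'])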

Data Store

import codecs

class DataOutput():
    def __init__(self):
        self.datas = []

    def store_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        # The output path is specific to the author's machine; adjust as needed.
        fout = codecs.open('/home/as/文档/爬虫/开发与项目实战/基础篇/save/baike.html', 'w', encoding='utf-8')
        fout.write('<html>')
        fout.write('<body>')
        fout.write('<table>')
        for data in self.datas:
            fout.write('<tr>')
            fout.write('<td>%s</td>' % data['url'])
            fout.write('<td>%s</td>' % data['title'])
            fout.write('<td>%s</td>' % data['summary'])
            fout.write('</tr>')
        # Clear the buffer only after the loop; removing items while
        # iterating would skip every other record.
        self.datas = []
        fout.write('</table>')
        fout.write('</body>')
        fout.write('</html>')
        fout.close()
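
A minimal check of the writer (the record below is made up; adjust the hardcoded output path to your machine first):

output = DataOutput()
output.store_data({'url': 'https://example.com', 'title': 'demo', 'summary': 'a made-up summary'})
output.output_html()  # writes a one-row HTML table and clears the buffer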

Crawler Scheduler

from firstSpider.Dataoutput import DataOutput
from firstSpider.HtmlDownloader import HtmlDownloader
from firstSpider.Htmlparser import HtmlParser
from firstSpider.UrlManager import UrlManager

class SpiderMan():
    def __init__(self):
        self.manager = UrlManager()
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        self.output = DataOutput()

    def crawl(self, root_url):
        self.manager.add_new_url(root_url)
        # Stop once the queue is empty or 100 pages have been crawled.
        while self.manager.has_new_url() and self.manager.old_url_size() < 100:
            try:
                new_url = self.manager.get_new_url()
                html = self.downloader.download(new_url)
                new_urls, data = self.parser.parser(new_url, html)
                self.manager.add_new_urls(new_urls)
                self.output.store_data(data)
                print('Crawled %s links' % self.manager.old_url_size())
            except Exception as e:
                print('crawl failed:', e)
        self.output.output_html()

if __name__ == '__main__':
    spider_man = SpiderMan()
    spider_man.crawl('https://baike.baidu.com/item/%E7%BD%91%E7%BB%9C%E7%88%AC%E8%99%AB/5162711?fr=aladdin')


Reposted from blog.csdn.net/weixin_39777626/article/details/81564198