toolkit-frame's toolkit-sprider (data collection): 笔趣阁 novels

To scrape novels from 笔趣阁, the following core packages are used:

requests: 2.10.0

beautifulsoup4: 4.7.1

html5lib (the parser the code hands to BeautifulSoup)

A few notes on the code:

1. BaseFrame.__log__("开始采集笔趣阁排行榜小说...") uses BaseFrame.__log__(), a logging class I wrote myself; replace it with print and the code works the same. A minimal stand-in is sketched below.
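If you do not have my framework, a rough stand-in for the logger (just a sketch, enough to keep the calls in this post running) looks like this:

class BaseFrame:
    # Minimal stand-in for my logging framework: both methods simply print.
    @staticmethod
    def __log__(msg):
        print("[INFO] " + msg)

    @staticmethod
    def __err__(msg):
        print("[ERROR] " + msg)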

2. response = requests.get(self.base_url, timeout=30, headers=UserAgent().get_random_header(self.base_url)) attaches a randomly generated header to every request, so each call simulates a fresh browser and the crawler is less likely to be taken for a bot. The random header class is described here: https://blog.csdn.net/zy0412326/article/details/104258491 (a sketch follows below).
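The real class is in the article linked above; stripped down, the idea is just to pick a random browser signature per request. A rough sketch (these User-Agent strings are only examples, not the ones my class ships with):

import random


class UserAgent:
    # Rough sketch of the random-header helper: choose a fresh
    # User-Agent on every call so each request looks like a new browser.
    _agents = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
        "(KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/605.1.15 "
        "(KHTML, like Gecko) Version/13.0 Safari/605.1.15",
        "Mozilla/5.0 (X11; Linux x86_64; rv:72.0) Gecko/20100101 Firefox/72.0",
    ]

    def get_random_header(self, referer):
        return {"User-Agent": random.choice(self._agents),
                "Referer": referer}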

3. save_path = BaseConfig().CORPUS_ROOT + os.sep + "QuLa" is where the downloaded files land; replace BaseConfig().CORPUS_ROOT with your own directory (a one-line stand-in follows).
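If you are not using my BaseConfig, a one-attribute stand-in is enough (the home-directory default here is only an example):

import os


class BaseConfig:
    # Stand-in for my config class: point CORPUS_ROOT at any directory
    # you want the novels saved under.
    CORPUS_ROOT = os.path.join(os.path.expanduser("~"), "corpus")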

4. mmEntity = SpriderEntity() creates an entity that records every piece of data as it is collected, so the program can be run multiple times without scraping anything twice.

I have covered this de-duplication strategy before: https://blog.csdn.net/zy0412326/article/details/103224399
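The interface the code below relies on is small: query_sprider_entity_by_urlandtitle() returns None for anything not yet collected, and save_sprider() records a collected chapter. A minimal sqlite-backed sketch of that store (my original implementation differs; see the linked article):

import sqlite3


class SpriderAccess:
    # Minimal sqlite-backed dedup store: one row per collected chapter,
    # so re-running the spider skips what is already downloaded.
    def __init__(self, db_path="sprider.db"):
        self.conn = sqlite3.connect(db_path)
        self.conn.execute(
            "CREATE TABLE IF NOT EXISTS sprider "
            "(url TEXT, title TEXT, base_url TEXT, create_datetime TEXT)")
        self.conn.commit()

    def query_sprider_entity_by_urlandtitle(self, url, title):
        # None when the (url, title) pair has never been saved.
        return self.conn.execute(
            "SELECT url FROM sprider WHERE url = ? AND title = ?",
            (url, title)).fetchone()

    def save_sprider(self, entity):
        self.conn.execute(
            "INSERT INTO sprider VALUES (?, ?, ?, ?)",
            (entity.sprider_url, entity.sprider_pic_title,
             entity.sprider_base_url, entity.create_datetime))
        self.conn.commit()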

Enough talk, straight to the code:

import os
import requests
from bs4 import BeautifulSoup
from access.sprider.SpriderAccess import SpriderAccess
from base.BaseConfig import BaseConfig
from base.BaseFrame import BaseFrame
from business.sprider.UserAgent import UserAgent
from object.entity.SpriderEntity import SpriderEntity
from plugin.Tools import Tools


class QuLa:
    base_url = "https://www.qu.la/"
    save_path = BaseConfig().CORPUS_ROOT + os.sep + "QuLa"

    def __init__(self):
        Tools.judge_diskpath_exits_create(self.save_path)
        pass

    def sprider_story(self):
        BaseFrame.__log__("开始采集笔趣阁排行榜小说...")
        self.story_url = self.base_url + "paihangbang"
        try:
            response = requests.get(self.story_url, timeout=30, headers=UserAgent().get_random_header(self.story_url))
            response.encoding = 'UTF-8'
            soup = BeautifulSoup(response.text, "html5lib")
        except Exception as e:
            BaseFrame.__err__("采集出现错误" + str(e))
            return  # soup is undefined if the request failed, so stop here
        div_list = soup.find_all('div', attrs={"class": 'topbooks'})
        for div in div_list:
            a_list = div.find_all('a', attrs={"target": '_blank'})
            for a in a_list:
                content_url = self.base_url + a.get("href")
                txt_title = a.get("title")
                try:
                    response = requests.get(content_url, timeout=30, headers=UserAgent().get_random_header(content_url))
                    response.encoding = 'UTF-8'
                    soup = BeautifulSoup(response.text, "html5lib")
                    dl_tag = soup.find('dl')
                    a_list = dl_tag.find_all('a')
                    for a_tag in a_list:
                        href = a_tag.get("href")
                        if "book" in href:
                            url = self.base_url + href
                            chapter = a_tag.text

                            mmEntity = SpriderEntity()
                            mmEntity.sprider_base_url = self.base_url
                            mmEntity.create_datetime = Tools.get_current_datetime()
                            mmEntity.sprider_url = url
                            mmEntity.sprider_pic_title = chapter
                            mmEntity.sprider_pic_index = str(1)
                            if SpriderAccess().query_sprider_entity_by_urlandtitle(url, chapter) is None:
                                SpriderAccess().save_sprider(mmEntity)
                                self.get_content(url, chapter, txt_title)
                except Exception as e:
                    BaseFrame.__err__("采集" + content_url + "出现错误" + str(e))
                    pass

            pass

    def get_content(self, url, chapter, title):
        """
        写文件至文本中
        :param url:采集的URL
        :param chapter: 章节名称
        :param title: 小说名称
        :return:
        """
        try:
            BaseFrame.__log__("正在采集" + url + "上的小说...")
            response = requests.get(url, timeout=60, headers=UserAgent().get_random_header(url))
            response.encoding = 'UTF-8'
            soup = BeautifulSoup(response.text, "html5lib")
            content = soup.find('div', attrs={"id": 'content'})
            content = chapter + "\n" + str(content). \
                replace('<br/>', '\n'). \
                replace("<script>chaptererror();</script>", ""). \
                replace("<div id=\"content\">", ""). \
                replace("</div>", "")
            txt_path = self.save_path + os.sep + str(title) + ".txt"
            Tools.write_string_to_txt(txt_path, content)
        except Exception as e:
            BaseFrame.__err__("采集" + chapter + "出现错误" + str(e) + "尝试重新采集.")
            self.get_content(url, chapter, title)  # retry the chapter; note this recursion has no depth limit
            pass


if __name__ == '__main__':
    QuLa().sprider_story()
