爬图-python

写在前面:

爬取评论、图片是入门爬虫练习正则和 bs4 的基础,不要想歪。

对url:https://www.nvshens.com/进行爬取

功能:

爬取所搜索的某一位模特的全部套图
并且每个套图单独成一个文件夹
并且加入了check功能,以前爬取过的不会重复爬取
改主函数里的url和path名就可使用

效果:

13329264-ebe424d9d817fcb7.png

代码:

import os
import requests
import re
import time


def url_open(url):
    """Fetch *url* with a desktop-browser User-Agent and return the Response.

    A Referer header is sent as well, since some image hosts reject
    requests without one.
    """
    request_headers = {
        'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0',
        "Referer": "http://www.mzitu.com/",
    }
    return requests.get(url, headers=request_headers)


def get_page(url, PATH):
    """Collect album URLs from a model's index page, skipping known ones.

    Side effect: changes the current working directory to PATH
    (creating it if necessary) — the download log and all album
    folders live under it.

    Parameters:
        url:  album index page, e.g. https://www.nvshens.com/girl/<id>/album/
        PATH: directory holding the downloads and 'logfile_check.log'

    Returns:
        List of absolute album URLs not yet present in the check log,
        in page order, without duplicates.  Empty list when nothing
        new is found (the original raised UnboundLocalError here).
    """
    # Enter the working directory, creating it on first run.
    os.makedirs(PATH, exist_ok=True)
    os.chdir(PATH)

    # Load previously crawled album URLs; 'with' guarantees the file is
    # closed (the original closed it inside the scrape loop, leaking the
    # handle whenever that line was not reached).
    if os.path.exists('logfile_check.log'):
        with open('logfile_check.log', 'r', encoding='utf8') as check_file:
            known_addrs = check_file.read().split()
    else:
        known_addrs = []

    phtml = url_open(url).text

    # Album links look like:
    #   <div class='igalleryli_title'><a href='/g/12345/' class='caption'>
    page_p = r"<div class='igalleryli_title'><a href='([^']+)' class='caption'>"
    relative_urls = re.findall(page_p, phtml)
    base = 'https://www.nvshens.com'

    # De-duplicate while preserving page order, keeping only unseen URLs.
    # (Replaces the per-iteration list(set(...)) + sort-by-index dance.)
    seen = set(known_addrs)
    true_page_addrs = []
    for rel in relative_urls:
        full = base + rel
        if full not in seen:
            seen.add(full)
            true_page_addrs.append(full)
    return true_page_addrs
        
def get_img(html, PATH):
    """Download every new album listed at *html* into PATH.

    Each album becomes a sub-directory named after its title; an album's
    URL is appended to 'logfile_check.log' only after it downloads
    completely, so interrupted albums are retried on the next run
    (the original logged *before* downloading and would skip them).

    Parameters:
        html: album index URL, forwarded to get_page()
        PATH: root download directory (get_page chdirs into it)
    """
    page_addrs = get_page(html, PATH)
    print(page_addrs)
    for page_url in page_addrs:
        print(page_url)
        img_html1 = url_open(page_url).text

        # Pagination links look like "...3.html' >3</a>"; the last
        # numeric match is the album's page count.
        page_nums = re.findall(r"\.html' >(\d+)</a>", img_html1)
        if not page_nums:
            # Layout changed or empty album — skip instead of crashing
            # (the original IndexError'd outside its try block here).
            print('no page count found: {}'.format(page_url))
            continue
        last_page = int(page_nums[-1])

        titles = re.findall('<h1 id="htilte">([^"]+)</h1>', img_html1)
        if not titles:
            print('no title found: {}'.format(page_url))
            continue
        title = titles[0]

        # Remember where PATH is so the except/cleanup path cannot walk
        # above it (the original chdir'd to PATH's *parent* when mkdir
        # failed, corrupting the cwd for every following album).
        root = os.getcwd()
        ok = False
        try:
            os.mkdir(title)
            os.chdir(title)
            for i in range(1, last_page + 1):
                img_html2 = url_open(page_url + str(i) + '.html').text
                img_p = r"<img src='([^']+\.jpg)' alt='"
                for each in re.findall(img_p, img_html2):
                    fname = each.split("/")[-1]
                    with open(fname, "wb") as f:
                        f.write(url_open(each).content)
                    time.sleep(1)  # be polite to the image host
            ok = True
        except (OSError, requests.RequestException) as e:
            # mkdir fails when the folder is left over from an
            # interrupted run; network errors are reported the same way.
            # Narrowed from the original bare `except:` so Ctrl-C works.
            print('{} {} failed: {}'.format(title, page_url, e))
        finally:
            os.chdir(root)
        if ok:
            # Mark the album done only after a full, successful download.
            with open('logfile_check.log', 'a', encoding='utf8') as logfile:
                print(page_url, file=logfile)
    

if __name__ == '__main__':
    # Point at a model's album index page and pick a folder name,
    # then run the crawl.
    start_url = "https://www.nvshens.com/girl/20763/album/"
    save_dir = 'Zhouyanxi'
    get_img(start_url, save_dir)

猜你喜欢

转载自blog.csdn.net/weixin_34268843/article/details/87424200