Python Web Crawler: A String-Matching Example

To get started with web crawling in Python, it helps to first understand the crudest approach: matching strings directly in the raw HTML.
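As a minimal sketch of the idea (using a made-up HTML snippet rather than a real page), pulling an image address out of markup with nothing but str.find() and slicing looks like this:

# Minimal sketch: extract an image address from an invented HTML snippet.
html = '<p>hello</p><img src="//example.com/pics/cat.jpg" /><p>bye</p>'

start = html.find('img src=')           # position of the attribute
if start != -1:
    end = html.find('.jpg', start)      # end of the image file name
    if end != -1:
        # Skip the 9 characters of 'img src="', keep through '.jpg'.
        print(html[start + 9:end + 4])  # //example.com/pics/cat.jpg

The full program below is built from exactly this kind of find-and-slice logic.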

The example below finds the target pages, scrapes the images, and saves them to disk.

# String-matching lookup: a web crawler that scrapes and saves images


import urllib.request
import os

def url_open(url):
    # Attach a browser-like User-Agent so the site is less likely to
    # reject the request as an obvious script.
    req = urllib.request.Request(url)
    req.add_header('User-Agent','Mozilla/5.0 (Windows NT 10.0; …) Gecko/20100101 Firefox/62.0')

    # Open the Request object (not the bare URL) so the header is actually sent.
    response = urllib.request.urlopen(req)
    html = response.read()

    print(url)
    return html

def get_page(url):
    html = url_open(url).decode('utf-8')

    # The front page contains markup along the lines of: current-comment-page">[231]
    # Skipping 23 characters ('current-comment-page">[') lands on the page number,
    # which runs up to the closing ']'.
    a = html.find('current-comment-page') + 23
    b = html.find(']', a)

    return html[a:b]


def find_imgs(url):
    html = url_open(url).decode('utf-8')
    img_addrs = []

    # Scan the raw HTML for 'img src=' and grab everything up to '.jpg'.
    a = html.find('img src=')

    while a != -1:
        # Only look a limited distance ahead for the extension.
        b = html.find('.jpg', a, a + 255)

        if b != -1:
            # Skip the 9 characters of 'img src="', keep through '.jpg'.
            addr = html[a + 9:b + 4]
            # The addresses are often protocol-relative ('//...'), so add a
            # scheme before trying to download them.
            if addr.startswith('//'):
                addr = 'http:' + addr
            img_addrs.append(addr)
        else:
            b = a + 9

        a = html.find('img src=', b)

    for each in img_addrs:
        print(each)

    return img_addrs
        
  

def save_imgs(folder, img_addrs):
    for each in img_addrs:
        # Use the last path segment as the local file name.
        filename = each.split('/')[-1]
        with open(filename, 'wb') as f:
            img = url_open(each)
            f.write(img)
  

def xxoo_download_imgs(folder='xxoo_download_imgs', pages=10):
    # Create a folder for the downloads and work inside it.
    os.mkdir(folder)
    os.chdir(folder)

    url = "http://jandan.net/ooxx/"
    # The front page tells us the newest page number.
    page_num = int(get_page(url))

    # Walk backwards through the most recent `pages` pages.
    for i in range(pages):
        page_url = url + 'page-' + str(page_num - i) + '#comments'
        img_addrs = find_imgs(page_url)
        save_imgs(folder, img_addrs)
        

if __name__ == '__main__':
    xxoo_download_imgs()
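
The magic offsets (+23 in get_page, +9 and +4 in find_imgs) simply skip over the fixed markup around the values we want. A quick offline check against an invented fragment (my own example, assuming the site's markup still looks roughly like this) shows what get_page actually slices out:

# Offline check of get_page's slicing, using an invented page fragment.
fragment = '<span class="current-comment-page">[231]</span>'

a = fragment.find('current-comment-page') + 23   # len('current-comment-page">[') == 23
b = fragment.find(']', a)
print(fragment[a:b])                              # 231

If the site ever changes its HTML, these offsets silently stop lining up, which is exactly why string matching is the crudest way to crawl a page.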

Reposted from blog.csdn.net/ly_xiamu/article/details/83318028