用requests + re库做的小练习:定向爬取淘宝商品信息(分页)
url : 'https://s.taobao.com/search?q=' + '书包' + '&s=' + str(44*i)
"""Small exercise: scrape Taobao search results (price + title) page by page
with requests + re, then print them as a numbered table."""

import re

import requests


def getHTMLText(url):
    """Fetch *url* and return its decoded text, or "" on any request failure."""
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        # Use the content-sniffed encoding so Chinese titles decode correctly.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Best-effort fetch: callers treat "" as "no data for this page".
        return ""


def parsePage(ilt, html):
    """Parse *html* and append [price, title] pairs to the list *ilt*.

    The page embeds goods data as JSON-like text.  Capture the quoted values
    with regex groups instead of eval()-ing scraped substrings — eval() on
    untrusted web content can execute arbitrary code.
    """
    # Capture groups return the bare values, so no quote-stripping is needed.
    prices = re.findall(r'"view_price":"([\d.]*)"', html)
    titles = re.findall(r'"raw_title":"(.*?)"', html)
    # zip() stops at the shorter list, so mismatched counts cannot raise the
    # IndexError the original index-based loop silently swallowed.
    for price, title in zip(prices, titles):
        ilt.append([price, title])


def printGoodsList(ilt):
    """Print *ilt* as a numbered, tab-separated table of price and title."""
    # Three columns: index, price, title (minimum field widths 4/8/16).
    # The original also passed an unused chr(12288) argument; extra .format
    # arguments are ignored, so dropping it does not change the output.
    tplt = "{:4}\t{:8}\t{:16}"
    print(tplt.format("序号", "价格", "商品名称"))
    for count, (price, title) in enumerate(ilt, start=1):
        print(tplt.format(count, price, title))


def saveGoodsList(goods_list):
    """Persist *goods_list*; not implemented yet.

    Parameter renamed from ``list``, which shadowed the builtin.
    """
    pass


def main():
    goods = '书包'  # search keyword
    depth = 2       # number of result pages to crawl
    start_url = 'https://s.taobao.com/search?q=' + goods
    infoList = []
    for i in range(depth):
        try:
            # Taobao paginates 44 items per page via the "s" offset parameter.
            url = start_url + '&s=' + str(44 * i)
            html = getHTMLText(url)
            # The original read "print parsePage(...)" — a Python 2 print
            # statement that is a SyntaxError under Python 3.
            parsePage(infoList, html)
            print(infoList)
        except Exception:
            # Best-effort crawl: skip a page that fails and keep going.
            continue
    printGoodsList(infoList)


if __name__ == '__main__':
    main()
经验总结:
1.re库匹配页面内容,bs4库匹配网页节点信息