'''
Crawl videos.

Request URL:
    http://www.xiaohuar.com/v/
Request method:
    GET
Request headers:
    User-Agent: browser user-agent string
'''
import time
import requests
# The three steps of a crawler:
# 1. Send the request
def get_page(url, timeout=10):
    """Send a GET request to *url* and return the requests.Response.

    Fixes over the original:
    - Sends a browser-like User-Agent header, which the module docstring
      says the site requires (requests' default UA is often blocked).
    - Passes a timeout so a stalled server cannot hang the crawler
      forever (requests has no default timeout).

    Args:
        url: The URL to fetch.
        timeout: Seconds to wait for the server before giving up
            (backward-compatible new parameter, default 10).

    Returns:
        The requests.Response object (caller reads .text or .content).
    """
    headers = {
        'User-Agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                       'AppleWebKit/537.36 (KHTML, like Gecko) '
                       'Chrome/80.0.3987.122 Safari/537.36'),
    }
    response = requests.get(url, headers=headers, timeout=timeout)
    return response
# 2. Parse the data
import re
def parse_index(html):
    """Extract every detail-page URL from an index page's HTML.

    Captures the href of each <a class="imglink"> that directly follows
    a <div class="items"> opening tag.

    Args:
        html: Raw HTML text of an index (listing) page.

    Returns:
        A list of detail-page URL strings (empty list when none match).
    """
    # re.S makes '.' also match newlines, so matches may span lines.
    link_pattern = '<div class="items"><a class="imglink" href="(.*?)"'
    return re.findall(link_pattern, html, re.S)
# Parse a detail page.
def parse_detail(html):
    """Find the video URL embedded in a detail page's HTML.

    Args:
        html: Raw HTML text of a detail page.

    Returns:
        The src of the first <source> tag, or None when no video is found.
    """
    sources = re.findall('<source src="(.*?)">', html, re.S)
    if not sources:
        return None
    # Only the first video URL is wanted.
    return sources[0]
# 3. Save the data
import uuid
# uuid.uuid4() generates a random unique id (uuid1 is the time-based one)
def save_video(content):
    """Write binary video data to a uniquely named .mp4 file.

    Args:
        content: The raw video bytes (e.g. response.content).

    Side effects:
        Creates '<random-uuid>.mp4' in the current working directory and
        prints a completion message.
    """
    # uuid4 gives a random name, so parallel/repeated downloads never collide.
    filename = '{}.mp4'.format(uuid.uuid4())
    with open(filename, 'wb') as video_file:
        video_file.write(content)
    print('视频下载完毕')
# Script entry point: crawl index pages -> detail pages -> video files.
if __name__ == '__main__':
    # Walk the first six listing pages (list-3-0 ... list-3-5).
    for page_num in range(6):
        index_url = 'http://www.xiaohuar.com/list-3-{}.html'.format(page_num)
        # Fetch one index page.
        index_response = get_page(index_url)
        # Extract every detail-page URL from the index HTML and visit each.
        for detail_url in parse_index(index_response.text):
            print(detail_url)
            # Fetch the detail page and look for its embedded video URL.
            detail_response = get_page(detail_url)
            video_url = parse_detail(detail_response.text)
            # Skip detail pages that contain no video.
            if video_url:
                print(video_url)
                # Download the raw video bytes and write them to disk.
                video_response = get_page(video_url)
                save_video(video_response.content)