from urllib import request
from multiprocessing import Process
import os
def downloader(url, **kwargs):
    print('Current process id:', os.getpid(), 'parent process id:', os.getppid())
    print('Keyword argument:', kwargs['pro'])
    # File name: the last segment of the URL
    file_name = url.split('/')[-1]
    # Send the request; urlopen returns a response object
    response = request.urlopen(url)
    # Read the response body
    content = response.read()
    # Save the content to a local file
    with open(file_name, 'wb') as fp:
        fp.write(content)
if __name__ == '__main__':
    # for i in range(1, 5, 1):
    #     url = 'http://www.langlang2017.com/img/banner' + str(i) + '.png'
    #     print(url)
    #     p = Process(target=downloader, args=(url,))
    name_list = ['Process 1-蒋博文', 'Process 2-少女周']
    url_list = ['https://img.alicdn.com/imgextra/i1/55316976/O1CN01LaLGVN21P3SIS5uZZ_!!0-saturn_solar.jpg_220x220.jpg',
                'https://img.alicdn.com/imgextra/i2/122400877/O1CN01dpfo471ILhqr9u3Ic_!!0-saturn_solar.jpg_220x220.jpg'
                ]
    p_list = []  # list of child processes
    i = 0
    for url in url_list:
        n = name_list[i]
        i += 1
        p = Process(target=downloader, name=n, args=(url,), kwargs={'pro': 'highest level'})
        print('Child process %s is about to run:' % p.name)
        p.start()
        p_list.append(p)
    for p in p_list:
        p.join()  # wait for each child process to finish
    print('Main process id:', os.getpid())
    # Make sure this line runs only after all child processes have finished
    print('-------------------- All files downloaded ------------')
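The script above creates each worker by passing a target function to Process. For comparison, another common way to create processes is to subclass multiprocessing.Process and override run(); below is a minimal sketch of the same downloader written that way. The class name DownloadProcess and the worker naming scheme are illustrative choices, not part of the original post.

from urllib import request
from multiprocessing import Process
import os


class DownloadProcess(Process):
    # Hypothetical subclass-based variant of the downloader above
    def __init__(self, url, name):
        super().__init__(name=name)
        self.url = url

    def run(self):
        # run() executes in the child process after start() is called
        print('Current process id:', os.getpid())
        file_name = self.url.split('/')[-1]
        content = request.urlopen(self.url).read()
        with open(file_name, 'wb') as fp:
            fp.write(content)


if __name__ == '__main__':
    urls = ['https://img.alicdn.com/imgextra/i1/55316976/O1CN01LaLGVN21P3SIS5uZZ_!!0-saturn_solar.jpg_220x220.jpg']
    workers = [DownloadProcess(u, name='worker-%d' % i) for i, u in enumerate(urls)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()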
Reposted from blog.csdn.net/yuanzhen1/article/details/88427145