一般网站服务器都会通过检查User-Agent来判断到底是浏览器正常访问呢,还是爬虫非法访问,所以我们只需要在代码里设置一下,让爬虫伪装出浏览器的User-Agent即可,具体操作如下:
找到有道翻译-审查元素-Network-Headers-User-Agent,将内容复制如下:
Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0
第一种方法如下:
import urllib.request
import urllib.parse
import json

# Translate one piece of user-supplied text via the Youdao translate endpoint,
# disguising the crawler with a browser User-Agent header.
contend = input('请输入 需要翻译的内容:')
url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'

# Hiding method 1: pass a browser User-Agent in the headers dict given to the
# Request constructor.
head = {}
head['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0'

# Form fields expected by the translate endpoint. salt/sign are fixed values
# copied from a captured request; NOTE(review): the service may reject them if
# it validates the signature server-side — confirm against the current API.
data = {}
data['i'] = contend
data['from'] = 'AUTO'
data['to'] = 'AUTO'
data['smartresult'] = 'dict'
data['client'] = 'fanyideskweb'
data['salt'] = '1538989442643'
data['sign'] = '40954ebe6d906735813c2cd7c2274733'
data['doctype'] = 'json'
data['version'] = '2.1'
data['keyfrom'] = 'fanyi.web'
data['action'] = 'FY_BY_CLICKBUTTION'
data['typoResult'] = 'false'

# urlencode the form and encode to bytes, as required for a POST body.
data = urllib.parse.urlencode(data).encode('utf-8')
req = urllib.request.Request(url, data, head)
# BUG FIX: the original called urlopen(url, data), which sent a fresh request
# WITHOUT the custom header, silently defeating the User-Agent disguise.
# Opening the Request object actually sends the header we set above.
response = urllib.request.urlopen(req)
html = response.read().decode('utf-8')
target = json.loads(html)
print('翻译结果:%s' % (target['translateResult'][0][0]['tgt']))
运行结果:
第二种方法:
import urllib.request
import urllib.parse
import json
import time

# Repeatedly translate user-supplied text via the Youdao translate endpoint,
# disguising the crawler by adding a browser User-Agent with add_header().
while True:
    contend = input('请输入 需要翻译的内容:(输入‘q’则是退出程序)')
    # BUG FIX: the prompt tells the user to type 'q' to quit, but the original
    # compared against 'q!', so typing 'q' never exited the loop.
    if contend == 'q':
        break
    url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'

    # Form fields expected by the translate endpoint. salt/sign are fixed
    # values copied from a captured request; NOTE(review): the service may
    # reject them if it validates the signature — confirm against current API.
    data = {}
    data['i'] = contend
    data['from'] = 'AUTO'
    data['to'] = 'AUTO'
    data['smartresult'] = 'dict'
    data['client'] = 'fanyideskweb'
    data['salt'] = '1538989442643'
    data['sign'] = '40954ebe6d906735813c2cd7c2274733'
    data['doctype'] = 'json'
    data['version'] = '2.1'
    data['keyfrom'] = 'fanyi.web'
    data['action'] = 'FY_BY_CLICKBUTTION'
    data['typoResult'] = 'false'

    # urlencode the form and encode to bytes, as required for a POST body.
    data = urllib.parse.urlencode(data).encode('utf-8')
    req = urllib.request.Request(url, data)
    # Hiding method 2: attach the browser User-Agent after construction.
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0')
    # BUG FIX: the original called urlopen(url, data), which sent a fresh
    # request WITHOUT the header added above. Opening the Request object
    # actually sends the User-Agent.
    response = urllib.request.urlopen(req)
    html = response.read().decode('utf-8')
    target = json.loads(html)
    print('翻译结果:%s' % (target['translateResult'][0][0]['tgt']))
    # Throttle requests so the service is less likely to block the crawler.
    time.sleep(5)
运行结果: