Asking for help with a Youdao Translate scraper
In the hands-on scraping lesson, while implementing the Youdao Translate example, the data payload has several extra fields that change with every translation, marked below.
As a result, every request returns "errorCode":50. I suspected the site might be blocking scraper scripts, but adding a User-Agent didn't help either. Where is the problem?
# p14_2.py
import urllib.request
import urllib.parse
url = "http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule"
data = {}
head = {}
data['i'] = 'I love FishC.com!'
data['from'] = 'AUTO'
data['to'] = 'AUTO'
data['smartresult'] = 'dict'
data['client'] = 'fanyideskweb'
# the three fields below are the ones that change on every request (marked in the question)
#data['salt'] = '15859858781330'
#data['sign'] = 'e84cc0ddc3e0c98eff7302d98a328857'
#data['ts'] = '1585985878133'
data['bv'] = '70244e0061db49a9ee62d341c5fed82a'
data['doctype'] = 'json'
data['version'] = '2.1'
data['keyfrom'] = 'fanyi.web'
data['action'] = 'FY_BY_CLICKBUTTION'
head['Referer'] = 'http://fanyi.youdao.com/'
head['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36'
data = urllib.parse.urlencode(data).encode('utf-8')
req = urllib.request.Request(url, data, head)  # urlopen() takes no headers argument, so wrap them in a Request
response = urllib.request.urlopen(req)
html = response.read().decode('utf-8')
print(html)
将 url 中的 _o 去掉,因为网站增加了反爬虫机制:
# p14_2.py
import urllib.request
import urllib.parse
url = "http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule"
data = {}
data['i'] = 'I love FishC.com!'
data['from'] = 'AUTO'
data['to'] = 'AUTO'
data['smartresult'] = 'dict'
data['client'] = 'fanyideskweb'
data['bv'] = '70244e0061db49a9ee62d341c5fed82a'
data['doctype'] = 'json'
data['version'] = '2.1'
data['keyfrom'] = 'fanyi.web'
data['action'] = 'FY_BY_CLICKBUTTION'
data = urllib.parse.urlencode(data).encode('utf-8')
response = urllib.request.urlopen(url, data)
html = response.read().decode('utf-8')
print(html)
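For reference, the JSON that comes back can be unpacked like this (a minimal sketch, assuming the request above succeeded and html holds its decoded body; the nested translateResult layout is the same one the longer script further down indexes into):

import json

result = json.loads(html)
# translateResult is a list of paragraphs, each a list of {'src', 'tgt'} segments
print(result['translateResult'][0][0]['tgt'])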
zltzlt replied at 2020-4-4 17:00
Remove the _o from the URL; the site has added an anti-scraping check:

Could you explain the principle in more detail? So far I've only learned to modify the User-Agent so the site thinks the page is being opened by a browser.

Pyshell replied at 2020-4-4 17:17
Could you explain the principle in more detail? So far I've only learned to modify the User-Agent so the site thinks the page is being opened by a browser.

There's no deeper principle; it's just a fixed rule (for scraping Youdao Translate).

zltzlt replied at 2020-4-4 17:18
There's no deeper principle; it's just a fixed rule (for scraping Youdao Translate).
Alright, thanks a lot~

import urllib.request
import urllib.parse
import json
import time

while True:
    content = input('请输入需要翻译的内容(输入"q!"退出程序):')  # prompt: text to translate; "q!" quits
    if content == 'q!':
        break
    url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'
    '''
    head = {}
    head['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0'
    '''
    data = {}
    data['i'] = content
    data['type'] = 'AUTO'
    data['doctype'] = 'json'
    data['version'] = '1.6'
    data['keyfrom'] = 'fanyi.web'
    data['ue'] = 'UTF-8'
    data['typoResult'] = 'true'
    data = urllib.parse.urlencode(data).encode('utf-8')
    req = urllib.request.Request(url, data)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0')
    response = urllib.request.urlopen(req)
    html = response.read().decode('utf-8')
    target = json.loads(html)
    target = target['translateResult'][0][0]['tgt']  # translateResult is a nested list of segments
    print(target)
    time.sleep(5)  # throttle requests a little
老八秘制 replied at 2020-4-4 17:22
????

一个账号 replied at 2020-4-4 17:27
????

The URL just has the _o removed, and the data block follows what 小甲鱼 wrote.

老八秘制 replied at 2020-4-4 17:57
The URL just has the _o removed, and the data block follows what 小甲鱼 wrote.

[screenshot attachment: 1.jpg]
What I'm asking is exactly why the _o has to be removed; the request URL shown in the browser's "Inspect Element" does include _o.

Pyshell replied at 2020-4-4 18:28
What I'm asking is exactly why the _o has to be removed; the request URL shown in the browser's "Inspect Element" does include _o.

See the image.

Pyshell replied at 2020-4-4 18:29
See the image.
That's because you were viewing it with a browser; when your script makes the same request, the server recognizes it as a scraper and returns {"errorCode":50}.
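A small guard along these lines makes that rejection explicit instead of crashing on a missing key (a sketch, assuming html holds the decoded response from one of the scripts above):

import json

result = json.loads(html)
if result.get('errorCode', 0) != 0:
    # a non-zero errorCode (50 here) means the server rejected the request
    print('request rejected, errorCode =', result['errorCode'])
else:
    print(result['translateResult'][0][0]['tgt'])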
zltzlt replied at 2020-4-4 20:10
That's because you were viewing it with a browser; when your script makes the same request, the server recognizes it as a scraper and returns {"errorCode":50}.
But it still didn't work after I changed the User-Agent.
Take the example below: even with the User-Agent changed, it still fails with HTTP Error 403: Forbidden.
So it's apparently being blocked as a scraper again, and that's what I can't figure out.
import urllib.request
import os

def url_open(url):
    req = urllib.request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.65 Safari/537.36')
    response = urllib.request.urlopen(req)  # must pass req, not url, or the User-Agent header is never sent
    html = response.read()
    return html

def get_page(url):
    html = url_open(url).decode('utf-8')
    a = html.find('current-comment-page') + 23
    b = html.find(']', a)
    return html[a:b]  # return the page number itself, not the whole page

def find_imgs(url):
    html = url_open(url).decode('utf-8')
    img_addrs = []
    a = html.find('img src=')
    while a != -1:
        b = html.find('.jpg', a, a + 255)
        if b != -1:
            img_addrs.append(html[a + 9:b + 4])  # append the address slice, not the whole page
        else:
            b = a + 9
        a = html.find('img src=', b)
    return img_addrs

def save_imgs(folder, img_addrs):
    for each in img_addrs:
        filename = each.split('/')[-1]
        with open(filename, 'wb') as f:
            img = url_open(each)
            f.write(img)

def download_mm(folder='OOXX', pages=10):
    os.mkdir(folder)
    os.chdir(folder)
    url = "http://jandan.net/ooxx/"
    page_num = int(get_page(url))
    for i in range(pages):
        page_num -= i
        page_url = url + 'page-' + str(page_num) + '#comments'
        img_addrs = find_imgs(page_url)
        save_imgs(folder, img_addrs)

if __name__ == '__main__':
    download_mm()
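As a side note, one quick way to check whether a header will actually be sent is to inspect the Request object itself; a header added to req only goes out if req, not the bare URL, is passed to urlopen (the bug fixed in url_open above):

import urllib.request

req = urllib.request.Request('http://jandan.net/ooxx/')
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.65 Safari/537.36')
print(req.header_items())  # headers attached to this Request, e.g. [('User-agent', 'Mozilla/5.0 ...')]
# urllib.request.urlopen(req) sends these headers;
# urllib.request.urlopen('http://jandan.net/ooxx/') would use urllib's default UA instead.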
zltzlt replied at 2020-4-4 17:18
There's no deeper principle; it's just a fixed rule (for scraping Youdao Translate).
There is a principle, actually: the page's JavaScript generates those random strings locally, and the _o endpoint also involves a randomly generated timestamp.
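For what it's worth, write-ups from around this time reverse-engineered the page's JS into something like the sketch below. The md5 secret suffix is rotated between versions of the fanyideskweb script, so the constant used here is an assumption taken from contemporaneous posts, not a stable value:

import hashlib
import random
import time

def md5(s):
    return hashlib.md5(s.encode('utf-8')).hexdigest()

def make_youdao_params(word, user_agent):
    # ts is the current time in milliseconds; salt is ts plus one random digit,
    # which is why both fields differ on every request
    ts = str(int(time.time() * 1000))
    salt = ts + str(random.randint(0, 9))
    # ASSUMPTION: secret suffix as reported in 2020-era write-ups; Youdao rotates it
    sign = md5('fanyideskweb' + word + salt + 'Nw(nmmbP%A-r6U3EUn]Aj')
    # bv is reportedly the md5 of the UA string without its leading "Mozilla/"
    bv = md5(user_agent.replace('Mozilla/', '', 1))
    return {'salt': salt, 'sign': sign, 'ts': ts, 'bv': bv}

With those four fields recomputed per request, the original translate_o endpoint reportedly accepts the POST; dropping the _o simply sidesteps the whole check.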