Python Douban login problem
This post was last edited by Twilight6 on 2021-1-22 16:59.

The code is as follows:
import re
import urllib.request
import urllib.parse
from http.cookiejar import CookieJar

# Douban login URL
loginurl = 'https://www.douban.com/accounts/login'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}
cookie = CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor)
opener.addheaders = [headers]
data = {
    "form_email": "your email",
    "form_password": "your password",
    "source": "index_nav"
}
data = {}
data['form_email'] = '你的账号'
data['form_password'] = '你的密码'
data['source'] = 'index_nav'
response = opener.open(loginurl, urllib.parse.urlencode(data).encode('utf-8'))
# Still on the login page: a captcha is required
if response.geturl() == "https://www.douban.com/accounts/login":
    html = response.read().decode()
    # Captcha image URL
    imgurl = re.search('<img id="captcha_image" src="(.+?)" alt="captcha" class="captcha_image"/>', html)
    if imgurl:
        url = imgurl.group(1)
        # Save the captcha image to the current directory
        res = urllib.request.urlretrieve(url, 'v.jpg')
        # Extract the captcha-id parameter
        captcha = re.search('<input type="hidden" name="captcha-id" value="(.+?)"/>', html)
        if captcha:
            vcode = input('请输入图片上的验证码:')
            data["captcha-solution"] = vcode
            data["captcha-id"] = captcha.group(1)
            data["user_login"] = "登录"
            # Submit the captcha for verification
            response = opener.open(loginurl, urllib.parse.urlencode(data).encode('utf-8'))
            # Redirected to the home page means login succeeded
            if response.geturl() == "http://www.douban.com/":
                print('登录成功!')
Why does this error appear? I have been searching for a long time and still don't know how to fix it:
Traceback (most recent call last):
  File "F:/WebSpider/54/douban.py", line 22, in <module>
    response = opener.open(loginurl, urllib.parse.urlencode(data).encode('utf-8'))
  File "C:\Users\LENOV\AppData\Local\Programs\Python\Python38\lib\urllib\request.py", line 522, in open
    req = meth(req)
  File "C:\Users\LENOV\AppData\Local\Programs\Python\Python38\lib\urllib\request.py", line 1271, in do_request_
    for name, value in self.parent.addheaders:
ValueError: not enough values to unpack (expected 2, got 1)

I rarely use urllib these days; it seems too few headers were given. Just adding any extra header got it past the error for me:
loginurl = 'https://www.douban.com/accounts/login'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64)', 'Connection': 'keep-alive'}
cookie = CookieJar()
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
opener.addheaders = [headers]
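For what it's worth, opener.addheaders expects a list of (name, value) tuples, not a dict wrapped in a list: a one-key dict cannot be unpacked into two values, which is exactly the ValueError above, while a two-key dict unpacks by accident and sends its two key names as a single bogus header. A minimal sketch of the usual form, reusing the headers dict from the snippet above:

# addheaders is a list of (header_name, header_value) tuples;
# the default is something like [('User-agent', 'Python-urllib/3.8')].
opener.addheaders = list(headers.items())

# or written out explicitly:
opener.addheaders = [
    ('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64)'),
    ('Connection', 'keep-alive'),
]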
suchocolate posted on 2021-1-22 17:58:
I rarely use urllib these days; it seems too few headers were given. Just adding any extra header got it past the error for me:
What does "too few headers were given" mean? I modified mine following your format, but it still fails with the same error.
那位大人 posted on 2021-1-22 19:06:
What does "too few headers were given" mean? I modified mine following your format, but it still fails with the same error.
No error output on my side:

import re
import urllib.request
import urllib.parse
from http.cookiejar import CookieJar

# Douban login URL
loginurl = 'https://www.douban.com/accounts/login'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64)', 'Connection': 'keep-alive'}
cookie = CookieJar()
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
opener.addheaders = [headers]
data = {
    "form_email": "your email",
    "form_password": "your password",
    "source": "index_nav"
}
response = opener.open(loginurl, urllib.parse.urlencode(data).encode('utf-8'))
# Still on the login page: a captcha is required
if response.geturl() == "https://www.douban.com/accounts/login":
    html = response.read().decode()
    # Captcha image URL
    imgurl = re.search('<img id="captcha_image" src="(.+?)" alt="captcha" class="captcha_image"/>', html)
    if imgurl:
        url = imgurl.group(1)
        # Save the captcha image to the current directory
        res = urllib.request.urlretrieve(url, 'v.jpg')
        # Extract the captcha-id parameter
        captcha = re.search('<input type="hidden" name="captcha-id" value="(.+?)"/>', html)
        if captcha:
            vcode = input('请输入图片上的验证码:')
            data["captcha-solution"] = vcode
            data["captcha-id"] = captcha.group(1)
            data["user_login"] = "登录"
            # Submit the captcha for verification
            response = opener.open(loginurl, urllib.parse.urlencode(data).encode('utf-8'))
            # Redirected to the home page means login succeeded
            if response.geturl() == "http://www.douban.com/":
                print('登录成功!')

This post was last edited by 那位大人 on 2021-1-23 09:55.
suchocolate posted on 2021-1-22 19:58:
No error output on my side:
Is the problem that the headers are different? I looked up Douban's headers in my browser and they don't match the ones you gave. Also, it runs without errors now, but nothing appears at all, just a blank run with no prompt to enter the captcha.
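The script only ever prints inside nested if branches, so if the redirect URL or the captcha markup is not what the regexes expect, it finishes silently. A small diagnostic sketch, reusing loginurl, data and the opener from the code above (the output file name is arbitrary):

# Re-issue the login request and look at the response directly
# instead of letting it fall through the if branches.
response = opener.open(loginurl, urllib.parse.urlencode(data).encode('utf-8'))
print('final url:', response.geturl())                    # where Douban redirected us
page = response.read().decode('utf-8', errors='ignore')
print('captcha form present:', 'captcha_image' in page)   # is the old captcha markup there at all?
with open('debug_login.html', 'w', encoding='utf-8') as f:
    f.write(page)                                          # open this file in a browser to inspect the page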
那位大人 posted on 2021-1-23 09:52:
Is the problem that the headers are different? I looked up Douban's headers in my browser and they don't match the ones you gave. Also, it runs without errors now, but nothing ...
Writing several browser tokens into the headers (mozilla, applewebkit, chrome) can actually trigger anti-scraping checks; as long as the headers aren't the default python-urllib ones, they generally won't. Douban's login check isn't that simple, though. Below is something someone else wrote a while ago:

import requests
from lxml import etree


def main():
    url = 'https://accounts.douban.com/j/mobile/login/basic'
    headers = {'user-agent': 'firefox',
               'X-Requested-With': 'XMLHttpRequest',
               'cookie': 'bid=hIYyIduorJA; douban-fav-remind=1; __utma=30149280.307390223.1595495492.1596504502.1598348956.4; __utmz=30149280.1598348956.4.4.utmcsr=accounts.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/passport/login; __gads=ID=e5de2b79763a1dbd:T=1596504503:S=ALNI_Ma9hnIcfLisdJZ13EZO1GHBsanigg; apiKey=; _pk_id.100001.2fad=20a3a4214f898ade.1598348634.1.1598351524.1598348634.; _pk_ses.100001.2fad=*; last_login_way=account; push_noty_num=0; push_doumail_num=0; __utmc=30149280; __utmv=30149280.14229; douban-profile-remind=1; ps=y'}
    data = dict()
    data['ck'] = '_J_B'
    data['name'] = '填入账号'
    data['password'] = '填入密码'
    data['remember'] = 'false'
    data['ticket'] = ''
    r = requests.post(url, headers=headers, data=data)
    result = r.json()
    print(result['description'])


if __name__ == '__main__':
    main()
As you can see, a cookie is still needed, and success isn't judged by the URL in the response headers; you have to look at the returned JSON data.
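For example, a sketch of checking that JSON; apart from 'description', which appears in the snippet above, the field names here are assumptions, so print the full payload first to confirm what Douban actually returns:

result = r.json()
print(result)  # inspect the whole payload first

# 'status' is an assumed success flag and may be named differently
# in the real response; 'description' is the field used above.
if result.get('status') == 'success':
    print('login ok:', result.get('description'))
else:
    print('login failed:', result.get('description'))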
suchocolate posted on 2021-1-23 10:07:
Writing several browser tokens into the headers (mozilla, applewebkit, chrome) can actually trigger anti-scraping checks; as long as the headers aren't the default python-urlli ...
OK, thanks a lot, got it now.