鱼cpython学习者 posted on 2020-8-22 10:50:28

requests.get on the Douban Top 250 page fails

>>> import requests
>>> res = requests.get('https://movie.douban.com/top250')
>>> print(res.text)

>>> res
<Response >
I tried a GET on www.baidu.com and that worked fine.
Does anyone know what's going on here?

疾风怪盗 posted on 2020-8-22 13:26:28

import requests

# requests identifies itself as 'python-requests/x.y.z' by default and Douban
# rejects that UA, so send a browser User-Agent instead
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3775.400 QQBrowser/10.6.4208.400'}
res = requests.get('https://movie.douban.com/top250', headers=headers)
print(res.text)

The simplest fix is to add a headers dict with a browser User-Agent and try again.
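
For reference, a minimal before-and-after check (the exact status code is an assumption; Douban has been observed to answer 418 to the default client):

import requests

url = 'https://movie.douban.com/top250'

# Default UA ('python-requests/x.y.z') is rejected, so the body comes back empty
print(requests.get(url).status_code)  # non-200, e.g. 418

# Even a minimal browser-like UA is usually enough to get through
headers = {'User-Agent': 'Mozilla/5.0'}
print(requests.get(url, headers=headers).status_code)  # 200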

无敌小猛男 posted on 2020-8-22 13:56:08

import requests
import bs4


def open_url(url):
    # Douban rejects requests' default User-Agent, so pretend to be a browser
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36'}
    res = requests.get(url, headers=headers)
    res.encoding = 'utf-8'
    return res


def find_movies(res):
    soup = bs4.BeautifulSoup(res.text, 'html.parser')

    # Film titles: the first <span> inside the <a> of each <div class="hd">
    movies = []
    targets = soup.find_all("div", class_="hd")
    for each in targets:
        movies.append(each.a.span.text)

    # Ratings
    ranks = []
    targets = soup.find_all("span", class_="rating_num")
    for each in targets:
        ranks.append(' 评分:%s ' % each.text)

    # Director/cast line plus year/country/genre line from <div class="bd">
    messages = []
    targets = soup.find_all("div", class_="bd")
    for each in targets:
        try:
            messages.append(each.p.text.split('\n')[1].strip() + each.p.text.split('\n')[2].strip())
        except AttributeError:
            # Some "bd" divs (e.g. the paging block) have no <p>; skip them
            continue

    # Stitch the three lists together, one film per line
    result = []
    length = len(movies)
    for i in range(length):
        result.append(movies[i] + ranks[i] + messages[i] + '\n')

    return result


def find_depth(res):
    # Total page count: the number just before the "next page" link
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    depth = soup.find('span', class_='next').previous_sibling.previous_sibling.text
    return int(depth)


def main():
    host = "https://movie.douban.com/top250"
    res = open_url(host)
    depth = find_depth(res)

    result = []
    for i in range(depth):
        # Each page shows 25 films, paged with ?start=0, 25, 50, ...
        url = host + '/?start=' + str(25 * i)
        res = open_url(url)
        result.extend(find_movies(res))

    with open("豆瓣250.txt", "w", encoding="utf-8") as f:
        for each in result:
            f.write(each)


if __name__ == '__main__':
    main()
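
After it runs, a quick sanity check on the output file (a minimal sketch; assumes the script above finished without errors):

# Print the first three scraped entries to confirm the line format
with open("豆瓣250.txt", encoding="utf-8") as f:
    for line in f.readlines()[:3]:
        print(line, end='')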

suchocolate posted on 2020-8-22 15:07:37

Just change the UA like in reply #2 and it will work.

鱼cpython学习者 posted on 2020-8-22 15:20:39

疾风怪盗 posted on 2020-8-22 13:26
The simplest fix is to add a headers dict with a browser User-Agent and try again.

Thanks a lot!