|
发表于 2020-8-22 13:56:08
|
显示全部楼层
import requests
from bs4 import BeautifulSoup
import bs4
import re
from networkx.release import url
def open_url(url):
    """Fetch *url* and return the ``requests.Response`` decoded as UTF-8.

    Bug fixed: the original first issued a throwaway request to the
    hard-coded top-250 front page, set UTF-8 on *that* response, then
    re-requested *url* and returned the second response with its
    encoding never set — so Chinese text could be mis-decoded and every
    call cost two HTTP round trips.  Now a single request is made and
    the encoding is set on the response actually returned.
    """
    # Browser-like User-Agent: douban rejects the default requests UA.
    headers = {'User-Agent': r'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36'}
    res = requests.get(url, headers=headers)
    # Force UTF-8 so Chinese titles decode correctly regardless of the
    # charset the server header advertises.
    res.encoding = 'utf-8'
    return res
def find_movice(res):
    """Parse one Top-250 result page into a list of formatted lines.

    Each returned string is: title + '评分:<score>' + director/cast
    metadata + newline.

    res: a Response-like object whose ``.text`` holds the page HTML.

    Fixes over the original: the bare ``except:`` is narrowed to the two
    exceptions that can actually occur (a ``bd`` div without a ``<p>``,
    or a ``<p>`` with fewer than three lines); the per-item text is
    split only once; and the final assembly uses ``zip`` so mismatched
    list lengths can no longer raise ``IndexError``.
    """
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    # Titles: first <span> inside the <a> of each <div class="hd">.
    movice = [each.a.span.text for each in soup.find_all("div", class_="hd")]
    # Scores from the rating spans.
    ranks = ['评分:%s' % each.text for each in soup.find_all("span", class_="rating_num")]
    # Director / cast / year metadata lives in the <p> of <div class="bd">.
    messages = []
    for each in soup.find_all("div", class_="bd"):
        try:
            parts = each.p.text.split('\n')  # split once, not per index
            messages.append(parts[1].strip() + parts[2].strip())
        except (AttributeError, IndexError):
            # AttributeError: a "bd" div with no <p> (each.p is None);
            # IndexError: a <p> with fewer than three lines. Skip both.
            continue
    # zip stops at the shortest list, mirroring the intent of the
    # original index loop without risking an IndexError.
    return [title + rank + info + '\n'
            for title, rank, info in zip(movice, ranks, messages)]
def find_depth(res):
    """Return the total number of result pages, read from the pager.

    The last page number sits two siblings before the "next" link
    (a whitespace text node separates the two tags), so two
    ``previous_sibling`` hops are needed.
    """
    page = bs4.BeautifulSoup(res.text, 'html.parser')
    next_button = page.find('span', class_='next')
    last_page_text = next_button.previous_sibling.previous_sibling.text
    return int(last_page_text)
def main():
    """Scrape every Top-250 page and write the results to 豆瓣250.txt."""
    host = "https://movie.douban.com/top250"
    # Discover how many pages the pager advertises.
    depth = find_depth(open_url(host))
    result = []
    # Douban paginates 25 movies per page via the start= query parameter.
    for page in range(depth):
        res = open_url(host + '/?start=' + str(25 * page))
        result.extend(find_movice(res))
    with open("豆瓣250.txt", "w", encoding="utf-8") as f:
        f.writelines(result)
if __name__ == '__main__':
    main()
|
|