Asking for help with a Douban Top 250 crawler; I followed the example in the book.
from bs4 import BeautifulSoup
from urllib.request import urlopen
import re
import requests
import os


def open_url(url):
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36"}
    res = requests.get(url, headers=headers)
    return res


def find_movies(res):
    soup = BeautifulSoup(res.text, "html.parser")

    # movie_name
    movie = []
    targets = soup.find_all("div", class_="hd")
    for each in targets:
        movie.append(each.a.span.text)

    # movie_grade
    grade = []
    targets = soup.find_all("span", class_="rating_num")
    for each in targets:
        grade.append("评分:%s" % each.text)

    # movie_info
    info = []
    targets = soup.find_all("div", class_="bd")
    for each in targets:
        try:
            info.append(each.p.text.spilt("\n")[1].strip() + each.p.text.spilt("\n")[2].strip())
        except:
            continue

    result = []
    length = len(movie)
    for i in range(length):
        result.append(movie[i] + grade[i] + info[i] + '\n')
    return result


# find all pages
def find_depth(res):
    soup = BeautifulSoup(res.text, "html.parser")
    depth = soup.find("span", class_='next').previous_sibling.previous_sibling.text
    return int(depth)


def main():
    host = "https://movie.douban.com/top250"
    res = open_url(host)
    depth = find_depth(res)
    result = []
    for i in range(depth):
        url = host + "/?start=" + str(25 * i)
        res = open_url(url)
        result.extend(find_movies(res))

    with open("Douban top250 movie info.txt", "w", encoding="utf-8") as f:
        for each in result:
            f.write(each)


if __name__ == "__main__":
    main()
The code is above; the error message is below.
Traceback (most recent call last):
  File "C:/**/douban2.py", line 68, in <module>
    main()
  File "C:/**/douban2.py", line 61, in main
    result.extend(find_movies(res))
  File "C:/**/douban2.py", line 43, in find_movies
    result.append(movie[i] + grade[i] + info[i] + '\n')
IndexError: list index out of range
I don't understand why this fails; I can't see any index going out of range.
Hoping someone experienced can point me in the right direction.
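For reference, a likely cause judging from the pasted code and traceback: inside find_movies the method name split is misspelled as spilt, so every pass of the info loop raises AttributeError; the bare "except: continue" silently swallows it, info stays empty, and info[i] then goes out of range when the results are assembled. A minimal sketch of the corrected loop (keeping the book's intent of joining the second and third lines of each .bd paragraph; narrowing the except clause is my suggestion, not the book's code):

    # movie_info: spell split correctly and catch only the expected exceptions,
    # otherwise a typo here is hidden and info ends up shorter than movie
    info = []
    targets = soup.find_all("div", class_="bd")
    for each in targets:
        try:
            info.append(each.p.text.split("\n")[1].strip() + each.p.text.split("\n")[2].strip())
        except (AttributeError, IndexError):  # only the non-movie "bd" div should land here
            continue

    result = []
    for i in range(len(movie)):
        result.append(movie[i] + grade[i] + info[i] + '\n')
    return result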
import os
import re
import time
import requests
from bs4 import BeautifulSoup


def download(url, page):
    print(f"正在爬取:{url}")
    html = requests.get(url).text  # without .text this returns a <Response> object instead of the page source
    soup = BeautifulSoup(html, 'html.parser')
    lis = soup.select("ol li")
    for li in lis:
        index = li.find('em').text
        title = li.find('span', class_='title').text
        rating = li.find('span', class_='rating_num').text
        # the "year / area / type" line sits after the <br/> inside .bd p
        strInfo = re.search("(?<=<br/>).*?(?=<)", str(li.select_one(".bd p")), re.S | re.M).group().strip()
        infos = strInfo.split('/')
        year = infos[0].strip()
        area = infos[1].strip()
        type = infos[2].strip()
        write_fo_file(index, title, rating, year, area, type)
    page += 25
    if page < 250:
        time.sleep(2)
        download(f"https://movie.douban.com/top250?start={page}&filter=", page)


def write_fo_file(index, title, rating, year, area, type):
    f = open('movie_top250.csv', 'a')
    f.write(f'{index},{title},{rating},{year},{area},{type}\n')
    f.close()  # close() must be called; f.closed only reads a flag


def main():
    if os.path.exists('movie_top250.csv'):
        os.remove('movie_top250.csv')
    url = 'https://movie.douban.com/top250'
    download(url, 0)
    print("爬取完毕。")


if __name__ == '__main__':
    main()
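A side note on this second snippet, unrelated to the IndexError above: requests.get(url) with no headers sends the default python-requests User-Agent, which Douban may refuse, and the CSV is written without an explicit encoding or a guaranteed close. A minimal sketch of both adjustments, reusing the browser User-Agent string from the first snippet; the relevant lines would become something like:

headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                         "(KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36"}
html = requests.get(url, headers=headers).text  # .text gives the page source, not the Response object

# a with-block closes the file even on errors; utf-8 keeps Chinese titles intact on Windows
with open('movie_top250.csv', 'a', encoding='utf-8') as f:
    f.write(f'{index},{title},{rating},{year},{area},{type}\n')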
六小鸭 posted on 2020-4-14 16:44:
    Could someone tell me where my code goes wrong?
It doesn't report any error, though.

六小鸭 posted on 2020-4-14 16:50:
    It doesn't report any error, though.
https://s1.ax1x.com/2020/04/14/JSkuM8.jpg
Both of my editors do report the error.
No error on my end... oh no, my mistake, let me look into it.

六小鸭 posted on 2020-4-14 16:55:
    Oh no, my mistake.
What I mean is that it's my own code that has the problem.

laogesix posted on 2020-4-14 16:59:
    What I mean is that it's my own code that has the problem.
Sorry, I mistook it for my own. You could search Baidu for this; I'll @ a couple of experienced members for you: @不二如是 @一个账号

六小鸭 posted on 2020-4-14 17:07:
    You could search Baidu for this; I'll @ a couple of experienced members for you.
I don't think Baidu will turn this one up, but thanks anyway.