极客Python爬取豆瓣出现list index out of range错误怎么办
代码基本上照抄的小甲鱼的。最后运行出现list index out of range错误怎么解决啊{:10_266:}有没有大佬有现在可运行的完整代码发一下啊
import requests
import bs4
import re
def open_url(url):
    """GET *url* with a minimal browser User-Agent and return the Response."""
    ua = {'user-agent': 'Mozilla/5.0'}
    return requests.get(url, headers=ua)
def find_movies(res):
    """Parse one Top250 result page into a list of '标题 评分:x 信息\\n' strings.

    res: a requests Response whose .text holds the page HTML.
    Returns one formatted line per movie on the page.
    """
    soup = bs4.BeautifulSoup(res.text, "html.parser")
    # Titles: <div class="hd"><a><span>title</span>...
    movies = []
    targets = soup.find_all("div", class_="hd")
    for each in targets:
        movies.append(each.a.span.text)
    # Ratings: <span class="rating_num">.
    ranks = []
    targets = soup.find_all("span", class_="rating_num")
    for each in targets:
        ranks.append(' 评分:%s ' % each.text)
    # Staff / year / genre info: <div class="bd"><p>; lines [1] and [2]
    # of the <p> text carry the useful content.
    messages = []
    targets = soup.find_all("div", class_="bd")
    for each in targets:
        try:
            # BUG FIX: original read `split("\n"),strip()` — a comma typo
            # (syntax error) and missing line indices.
            info = each.p.text.split('\n')
            messages.append(info[1].strip() + info[2].strip())
        except (IndexError, AttributeError):
            # Some "bd" divs (e.g. the pager block) have no usable <p>.
            continue
    result = []
    length = len(movies)
    for i in range(length):
        # BUG FIX: index each list per movie; the original concatenated the
        # whole lists with "\n" (TypeError: can only concatenate list to list).
        result.append(movies[i] + ranks[i] + messages[i] + '\n')
    return result
def find_depth(res):
    """Return the total number of result pages, read from the pager widget."""
    soup = bs4.BeautifulSoup(res.text, "html.parser")
    # The page count is the text node two siblings before the "next" button.
    next_btn = soup.find("span", class_="next")
    last_page = next_btn.previous_sibling.previous_sibling
    return int(last_page.text)
def main():
    """Crawl every page of Douban Top250 and write the lines to a text file."""
    host = "https://movie.douban.com/top250"
    res = open_url(host)
    depth = find_depth(res)
    result = []
    for page in range(depth):
        page_url = host + "/?start=" + str(25 * page)
        res = open_url(page_url)
        result.extend(find_movies(res))
    with open("豆瓣TOP250电影.txt", "w", encoding="utf-8") as f:
        f.writelines(result)
if __name__ == "__main__":
    main()
headers={'user-agent':'Mozilla/5.0'}
这个写错了 代码第27句。写错了。
这一句。messages.append(each.p.text.split("\n")后面是'.strip()',写成了逗号
messages.append(each.p.text.split("\n"),strip()+each.p.text.split("\n").strip()) °蓝鲤歌蓝 发表于 2021-2-3 17:26
这个写错了
{:10_266:}好的 已修改,但运行没反应{:10_245:} YunGuo 发表于 2021-2-3 18:38
代码第27句。写错了。
这一句。messages.append(each.p.text.split("\n")后面是'.strip()',写成了逗号 ...
感谢,已修改,但运行没反应{:10_266:} 报错信息贴出来 修改后的代码
原先有部分手打的出错了{:5_100:}
现在应该跟小甲鱼的一样了
但运行是没反应。。。。{:10_266:}
import requests
import bs4
import re
def open_url(url):
    """GET *url* pretending to be desktop Chrome; return the Response."""
    ua = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36'}
    return requests.get(url, headers=ua)
def find_movies(res):
    """Parse one Top250 result page into a list of '标题 评分:x 信息\\n' strings.

    res: a requests Response whose .text holds the page HTML.
    """
    soup = bs4.BeautifulSoup(res.text, "html.parser")
    # Titles: <div class="hd"><a><span>title</span>...
    movies = []
    targets = soup.find_all("div", class_="hd")
    for each in targets:
        movies.append(each.a.span.text)
    # Ratings: <span class="rating_num">.
    ranks = []
    targets = soup.find_all("span", class_="rating_num")
    for each in targets:
        ranks.append(' 评分:%s ' % each.text)
    # Staff / year / genre info: lines [1] and [2] of the <p> text.
    messages = []
    targets = soup.find_all("div", class_="bd")
    for each in targets:
        try:
            # BUG FIX: original called .strip() on the list returned by
            # split("\n") (AttributeError, swallowed by the bare except,
            # so messages stayed empty). Index the lines, then strip.
            info = each.p.text.split('\n')
            messages.append(info[1].strip() + info[2].strip())
        except (IndexError, AttributeError):
            # Some "bd" divs (e.g. the pager block) have no usable <p>.
            continue
    result = []
    length = len(movies)
    for i in range(length):
        # BUG FIX: index each list per movie; the original concatenated the
        # whole lists with "\n" (TypeError: can only concatenate list to list).
        result.append(movies[i] + ranks[i] + messages[i] + '\n')
    return result
def find_depth(res):
    """Return the total number of result pages, read from the pager widget."""
    soup = bs4.BeautifulSoup(res.text, "html.parser")
    # The page count is the text node two siblings before the "next" button.
    next_btn = soup.find("span", class_="next")
    last_page = next_btn.previous_sibling.previous_sibling
    return int(last_page.text)
def main():
    """Crawl every page of Douban Top250 and write the lines to a text file."""
    host = "https://movie.douban.com/top250"
    res = open_url(host)
    depth = find_depth(res)
    result = []
    for page in range(depth):
        page_url = host + "/?start=" + str(25 * page)
        res = open_url(page_url)
        result.extend(find_movies(res))
    with open("豆瓣TOP250电影.txt", "w", encoding="utf-8") as f:
        f.writelines(result)
if __name__ == "__main__":
    main()
°蓝鲤歌蓝 发表于 2021-2-3 19:16
报错信息贴出来
不报错就是没反应了{:10_266:} 没反应要么就是没有爬到东西,要么就是你没看到那个生成的文件
你挨个 print 一下,找找问题呗
能 print 的都 print 一下{:10_245:} Daniel_Zhang 发表于 2021-2-3 19:22
没反应要么就是没有爬到东西,要么就是你没看到那个生成的文件
你挨个 print 一下,找找问题呗
{:10_266:} 爬虫成功了 感谢大家 {:5_106:} DSSIMP 发表于 2021-2-3 19:59
爬虫成功了 感谢大家
能不能分享下完全体的代码呀小白路过 还在基础语法趴着 好难 手里尖刀刺 发表于 2021-8-20 09:55
能不能分享下完全体的代码呀小白路过 还在基础语法趴着 好难
import requests
import bs4
import re
def open_url(url):
    """GET *url* pretending to be desktop Chrome; return the Response."""
    ua = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36'}
    return requests.get(url, headers=ua)
def find_movies(res):
    """Parse one Top250 result page into a list of '标题 评分:x 信息\\n' strings.

    res: a requests Response whose .text holds the page HTML.
    Non-breaking spaces (\\xa0) in the info text are normalized to spaces.
    """
    soup = bs4.BeautifulSoup(res.text, "html.parser")
    # Titles: <div class="hd"><a><span>title</span>...
    movies = []
    targets = soup.find_all("div", class_="hd")
    for each in targets:
        movies.append(each.a.span.text)
    # Ratings: <span class="rating_num">.
    ranks = []
    targets = soup.find_all("span", class_="rating_num")
    for each in targets:
        ranks.append(' 评分:%s ' % each.text)
    # Staff / year / genre info: lines [1] and [2] of the <p> text.
    messages = []
    targets = soup.find_all("div", class_="bd")
    for each in targets:
        try:
            # BUG FIX: original called .strip() on the list returned by
            # split("\n") (AttributeError, swallowed by the bare except,
            # so messages stayed empty). Index the lines, then strip.
            info = each.p.text.split('\n')
            # BUG FIX: original called .replace on the *list* `messages`
            # (AttributeError); \xa0 must be replaced on each string.
            messages.append((info[1].strip() + info[2].strip()).replace('\xa0', ' '))
        except (IndexError, AttributeError):
            # Some "bd" divs (e.g. the pager block) have no usable <p>.
            continue
    result = []
    length = len(movies)
    for i in range(length):
        # BUG FIX: index each list per movie; the original concatenated the
        # whole lists with "\n" (TypeError: can only concatenate list to list).
        result.append(movies[i] + ranks[i] + messages[i] + '\n')
    return result
def find_depth(res):
    """Return the total number of result pages, read from the pager widget."""
    soup = bs4.BeautifulSoup(res.text, "html.parser")
    # The page count is the text node two siblings before the "next" button.
    next_btn = soup.find("span", class_="next")
    last_page = next_btn.previous_sibling.previous_sibling
    return int(last_page.text)
def main():
    """Crawl every page of Douban Top250 and write the lines to a text file."""
    host = "https://movie.douban.com/top250"
    res = open_url(host)
    depth = find_depth(res)
    result = []
    for page in range(depth):
        page_url = host + "/?start=" + str(25 * page)
        res = open_url(page_url)
        result.extend(find_movies(res))
    with open("豆瓣TOP250电影.txt", "w", encoding="utf-8") as f:
        f.writelines(result)
if __name__ == "__main__":
    main()
DSSIMP 发表于 2021-8-20 14:04
import requests
import bs4
import re
import requests
import bs4
import re
def open_url(url):
    """GET *url* pretending to be desktop Chrome; return the Response."""
    ua = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36'}
    return requests.get(url, headers=ua)
def find_movies(res):
    """Parse one Top250 result page into a list of '标题 评分:x 信息\\n' strings.

    res: a requests Response whose .text holds the page HTML.
    Non-breaking spaces (\\xa0) in the info text are normalized to spaces.
    """
    soup = bs4.BeautifulSoup(res.text, "html.parser")
    # Titles: <div class="hd"><a><span>title</span>...
    movies = []
    targets = soup.find_all("div", class_="hd")
    for each in targets:
        movies.append(each.a.span.text)
    # Ratings: <span class="rating_num">.
    ranks = []
    targets = soup.find_all("span", class_="rating_num")
    for each in targets:
        ranks.append(' 评分:%s ' % each.text)
    # Staff / year / genre info: lines [1] and [2] of the <p> text.
    messages = []
    targets = soup.find_all("div", class_="bd")
    for each in targets:
        try:
            # BUG FIX: original called .strip() on the list returned by
            # split("\n") (AttributeError, swallowed by the bare except,
            # so messages stayed empty). Index the lines, then strip.
            info = each.p.text.split('\n')
            # BUG FIX: original called .replace on the *list* `messages`
            # (AttributeError); \xa0 must be replaced on each string.
            messages.append((info[1].strip() + info[2].strip()).replace('\xa0', ' '))
        except (IndexError, AttributeError):
            # Some "bd" divs (e.g. the pager block) have no usable <p>.
            continue
    result = []
    length = len(movies)
    for i in range(length):
        # BUG FIX: index each list per movie; the original concatenated the
        # whole lists with "\n" (TypeError: can only concatenate list to list).
        result.append(movies[i] + ranks[i] + messages[i] + '\n')
    return result
def find_depth(res):
    """Return the total number of result pages, read from the pager widget."""
    soup = bs4.BeautifulSoup(res.text, "html.parser")
    # The page count is the text node two siblings before the "next" button.
    next_btn = soup.find("span", class_="next")
    last_page = next_btn.previous_sibling.previous_sibling
    return int(last_page.text)
def main():
    """Crawl every page of Douban Top250 and write the lines to a text file."""
    host = "https://movie.douban.com/top250"
    res = open_url(host)
    depth = find_depth(res)
    result = []
    for page in range(depth):
        page_url = host + "/?start=" + str(25 * page)
        res = open_url(page_url)
        result.extend(find_movies(res))
    with open("豆瓣TOP250电影.txt", "w", encoding="utf-8") as f:
        f.writelines(result)
if __name__ == "__main__":
    main()
手里尖刀刺 发表于 2021-8-20 09:55
能不能分享下完全体的代码呀小白路过 还在基础语法趴着 好难
import requests
import bs4
import re
def open_url(url):
    """GET *url* pretending to be desktop Chrome; return the Response."""
    ua = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36'}
    return requests.get(url, headers=ua)
def find_movies(res):
    """Parse one Top250 result page into a list of '标题 评分:x 信息\\n' strings.

    res: a requests Response whose .text holds the page HTML.
    Non-breaking spaces (\\xa0) in the info text are normalized to spaces.
    """
    soup = bs4.BeautifulSoup(res.text, "html.parser")
    # Titles: <div class="hd"><a><span>title</span>...
    movies = []
    targets = soup.find_all("div", class_="hd")
    for each in targets:
        movies.append(each.a.span.text)
    # Ratings: <span class="rating_num">.
    ranks = []
    targets = soup.find_all("span", class_="rating_num")
    for each in targets:
        ranks.append(' 评分:%s ' % each.text)
    # Staff / year / genre info: lines [1] and [2] of the <p> text.
    messages = []
    targets = soup.find_all("div", class_="bd")
    for each in targets:
        try:
            # BUG FIX: original called .strip() on the list returned by
            # split("\n") (AttributeError, swallowed by the bare except,
            # so messages stayed empty). Index the lines, then strip.
            info = each.p.text.split('\n')
            # BUG FIX: original called .replace on the *list* `messages`
            # (AttributeError); \xa0 must be replaced on each string.
            messages.append((info[1].strip() + info[2].strip()).replace('\xa0', ' '))
        except (IndexError, AttributeError):
            # Some "bd" divs (e.g. the pager block) have no usable <p>.
            continue
    result = []
    length = len(movies)
    for i in range(length):
        # BUG FIX: index each list per movie; the original concatenated the
        # whole lists with "\n" (TypeError: can only concatenate list to list).
        result.append(movies[i] + ranks[i] + messages[i] + '\n')
    return result
def find_depth(res):
    """Return the total number of result pages, read from the pager widget."""
    soup = bs4.BeautifulSoup(res.text, "html.parser")
    # The page count is the text node two siblings before the "next" button.
    next_btn = soup.find("span", class_="next")
    last_page = next_btn.previous_sibling.previous_sibling
    return int(last_page.text)
def main():
    """Crawl every page of Douban Top250 and write the lines to a text file."""
    host = "https://movie.douban.com/top250"
    res = open_url(host)
    depth = find_depth(res)
    result = []
    for page in range(depth):
        page_url = host + "/?start=" + str(25 * page)
        res = open_url(page_url)
        result.extend(find_movies(res))
    with open("豆瓣TOP250电影.txt", "w", encoding="utf-8") as f:
        f.writelines(result)
if __name__=="__main__":
main() C:\Users\gt20180105\Desktop这是什么情况啊 请求无效 手里尖刀刺 发表于 2021-8-20 16:14
这是什么情况啊 请求无效
你没有预先安装好 requests 库，先用 pip 指令安装（pip install requests）
页:
[1]