Could someone please help me figure out what's causing this error?
It's the Douban Top 250 scraper from the second edition of 小甲鱼's Python course. Here's the code:

import requests
import bs4
import re


def open_url(url):
    # use a proxy
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'}
    # res = requests.get(url, headers=headers, proxies=proxies)
    res = requests.get(url, headers=headers)
    return res


def find_movies(res):
    soup = bs4.BeautifulSoup(res.text, 'html.parser')

    # movie titles
    movies = []
    targets = soup.find_all('div', class_='hd')
    for each in targets:
        movies.append(each.a.span.text)

    # ratings
    ranks = []
    targets = soup.find_all('span', class_='rating_num')
    for each in targets:
        ranks.append('评分:%s' % each.text)

    # other details
    messages = []
    targets = soup.find_all('div', class_='hd')
    for each in targets:
        try:
            messages.append(each.p.text.split('\n')[1].strip() + each.p.text.split('\n')[2].strip())
        except:
            continue

    result = []
    length = len(movies)
    for i in range(length):
        result.append(movies[i] + ranks[i] + messages[i] + '\n')

    return result


# find out how many pages there are in total
def find_depth(res):
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    depth = soup.find('span', class_='next').previous_sibling.previous_sibling.text
    return int(depth)


def main():
    host = 'https://movie.douban.com/top250'
    res = open_url(host)
    depth = find_depth(res)

    result = []
    for i in range(depth):
        url = host + '/?start=' + str(25 * i)
        res = open_url(url)
        result.extend(find_movies(res))

    with open('豆瓣TOP250电影.txt', 'w', encoding='utf-8') as f:
        for each in result:
            f.write(each)


if __name__ == '__main__':
    main()
Where's the error message?

This is the error I get:
Traceback (most recent call last):
  File "I:/Python/爬取豆瓣TOP250.py", line 61, in <module>
    main()
  File "I:/Python/爬取豆瓣TOP250.py", line 54, in main
    result.extend(find_movies(res))
  File "I:/Python/爬取豆瓣TOP250.py", line 36, in find_movies
    result.append(movies[i] + ranks[i] + messages[i] + '\n')
IndexError: list index out of range

Twilight6 posted on 2020-6-14 15:25:
Where's the error message?

I've posted it below; it's the same code as in the first post.

a1437485261 posted on 2020-6-14 15:26:
I've posted it below.
IndexError: list index out of range
The index is out of range. It happens at line 36 of your code; check it yourself, or post the complete code here.
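A minimal sketch of how that IndexError comes about when one of the lists is shorter than the one driving the loop (made-up values, purely for illustration):

movies = ['肖申克的救赎', '霸王别姬']   # 2 items
ranks = ['评分:9.7', '评分:9.6']         # 2 items
messages = []                            # empty, e.g. because every append failed

for i in range(len(movies)):
    # messages[i] raises IndexError: list index out of range on the first pass
    print(movies[i] + ranks[i] + messages[i])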
I keep trying to post the code but it won't go through; it says it has to be reviewed.

Twilight6 posted on 2020-6-14 15:28:
IndexError: list index out of range
The index is out of range. It happens at line 36 of your code; check it yourself, or post ...
I posted the code but it keeps saying it has to be reviewed.

a1437485261 posted on 2020-6-14 15:29:
I posted the code but it keeps saying it has to be reviewed.
The error message says a list index went out of range, so there's probably a problem with how the i variable gets its value.

a1437485261 posted on 2020-6-14 15:29:
I posted the code but it keeps saying it has to be reviewed.
Did you run a copy of 小甲鱼's original code?

Twilight6 posted on 2020-6-14 15:31:
Did you run a copy of 小甲鱼's original code?

Yes. Apart from not using a proxy IP, everything else is basically the same.
result = []
length = len(movies)
for i in range(length):
    result.append(movies[i] + ranks[i] + messages[i] + '\n')
return result

That result.append(...) line is line 36.

a1437485261 posted on 2020-6-14 15:33:
Yes. Apart from not using a proxy IP, everything else is basically the same.
result = []
length = len(movies)

Then copy this and try running it:
import requests
import bs4
import re


def open_url(url):
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36'}
    res = requests.get(url, headers=headers)
    return res


def find_movies(res):
    soup = bs4.BeautifulSoup(res.text, 'html.parser')

    # movie titles
    movies = []
    targets = soup.find_all("div", class_="hd")
    for each in targets:
        movies.append(each.a.span.text)

    # ratings
    ranks = []
    targets = soup.find_all("span", class_="rating_num")
    for each in targets:
        ranks.append(' 评分:%s ' % each.text)

    # other details
    messages = []
    targets = soup.find_all("div", class_="bd")
    for each in targets:
        try:
            messages.append(each.p.text.split('\n')[1].strip() + each.p.text.split('\n')[2].strip())
        except:
            continue

    result = []
    length = len(movies)
    for i in range(length):
        result.append(movies[i] + ranks[i] + messages[i] + '\n')

    return result


# find out how many pages there are in total
def find_depth(res):
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    depth = soup.find('span', class_='next').previous_sibling.previous_sibling.text
    return int(depth)


def main():
    host = "https://movie.douban.com/top250"
    res = open_url(host)
    depth = find_depth(res)

    result = []
    for i in range(depth):
        url = host + '/?start=' + str(25 * i)
        res = open_url(url)
        result.extend(find_movies(res))

    with open("豆瓣TOP250电影.txt", "w", encoding="utf-8") as f:
        for each in result:
            f.write(each)


if __name__ == "__main__":
    main()

Twilight6 posted on 2020-6-14 15:36:
Then copy this and try running it:

Your version works.

a1437485261 posted on 2020-6-14 15:39:
Your version works.

So you must have done something mysterious to the code, heh. Just kidding; I can't tell for sure without seeing the complete code.
The error says an index went out of range, so either ranks or messages went wrong somewhere when extracting data and ended up with too few items,
or movies extracted too many.
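For example, here is a minimal sketch with simplified HTML modeled loosely on the Douban list markup (not the real page), showing how searching div class "hd" instead of "bd" for the details leaves messages empty while movies still fills up, which is what later makes messages[i] fail:

import bs4

html = '''
<div class="item">
  <div class="hd"><a href="#"><span class="title">肖申克的救赎</span></a></div>
  <div class="bd"><p>
      导演: 弗兰克·德拉邦特
      1994 / 美国 / 犯罪 剧情
  </p></div>
</div>
'''
soup = bs4.BeautifulSoup(html, 'html.parser')

movies, messages = [], []
for each in soup.find_all('div', class_='hd'):
    movies.append(each.a.span.text)
    try:
        # div.hd contains no <p>, so each.p is None and .text raises AttributeError;
        # the bare except swallows it and nothing is ever appended to messages
        messages.append(each.p.text.split('\n')[1].strip())
    except:
        continue

print(len(movies), len(messages))   # prints: 1 0  -> messages[i] would raise IndexError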
Twilight6 posted on 2020-6-14 15:42:
So you must have done something mysterious to the code; I can't tell for sure without seeing the complete code.
The error says an index went out of range, so ...

I compared the two versions and found the one place I'd written wrong. Thanks a lot, haha!

a1437485261 posted on 2020-6-14 15:42:
I compared the two versions and found the one place I'd written wrong. Thanks a lot, haha!

You're welcome~

Twilight6 posted on 2020-6-14 15:36:
Then copy this and try running it:
Could you share a version that can save the results to Excel? I've recently been working through 极客py and got stuck right here.
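A minimal sketch of one way to do that, assuming the openpyxl package is installed (pip install openpyxl) and reusing the open_url, find_depth and find_movies functions from the working code above; writing one movie per row in a single column is just an assumption about the layout you want:

from openpyxl import Workbook


def main():
    host = 'https://movie.douban.com/top250'
    res = open_url(host)
    depth = find_depth(res)

    result = []
    for i in range(depth):
        url = host + '/?start=' + str(25 * i)
        res = open_url(url)
        result.extend(find_movies(res))

    # write to an .xlsx workbook instead of a .txt file
    wb = Workbook()
    ws = wb.active
    ws.title = 'TOP250'
    ws.append(['电影'])              # header row
    for each in result:
        ws.append([each.strip()])    # one movie per row
    wb.save('豆瓣TOP250电影.xlsx')


if __name__ == '__main__':
    main()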