本帖最后由 qiuyouzhi 于 2020-4-18 13:03 编辑
这直接写个循环遍历一遍就行:

import requests
import bs4
import re
# Scrape pages 1-5 of the agefans.tv "recommend" listing and append one
# "<title><episode-count>" line per show to age动漫推荐.txt.
# NOTE(review): indentation was lost in the original paste; reconstructed the
# obvious loop structure. Each <li class="anime_icon2"> carries the show title
# in h4 > a and the episode count in a > span.
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36'}

result = []
for page in range(1, 6):
    url = f"https://www.agefans.tv/recommend?page={page}"
    print(url)
    res = requests.get(url, headers=headers)
    # Parse the response once per page (the original parsed the same HTML
    # three times and ran the identical find_all twice).
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    for each in soup.find_all("li", class_="anime_icon2"):
        # Title (动漫名) and episode count (集数) come from the same <li>,
        # so pair them directly instead of filling two parallel lists and
        # re-joining them by index afterwards.
        result.append(each.h4.a.text + each.a.span.text + '\n')

# Append mode so repeated runs accumulate results, matching the original.
with open("age动漫推荐.txt", "a", encoding="utf-8") as f:
    f.writelines(result)