|
马上注册,结交更多好友,享用更多功能^_^
您需要 登录 才可以下载或查看,没有账号?立即注册
x
本帖最后由 君子好逑 于 2020-8-22 22:18 编辑
- import requests
- import bs4
- import re
def open_html(url):
    """GET *url* with browser-like headers and return the response body as text."""
    request_headers = {
        'Accept-Language': 'zh-CN',
        'Cache-Control': 'no-cache',
        'Connection': 'Keep-Alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18363',
    }
    response = requests.get(url=url, headers=request_headers)
    return response.text
def get_mulu():
    """Search biduo.cc for a novel by name and return the absolute URL of the
    top search result's table-of-contents page.

    Side effects: prompts the user for the novel name, and opens
    '<name>.txt' for writing, bound to the module-level global ``f``
    (closed later in main()).

    Returns:
        str: absolute URL of the novel's chapter-list page.
    """
    search_prefix = 'https://www.biduo.cc/search.php?q='
    name = input("请输入小说名:")
    global f
    # Fix: force utf-8 — the platform-default codec (e.g. gbk on Windows)
    # can raise UnicodeEncodeError on some characters in chapter text.
    f = open(name + '.txt', 'w', encoding='utf-8')
    url = search_prefix + name
    headers = {
        'Accept-Language': 'zh-CN',
        'Cache-Control': 'no-cache',
        'Connection': 'Keep-Alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18363',
    }
    res = requests.get(url, headers=headers)
    soup = bs4.BeautifulSoup(res.text, features="lxml")
    # First matching <h3> is the top-ranked search hit; use find_all for all hits.
    xiaoshuo = soup.find("h3", class_="result-item-title result-game-item-title")
    # Inside that hit, the <a> tag's href is the site-relative TOC link.
    href = xiaoshuo.find("a").get('href')
    return "https://www.biduo.cc" + href  # prepend the site root
def get_html(url):
    """Download the page at *url* and return its HTML text.

    Functionally identical to open_html(); kept as a separate entry point
    so existing callers keep working.
    """
    browser_headers = {
        'Accept-Language': 'zh-CN',
        'Cache-Control': 'no-cache',
        'Connection': 'Keep-Alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18363',
    }
    return requests.get(url=url, headers=browser_headers).text
def get_first(url):
    """Scan the chapter list at *url* and return the URL of the first real
    chapter — the first linked page whose body contains '第一章'.

    If no linked page qualifies, the last candidate URL tried is returned
    (or *url* itself when the list is empty), matching the original
    fall-through behavior.
    """
    toc_html = open_html(url)
    chapter_link = re.compile('<dd><a href="(.*?)" >.*?</a></dd>')
    site_root = 'https://www.biduo.cc'
    for relative_href in chapter_link.findall(toc_html):
        url = site_root + relative_href
        if '第一章' in open_html(url):
            break  # found the first true chapter page
    return url
def get_text(html):
    """Extract the chapter body paragraphs from *html*, print each one and
    append it to the global output file ``f`` (opened in get_mulu()).

    NOTE(review): the pattern below looks forum-mangled (it likely lost
    '&nbsp;'/'<br>' anchors in the paste) — verify against the live page
    markup before relying on it.
    """
    paragraphs = re.compile("(.*?) ").findall(html)
    if paragraphs:
        paragraphs.pop(0)  # drop the first match — not part of the body
    for paragraph in paragraphs:
        text_line = paragraph.split('<br>')[0]  # keep content before the line break
        print(text_line)
        f.write(text_line)
        f.write('\n')
def get_next(html):
    """Return the absolute URL of the next chapter, parsed from the
    prev / list / next navigation row in *html*.

    NOTE(review): the literal arrow/spacing in the pattern may have been
    mangled by the forum paste — confirm against the live page markup.
    """
    site_root = 'https://www.biduo.cc'
    nav_pattern = re.compile(r'.*?">上一章</a> ← <a href=".*?">章节列表</a> .*?; <a href="(.*?)"')
    hits = nav_pattern.findall(html)
    # Take the last occurrence, mirroring the original list.pop() behavior.
    return site_root + hits.pop()
def get_title(html):
    """Return the chapter title: the contents of the first <h1> tag in *html*.

    Raises IndexError when no <h1> is present, as the original did.
    """
    return re.findall('<h1>(.*?)</h1>', html)[0]
def main():
    """Drive the scraper: locate the novel's TOC, find the first chapter,
    then walk the next-chapter links, writing each chapter's title and
    body into the global output file ``f`` (opened by get_mulu())."""
    toc_url = get_mulu()
    chapter_count = int(input('请输入您想获取的章节总数:'))
    url = get_first(toc_url)
    for _ in range(chapter_count):
        page = get_html(url)
        title = get_title(page)
        # Resolve the next link before writing, so the page is parsed once.
        next_url = get_next(page)
        f.write(title)
        f.write('\n')
        f.write('\n')
        get_text(page)
        url = next_url
        f.write('\n')
        f.write('\n')
    f.close()


# Script entry point — run the scraper only when executed directly.
if __name__ == "__main__":
    main()
-
-
复制代码
在大佬们的帮助下初步完成了这个程序。爬了无敌真寂寞的前三章和元尊的前六章(因为元尊的前三章都是更新预告),倒是还行。谢谢大佬们的指点。就是爬完之后发现爬得挺慢的,希望大佬们能给我一些改进的意见
|