|
马上注册,结交更多好友,享用更多功能^_^
您需要 登录 才可以下载或查看,没有账号?立即注册
x
- import urllib.request
- from bs4 import BeautifulSoup
- import re
# Shared scraper state: chapter page URLs and pending 0-based chapter indices.
url_list = []
chapter = []
# Output file for the scraped novel ("无敌真寂寞" is the novel's title).
# Explicit UTF-8: the original relied on the locale default encoding, which
# works on Chinese-locale Windows but raises UnicodeEncodeError for the
# Chinese text on most other platforms.
f = open('无敌真寂寞.txt', 'w', encoding='utf-8')
def get_url(num=2):
    """Build the page URLs for the first *num* chapters.

    Appends each 0-based chapter index to the global ``chapter`` list and
    the corresponding page URL to the global ``url_list``.  Page ids start
    at 13353637 and are assumed consecutive — NOTE(review): per the forum
    question itself, consecutive ids do NOT hold for every chapter on this
    site; scraping the table-of-contents page would be more robust.
    """
    base = 'https://www.biduo.cc/biquge/39_39888/c'
    for i in range(num):
        chapter.append(i)
        # f-string replaces the original concatenate-then-reset dance that
        # also rebound the ``num`` parameter as a scratch variable.
        url_list.append(f'{base}{13353637 + i}.html')
-
def get_html(url):
    """Fetch *url* and return the response body decoded as GBK text.

    Undecodable byte sequences are dropped (``errors='ignore'``) rather
    than raising, since the site's pages can contain malformed GBK.
    """
    headers = {
        'Accept-Language': 'zh-CN',
        'Cache-Control': 'no-cache',
        'Connection': 'Keep-Alive',
        'User-Agent': (
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
            'AppleWebKit/537.36 (KHTML, like Gecko) '
            'Chrome/70.0.3538.102 Safari/537.36 Edge/18.18363'
        ),
    }
    request = urllib.request.Request(url=url, headers=headers)
    # Context manager closes the response; the original leaked one open
    # socket per chapter fetched.
    with urllib.request.urlopen(request) as response:
        return response.read().decode('gbk', 'ignore')
def get_text(html):
    """Extract paragraph text from a raw biduo.cc chapter page, echo it to
    stdout, and append it to the global output file ``f``.

    NOTE(review): the whitespace-only string literals below look like HTML
    entities (e.g. ``&nbsp;``) that were collapsed into plain spaces when
    this snippet was pasted to the forum — confirm the exact literals
    against the original script before trusting the matching behavior.
    """
    # Non-greedy: each match is one paragraph up to the '<br><br> ' break.
    regular = re.compile(".*?<br><br> ")
    m = regular.findall(html)
    for each in m:
        # Drop the trailing 32 characters of captured markup — a magic
        # number tied to the site's fixed tag layout; TODO confirm.
        each = each[:-32]
        # Indent the console echo of the paragraph.
        print(' ', end='')
        if ' ' in each:
            # Keep only the text after the last ';' — presumably to strip
            # leading entity remnants such as '&nbsp;' split at ';'.
            print(each.split(';')[-1])
            f.write(each.split(';')[-1])
            f.write('\n')
        else:
            print(each)
            f.write(each)
            f.write('\n')
def main():
    """Drive the scraper: ask how many chapters to fetch, then download
    each chapter page and write heading + body to the global file ``f``.

    The file is closed on exit even if a download raises (the original
    closed it only on the fully-successful path).
    """
    number = int(input('请输入您想获取的章节总数:'))
    get_url(number)
    try:
        # enumerate replaces the original pattern of popping indices off
        # the global ``chapter`` queue one at a time.
        for index, url in enumerate(url_list, start=1):
            f.write('第' + str(index) + '章')
            f.write('\n\n\n')
            html = get_html(url)
            get_text(html)
    finally:
        f.close()
# Standard entry-point guard: run the scraper only when the script is
# executed directly, not when imported as a module.
if __name__ == "__main__":
    main()
复制代码
爬了个小说,结果每一章跟下一章的地址不连续,除了去目录爬地址,还有啥方法吗,求大佬指点 |
|