The code below runs fine and scrapes the first ten chapters, but the chapter titles and the chapter text are saved to separate files.
Does anyone have a better approach that saves each title together with its content? Thanks!
from bs4 import BeautifulSoup
import requests

# Fetch the full chapter list: each chapter's name and URL
def getHtml(url):
    htm2 = requests.get(url, headers=headers)   # headers must be passed as a keyword argument
    soup = BeautifulSoup(htm2.content, 'lxml')
    books_lst = []
    name = soup.find('div', class_='listmain')
    # parent div -> child dl -> grandchildren dt (section headings) and dd (one per chapter)
    if name:
        dd_items = name.find('dl')
        dt_num = 0
        for n in dd_items.children:
            ename = str(n.name).strip()
            if ename == 'dt':
                dt_num += 1
            if ename != 'dd':
                continue
            books_info = {}
            if dt_num == 2:   # only the dd tags after the second dt (the main chapter list)
                durls = n.find_all('a')[0]
                books_info['name'] = durls.get_text()
                books_info['url'] = 'http://www.biqukan.com' + durls.get('href')
                books_lst.append(books_info)
    return books_lst

# Collect each chapter's URL
def get_per_address(name_url):
    per_url = []
    for i in range(0, len(name_url) - 1340):   # keep only the first ten chapters (full list minus 1340)
        per_url.append(name_url[i].get('url'))
    return per_url

# Fetch and save one chapter's text
def get_charpter_text(url):
    html = requests.get(url, headers=headers)
    soup = BeautifulSoup(html.content, 'lxml')
    x_cnt = soup.find('div', attrs={'id': 'content'})
    cont = x_cnt.get_text()
    f_cont = str(cont).strip().replace('\r \xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0', '').replace('\u3000\u3000', '')
    path = 'C:\\Users\\Administrator\\Desktop\\new{}.txt'.format(x + 2)
    with open(path, 'a+', encoding='utf-8') as file:
        file.write(f_cont + '\n\n')

# Collect each chapter's title
def get_per_title(name_url):
    per_tit = []
    for i in range(0, len(name_url) - 1340):
        per_tit.append(name_url[i].get('name'))
    return per_tit

# Save: chapter texts go to one file, titles to another
def save_text(name_url):
    purl = get_per_address(name_url)
    for p2 in range(0, len(purl)):
        get_charpter_text(purl[p2])
    # save the chapter titles
    name_tit = get_per_title(name_url)
    for t1 in range(0, len(name_tit)):
        path = 'C:\\Users\\Administrator\\Desktop\\new{}.txt'.format(x + 1)
        with open(path, 'a+', encoding='utf-8') as file:
            file.write(name_tit[t1] + '\n\n')

if __name__ == '__main__':
    url = 'http://www.biqukan.com/1_1094/'
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"}
    x = 5   # output files become new7.txt (chapter text) and new6.txt (titles)
    name_url = getHtml(url)
    save_text(name_url)
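
One possible direction, sketched below as an untested idea: since getHtml() already returns each chapter's name and url as a pair, a single loop can fetch the page and write the title directly above its text in one file. save_merged and the book.txt path are placeholder names, not part of the original code; the sketch reuses the global headers from __main__.

# Untested sketch: write each chapter's title and body together into one file.
def save_merged(name_url, path='C:\\Users\\Administrator\\Desktop\\book.txt'):
    with open(path, 'a+', encoding='utf-8') as file:
        for chap in name_url[:10]:          # first ten chapters only
            html = requests.get(chap['url'], headers=headers)
            soup = BeautifulSoup(html.content, 'lxml')
            node = soup.find('div', attrs={'id': 'content'})
            if node is None:                # skip pages that failed to parse
                continue
            text = node.get_text().strip().replace('\u3000\u3000', '')
            file.write(chap['name'] + '\n\n' + text + '\n\n')   # title first, then body

Called as save_merged(name_url) in place of save_text(name_url), this would produce a single file with each title immediately followed by its chapter text, so no separate title/content files are needed.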