import requests, os
from lxml import etree
from fake_useragent import UserAgent
import time

if not os.path.exists('妖怪'):
    os.mkdir('妖怪')

class goblin(object):
    def __init__(self):
        self.url = "http://www.cbaigui.com/?paged={}"
        ua = UserAgent(verify_ssl=False)  # skip SSL certificate verification when fake_useragent fetches its UA data
        self.headers = {
            'User-Agent': ua.random,  # random UA for a random browser; UserAgent().chrome gives a random Chrome UA
            'Host': 'www.cbaigui.com',
            'Referer': 'http://www.cbaigui.com/?paged=36'  # Referer tells the server which page you came from
        }

    '''Send the request and get the response'''
    def get_page(self, url):
        res = requests.get(url=url, headers=self.headers)
        html = res.content.decode("utf-8")
        return html

    '''Parse the data'''
    def parse_page(self, html):
        parse_html = etree.HTML(html)
        t = parse_html.xpath('//div[@class="post-inner post-hover"]/h2')
        for i in t:
            goblin_herf = i.xpath('./a/@href')[0].strip()  # link to the detail page
            name = i.xpath('./a/text()')[0].strip()        # name of the corresponding entry
            print(name, goblin_herf)
            html2 = self.get_page(goblin_herf)  # send the second request (detail page)
            parse_html2 = etree.HTML(html2)
            r = parse_html2.xpath('//div[@class="entry"]/p/text()')
            for rte in r:
                # print(rte)
                try:
                    with open("./妖怪/汇总.txt", "a", encoding='utf-8') as f:
                        f.write(rte)  # append the paragraph to the summary file
                except OSError:
                    pass
                continue

    def main(self):
        startPage = int(input("Start page: "))
        endPage = int(input("End page: "))
        for page in range(startPage, endPage + 1):
            url = self.url.format(page)
            print(url)
            html = self.get_page(url)
            self.parse_page(html)
            time.sleep(1)
            print("====================== Page %s crawled successfully! =======================" % page)

if __name__ == '__main__':
    imageSpider = goblin()
    imageSpider.main()
About line 42: is there a way to separate the texts of the different entries by name, with line breaks, within the same file?
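Side note, not from the original post: if a single 汇总.txt is not a hard requirement, another option is one file per entry, named after name. A minimal sketch under that assumption; save_entry is a hypothetical helper, and the filename sanitizing pattern is an assumption:

import os, re

def save_entry(name, paragraphs, folder='妖怪'):
    # create the output folder if it does not exist yet
    os.makedirs(folder, exist_ok=True)
    # replace characters that are illegal in filenames (assumed sanitizer)
    safe_name = re.sub(r'[\\/:*?"<>|]', '_', name)
    with open(os.path.join(folder, f'{safe_name}.txt'), 'w', encoding='utf-8') as f:
        f.write('\n'.join(paragraphs))  # one paragraph per line

The revised script below instead keeps everything in 汇总.txt and simply writes name on its own line before each entry's text.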
import requests, os
from lxml import etree
from fake_useragent import UserAgent
import time

if not os.path.exists('妖怪'):
    os.mkdir('妖怪')

class goblin(object):
    def __init__(self):
        self.url = "http://www.cbaigui.com/?paged={}"
        ua = UserAgent(verify_ssl=False)  # skip SSL certificate verification when fake_useragent fetches its UA data
        self.headers = {
            'User-Agent': ua.random,  # random UA for a random browser; UserAgent().chrome gives a random Chrome UA
            'Host': 'www.cbaigui.com',
            'Referer': 'http://www.cbaigui.com/?paged=36'  # Referer tells the server which page you came from
        }

    '''Send the request and get the response'''
    def get_page(self, url):
        res = requests.get(url=url, headers=self.headers)
        html = res.content.decode("utf-8")
        return html

    '''Parse the data'''
    def parse_page(self, html):
        parse_html = etree.HTML(html)
        t = parse_html.xpath('//div[@class="post-inner post-hover"]/h2')
        for i in t:
            goblin_herf = i.xpath('./a/@href')[0].strip()  # link to the detail page
            name = i.xpath('./a/text()')[0].strip()        # name of the corresponding entry
            print(name, goblin_herf)
            html2 = self.get_page(goblin_herf)  # send the second request (detail page)
            parse_html2 = etree.HTML(html2)
            r = parse_html2.xpath('//div[@class="entry"]/p/text()')
            r = ''.join(r)  # merge the paragraphs of this entry into one string
            try:
                with open("./妖怪/汇总.txt", "a", encoding='utf-8') as f:
                    f.write(f'{name}\n{r}\n')  # write the name on its own line, then the entry's text
            except OSError:
                pass
            continue

    def main(self):
        startPage = int(input("Start page: "))
        endPage = int(input("End page: "))
        for page in range(startPage, endPage + 1):
            url = self.url.format(page)
            print(url)
            html = self.get_page(url)
            self.parse_page(html)
            time.sleep(1)
            print("====================== Page %s crawled successfully! =======================" % page)

if __name__ == '__main__':
    imageSpider = goblin()
    imageSpider.main()
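An optional hardening of get_page, not part of the original post (a sketch assuming the rest of the class stays unchanged): add a timeout and a status check so a hung or failed request does not silently return a broken page. Callers would then need a small guard (if html is None: skip) before parsing.

    def get_page(self, url):
        try:
            res = requests.get(url=url, headers=self.headers, timeout=10)
            res.raise_for_status()                  # raise for 4xx/5xx responses
            return res.content.decode("utf-8")
        except requests.RequestException as e:
            print("request failed:", url, e)
            return None                             # caller should skip this page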