Multithreading a web crawler
import requests
import bs4
import re
from lxml import etree
from multiprocessing import Pool

def open_html(url):
    res = requests.get(url=url, headers=headers)
    html = res.text
    return html

def get_mulu():  # get the URL of the novel's table of contents
    url1 = 'https://www.biduo.cc/search.php?q='
    name = input("Enter the novel's name: ")
    global f
    f = open(name + '.txt', 'w', encoding='utf-8')
    url = url1 + name
    res = requests.get(url, headers=headers)
    s = bs4.BeautifulSoup(res.text, features="lxml")
    xiaoshuo = s.find("h3", class_="result-item-title result-game-item-title")  # first matching tag, i.e. the top search hit; use s.find_all to get all of them
    url_ = xiaoshuo.find("a").get('href')  # inside that tag, find the <a> tag and get its href attribute
    return "https://www.biduo.cc" + url_  # prepend the site root

def get_html(url):
    response = requests.get(url=url, headers=headers)
    html = response.text
    return html

def get_text(html):
    tree = etree.HTML(html)
    title = tree.xpath('//div[@class="bookname"]/h1/text()')
    f.write(title)
    f.write('\n\n')
    text = tree.xpath('//div[@id="content"]//text()')
    for each in text:
        each.replace('\xa0\xa0\xa0\xa0', '\n')
        print(each)
        f.write(each + '\n')

def get_list(url):
    html = open_html(url)
    content_list = []
    regular1 = re.compile('<dd><a href="(.*?)">.*?</a></dd>')
    list1 = regular1.findall(html)
    url_1 = 'https://www.biduo.cc'
    for each in list1:
        url = url_1 + each
        content_list.append(url)
    return content_list

def main():
    catalog_address = get_mulu()
    number = int(input('How many chapters do you want to fetch? '))
    content_list = get_list(catalog_address)
    content_list = content_list
    for each in content_list:
        print(each)
    p = Pool(9)
    p.map(get_text, content_list)
    f.close()

if __name__ == "__main__":
    headers = {
        'Accept-Language': 'zh-CN',
        'Cache-Control': 'no-cache',
        'Connection': 'Keep-Alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18363'
    }
    main()
Can anyone point out where I went wrong with the multithreading here?

1q23w31 replied (2020-8-27 14:28):
import requests
import bs4
import re
from lxml import etree
from multiprocessing import Pool

def open_html(url):
    res = requests.get(url=url, headers=headers)
    html = res.text
    return html

def get_mulu():  # get the URL of the novel's table of contents
    url1 = 'https://www.biduo.cc/search.php?q='
    name = input("Enter the novel's name: ")
    global f
    f = open(name + '.txt', 'w', encoding='utf-8')
    url = url1 + name
    res = requests.get(url, headers=headers)
    s = bs4.BeautifulSoup(res.text, features="lxml")
    xiaoshuo = s.find("h3", class_="result-item-title result-game-item-title")  # first matching tag, i.e. the top search hit; use s.find_all to get all of them
    url_ = xiaoshuo.find("a").get('href')  # inside that tag, find the <a> tag and get its href attribute
    return "https://www.biduo.cc" + url_  # prepend the site root

def get_html(url):
    response = requests.get(url=url, headers=headers)
    html = response.text
    return html

def get_text(url):  # now takes a url and fetches the page itself
    html = requests.get(url, headers=headers).text
    tree = etree.HTML(html)
    title = tree.xpath('//div[@class="bookname"]/h1/text()')
    print(title)
    f.write(title)
    f.write('\n\n')
    text = tree.xpath('//div[@id="content"]//text()')
    for each in text:
        each.replace('\xa0\xa0\xa0\xa0', '\n')
        print(each)
        f.write(each + '\n')

def get_list(url):
    html = open_html(url)
    content_list = []
    regular1 = re.compile('<dd><a href="(.*?)">.*?</a></dd>')
    list1 = regular1.findall(html)
    url_1 = 'https://www.biduo.cc'
    for each in list1:
        url = url_1 + each
        content_list.append(url)
    return content_list

def main():
    catalog_address = get_mulu()
    number = int(input('How many chapters do you want to fetch? '))
    content_list = get_list(catalog_address)
    print(len(content_list))
    content_list = content_list
    for each in content_list:
        print(each)
    p = Pool(9)
    p.map(get_text, content_list)
    p.join()
    f.close()

if __name__ == "__main__":
    headers = {
        'Accept-Language': 'zh-CN',
        'Cache-Control': 'no-cache',
        'Connection': 'Keep-Alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18363'
    }
    main()
The code is fixed; the change to look at is get_text, which now takes a url and fetches the page itself. Be more careful when you write code.

君子好逑 replied, quoting 1q23w31 (2020-8-27 14:27, "The code is fixed; the change to look at is get_text ..."):

Deleting and patching on top of the old code like that makes it easy to slip up.
1q23w31 replied, quoting 君子好逑 (2020-8-27 14:40, "Deleting and patching on top of the old code ..."):

Also, some of your functions duplicate each other:

def open_html(url):
    res = requests.get(url=url, headers=headers)
    html = res.text
    return html

def get_html(url):
    response = requests.get(url=url, headers=headers)
    html = response.text
    return html

These two are identical.
君子好逑 replied, quoting 1q23w31 (2020-8-27 14:27, "The code is fixed ..."):

Does this code actually run on your machine? It keeps throwing errors for me: first there was this error message, and after I pasted a copy of headers into the get_text function, it reported this other error.
1q23w31 replied, quoting 君子好逑's error report (2020-8-27 14:45):
import requests
import bs4
import re
from lxml import etree
from multiprocessing import Pool

headers = {
    'Accept-Language': 'zh-CN',
    'Cache-Control': 'no-cache',
    'Connection': 'Keep-Alive',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18363'
}

def open_html(url):
    res = requests.get(url=url, headers=headers)
    html = res.text
    return html

def get_mulu():  # get the URL of the novel's table of contents
    url1 = 'https://www.biduo.cc/search.php?q='
    name = input("Enter the novel's name: ")
    global f
    f = open(name + '.txt', 'w', encoding='utf-8')
    url = url1 + name
    res = requests.get(url, headers=headers)
    s = bs4.BeautifulSoup(res.text, features="lxml")
    xiaoshuo = s.find("h3", class_="result-item-title result-game-item-title")  # first matching tag, i.e. the top search hit; use s.find_all to get all of them
    url_ = xiaoshuo.find("a").get('href')  # inside that tag, find the <a> tag and get its href attribute
    return "https://www.biduo.cc" + url_  # prepend the site root

def get_html(url):
    response = requests.get(url=url, headers=headers)
    html = response.text
    return html

def get_text(url):
    html = requests.get(url, headers=headers).text
    tree = etree.HTML(html)
    title = tree.xpath('//div[@class="bookname"]/h1/text()')
    print(title)
    f.write(title)
    f.write('\n\n')
    text = tree.xpath('//div[@id="content"]//text()')
    for each in text:
        each.replace('\xa0\xa0\xa0\xa0', '\n')
        print(each)
        f.write(each + '\n')

def get_list(url):
    html = open_html(url)
    content_list = []
    regular1 = re.compile('<dd><a href="(.*?)">.*?</a></dd>')
    list1 = regular1.findall(html)
    url_1 = 'https://www.biduo.cc'
    for each in list1:
        url = url_1 + each
        content_list.append(url)
    return content_list

def main():
    catalog_address = get_mulu()
    number = int(input('How many chapters do you want to fetch? '))
    content_list = get_list(catalog_address)
    print(len(content_list))
    content_list = content_list
    for each in content_list:
        print(each)
    p = Pool(9)
    p.map(get_text, content_list)
    f.close()

if __name__ == "__main__":
    main()
Give this a try.
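A note on why this version moves headers to module level: on Windows, multiprocessing starts its workers by re-importing the script, so module-level names are re-created inside every worker process, while anything bound only under the if __name__ == "__main__": guard exists in the parent alone. That is what made the workers raise NameError for headers, and the same logic applies to any other global the workers touch, such as f. A standalone sketch of the difference (the names AT_MODULE_LEVEL and IN_MAIN_GUARD are made up for illustration):

from multiprocessing import Pool

AT_MODULE_LEVEL = 'visible'  # re-created when each worker imports the module

def probe(_):
    return AT_MODULE_LEVEL  # fine in every worker
    # IN_MAIN_GUARD would raise NameError here in a spawned worker

if __name__ == '__main__':
    IN_MAIN_GUARD = 'parent only'  # this line never runs in a spawned worker
    with Pool(2) as p:
        print(p.map(probe, range(2)))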
君子好逑 replied, quoting 1q23w31 (2020-8-27 14:50, "Give this a try"):

It still reports that f is not defined.

1q23w31 replied, quoting 君子好逑 (2020-8-27 14:53, "It still reports that f is not defined"):
import requests
import bs4
import re
from lxml import etree
from multiprocessing import Pool

headers = {
    'Accept-Language': 'zh-CN',
    'Cache-Control': 'no-cache',
    'Connection': 'Keep-Alive',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18363'
}

def open_html(url):
    res = requests.get(url=url, headers=headers)
    html = res.text
    return html

def get_mulu():  # get the URL of the novel's table of contents
    url1 = 'https://www.biduo.cc/search.php?q='
    name = input("Enter the novel's name: ")
    global f
    f = open(name + '.txt', 'w', encoding='utf-8')
    url = url1 + name
    res = requests.get(url, headers=headers)
    s = bs4.BeautifulSoup(res.text, features="lxml")
    xiaoshuo = s.find("h3", class_="result-item-title result-game-item-title")  # first matching tag, i.e. the top search hit; use s.find_all to get all of them
    url_ = xiaoshuo.find("a").get('href')  # inside that tag, find the <a> tag and get its href attribute
    return "https://www.biduo.cc" + url_  # prepend the site root

def get_html(url):
    response = requests.get(url=url, headers=headers)
    html = response.text
    return html

def get_text(url):
    html = requests.get(url, headers=headers).text
    tree = etree.HTML(html)
    title = tree.xpath('//div[@class="bookname"]/h1/text()')
    print(title)
    global f
    f.write(title)
    f.write('\n\n')
    text = tree.xpath('//div[@id="content"]//text()')
    for each in text:
        each.replace('\xa0\xa0\xa0\xa0', '\n')
        print(each)
        f.write(each + '\n')

def get_list(url):
    html = open_html(url)
    content_list = []
    regular1 = re.compile('<dd><a href="(.*?)">.*?</a></dd>')
    list1 = regular1.findall(html)
    url_1 = 'https://www.biduo.cc'
    for each in list1:
        url = url_1 + each
        content_list.append(url)
    return content_list

def main():
    catalog_address = get_mulu()
    number = int(input('How many chapters do you want to fetch? '))
    content_list = get_list(catalog_address)
    print(len(content_list))
    content_list = content_list
    for each in content_list:
        print(each)
    p = Pool(9)
    p.map(get_text, content_list)
    global f
    f.close()

if __name__ == "__main__":
    main()
I suggest you pull everything that touches the file out into one place; I've patched the f problem in the code above.
君子好逑 replied, quoting 1q23w31 (2020-8-27 14:57, "I suggest you pull everything that touches the file out into one place ..."):

Do you mean opening and closing the file inside get_text? I just had an idea: when get_text runs, if it detects that the file already exists it appends, and if not it creates it. Would that work? Or I could create the file in main and close it right away, then simply append to it inside get_text; that way I wouldn't need to declare f as a global at all, right?

1q23w31 replied (2020-8-27 15:11):

File operations are best concentrated in a single function; otherwise, anywhere else you use the file you have to declare it again. The existence check you describe for get_text would work too.
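Since each process needs its own handle anyway, one more option worth knowing (just a sketch under this thread's single-output-file setup; init_worker, work, and out.txt are made-up names) is Pool's initializer argument, which runs once inside every worker process:

from multiprocessing import Pool

f = None  # one handle per worker process, assigned by the initializer

def init_worker(filename):
    global f  # this global lives inside each worker, not in the parent
    f = open(filename, 'a', encoding='utf-8')

def work(url):
    f.write(url + '\n')  # note: appends from different workers may interleave

if __name__ == '__main__':
    with Pool(4, initializer=init_worker, initargs=('out.txt',)) as p:
        p.map(work, ['u1', 'u2', 'u3'])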
君子好逑 replied, quoting 1q23w31 (2020-8-27 15:11, "File operations are best concentrated in a single function ..."):

The argument after Pool is the number of processes handled at one time, and has nothing to do with the total number of tasks to be processed, right?
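For what it's worth, that matches the documented behaviour: Pool(9) only fixes the number of worker processes, and map feeds them however many tasks you pass. A quick standalone check (not part of the scraper):

import os
from multiprocessing import Pool

def task(i):
    return (i, os.getpid())  # record which worker process handled task i

if __name__ == '__main__':
    with Pool(3) as p:                    # 3 workers...
        results = p.map(task, range(10))  # ...10 tasks
    print(results)  # only 3 distinct pids appear across the 10 tasks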
君子好逑 replied, quoting the same post:

But the video I watched adds p.close and p.join after p.map. Is there really no harm in leaving them out?
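For reference, the usual sequence looks like the standalone sketch below: close() tells the pool no more tasks are coming, and join() waits for the workers to exit. Calling join() before close() raises ValueError, which a bare p.join() straight after p.map, as in one earlier version above, would trip over. And because map() itself blocks until every result is back, omitting the pair after a single map() mostly just skips the tidy shutdown:

from multiprocessing import Pool

def square(x):
    return x * x

if __name__ == '__main__':
    p = Pool(4)
    print(p.map(square, range(8)))  # map blocks until every result is in
    p.close()  # no more tasks will be submitted; must precede join()
    p.join()   # wait for the worker processes to terminate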
君子好逑 replied, quoting the same post:
import requests
import bs4
import re
from lxml import etree
from multiprocessing import Pool

headers = {
    'Accept-Language': 'zh-CN',
    'Cache-Control': 'no-cache',
    'Connection': 'Keep-Alive',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18363'
}

def open_html(url):
    res = requests.get(url=url, headers=headers)
    html = res.text
    return html

def get_mulu():  # get the URL of the novel's table of contents
    url1 = 'https://www.biduo.cc/search.php?q='
    name = input("Enter the novel's name: ")
    f = open(name + '.txt', 'w', encoding='utf-8')  # create (or empty) the output file, then close it right away
    f.close()
    url = url1 + name
    res = requests.get(url, headers=headers)
    s = bs4.BeautifulSoup(res.text, features="lxml")
    xiaoshuo = s.find("h3", class_="result-item-title result-game-item-title")  # first matching tag, i.e. the top search hit
    url_ = xiaoshuo.find("a").get('href')  # find the <a> tag inside and get its href attribute
    return ("https://www.biduo.cc" + url_, name)  # prepend the site root

def get_text(dic):
    name = dic['name']
    url = dic['url']
    f = open(name + '.txt', 'a+', encoding='utf-8')  # reopen in append mode inside the worker; no global needed
    html = requests.get(url, headers=headers).text
    tree = etree.HTML(html)
    title = tree.xpath('//div[@class="bookname"]/h1/text()')[0]  # xpath returns a list; take the first hit
    f.write(title)
    f.write('\n\n')
    text = tree.xpath('//div[@id="content"]//text()')
    for each in text:
        each = each.replace('\xa0\xa0\xa0\xa0', '\n')  # str.replace returns a new string, so reassign it
        f.write(each + '\n')
    f.write('\n\n')
    f.close()
    print(title, 'crawl complete!!!')

def get_list(url, name):
    html = open_html(url)
    content_list = []
    regular1 = re.compile('<dd><a href="(.*?)">.*?</a></dd>')
    list1 = regular1.findall(html)
    url_1 = 'https://www.biduo.cc'
    for each in list1:
        url = url_1 + each
        dict1 = {
            'name': name,
            'url': url
        }
        content_list.append(dict1)
    return content_list

def main():
    (catalog_address, name) = get_mulu()
    number = int(input('How many chapters do you want to fetch? '))
    content_list = get_list(catalog_address, name)
    print(len(content_list))
    content_list = content_list[:number]  # keep only the requested number of chapters (number was read but never used before)
    for each in content_list:
        print(each)
    p = Pool(9)
    p.map(get_text, content_list)

if __name__ == "__main__":
    main()
This is my revised code. Take a look and see whether anything can still be improved. Also, the chapters end up out of order after the crawl; do you have a good way to solve that?

1q23w31 replied, quoting 君子好逑 (2020-8-27 16:17):
With multiprocessing, content written into a single file is inherently unordered; that can't be avoided. You can work around it by creating multiple files (the large number of files is the drawback). Multi-process image scraping, for example, doesn't get scrambled, because each image is its own file.
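If a single file in the right order matters more than writing as you go, one alternative (a sketch under my own assumptions, not something from this thread) is to have each worker return the chapter text instead of writing it; Pool.map hands results back in input order, so the parent can write them sequentially:

from multiprocessing import Pool

def fetch_chapter(url):
    # the download and parsing would go here; a placeholder keeps the sketch short
    return 'text of ' + url

if __name__ == '__main__':
    urls = ['u1', 'u2', 'u3']
    with Pool(9) as p:
        chapters = p.map(fetch_chapter, urls)  # results come back in input order
    with open('novel.txt', 'w', encoding='utf-8') as f:
        for chapter in chapters:
            f.write(chapter + '\n\n')

The trade-off is that every chapter sits in memory until the map finishes.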
1q23w31 replied, quoting the same post:

If there are no more problems, please mark the best answer.