How to build a multi-coroutine crawler
I've been learning gevent recently and want to make this crawler multi-coroutine.
But I can't figure out how: I've tried many times and keep getting all kinds of exceptions.
Here's the code:
from parsel import Selector
from random import randint
from time import time
import os, aiohttp, gevent

headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'
}

if not os.path.exists('image'):
    os.mkdir('image')

class Download(object):
    def mk_url(self, startnum, endnum):
        '''Build the list of listing-page URLs'''
        task_list = []
        for _ in range(startnum, endnum + 1):
            base_url = 'https://anime-pictures.net/pictures/view_posts/{}?lang=en'.format(_)
            task_list.append(base_url)
        return task_list

    async def fetch_html(self, session, url):
        '''Fetch the page HTML'''
        async with session.get(url) as response:
            return await response.text()

    async def fetch_img_data(self, session, url):
        '''Fetch the raw image bytes'''
        async with session.get(url) as data:
            return await data.read()

    async def parser_data(self, session, html):
        '''Parse a listing page and save every image on it'''
        selector = Selector(html)
        result_list = selector.xpath('//span[@class="img_block_big"]')
        for result in result_list:
            image_url = result.xpath('./a/picture/source/img/@src').extract_first()
            img_url = 'https:' + image_url  # the src is protocol-relative, so add the scheme by hand
            content = await self.fetch_img_data(session, img_url)
            img_id = str(randint(0, 99999999999999))
            try:
                # splitext() returns (root, ext); only the extension is wanted here
                with open('image\\' + img_id + os.path.splitext(img_url)[1], mode='wb') as f:
                    f.write(content)
                print('Saved', img_id)
            except Exception as e:
                print(e)

    async def start_save(self, url):
        async with aiohttp.ClientSession(headers=headers) as session:
            html = await self.fetch_html(session, url=url)
            await self.parser_data(session=session, html=html)
            # base_url = 'https://anime-pictures.net/pictures/view_posts/0?lang=en'

    async def download_pictures(self, startnum, endnum):
        for url in self.mk_url(startnum, endnum):
            await self.start_save(url)

'''Instantiate and run'''
if __name__ == '__main__':
    print('Starting tasks...')
    download = Download()
    s_time = time()
    g1 = gevent.spawn(download.download_pictures, 1, 2000),
    g2 = gevent.spawn(download.download_pictures, 2001, 4000),
    # g3 = await gevent.spawn(download.download_pictures, 4001, 6000),
    # g4 = await gevent.spawn(download.download_pictures, 6001, 8000),
    gevent.joinall()
    e_time = time()
    print('Elapsed:', e_time - s_time, 'seconds')
Whoever turns this into a proper multi-coroutine version gets a big reward.
Thanks, everyone!
Edit (2020-9-19): here's the working version I ended up writing myself.
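Looking back, the exceptions in my gevent attempt came from mixing two different coroutine worlds. gevent.spawn expects a plain function, but download_pictures is an async def, so gevent only ever got back an un-awaited coroutine object; gevent.joinall() was also called without its required list of greenlets, and the trailing commas after each spawn turned g1 and g2 into tuples. If you really want gevent, the usual pattern is monkey-patching plus ordinary blocking functions. A minimal sketch of that pattern (fetch, urls and the 3-page range are just example stand-ins, not code from this thread):

from gevent import monkey
monkey.patch_all()  # patch the stdlib first, so blocking I/O yields to other greenlets

import gevent
import urllib.request

def fetch(url):
    # a plain function: this is what gevent.spawn expects, not a coroutine
    with urllib.request.urlopen(url) as resp:
        return resp.status

urls = ['https://anime-pictures.net/pictures/view_posts/{}?lang=en'.format(i)
        for i in range(1, 4)]
greenlets = [gevent.spawn(fetch, u) for u in urls]  # note: no trailing commas
gevent.joinall(greenlets)                           # joinall() needs the greenlet list
print([g.value for g in greenlets])                 # .value is each greenlet's return value

Since aiohttp coroutines can only be driven by an asyncio event loop, the version below switches the scheduler to asyncio instead: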
from parsel import Selector
from random import randint
from time import time
import os, aiohttp, asyncio

headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'
}

path = os.path.join(os.path.dirname(__file__), 'image')
if not os.path.exists(path):
    print('No image directory found')
    os.mkdir(path)
    print('Created ' + path)

class Download(object):
    def mk_url(self, startnum, endnum):
        '''Build the list of listing-page URLs'''
        task_list = []
        for _ in range(startnum, endnum + 1):
            base_url = 'https://anime-pictures.net/pictures/view_posts/{}?lang=en'.format(_)
            task_list.append(base_url)
        return task_list

    async def fetch_html(self, session, url):
        '''Fetch the page HTML'''
        async with session.get(url) as response:
            return await response.text()

    async def fetch_img_data(self, session, url):
        '''Fetch the raw image bytes'''
        async with session.get(url) as data:
            return await data.read()

    async def parser_data(self, session, html):
        '''Parse a listing page and save every image on it'''
        selector = Selector(html)
        result_list = selector.xpath('//span[@class="img_block_big"]')
        for result in result_list:
            image_url = result.xpath('./a/picture/source/img/@src').extract_first()
            img_url = 'https:' + image_url  # the src is protocol-relative, so add the scheme by hand
            content = await self.fetch_img_data(session, img_url)
            img_id = str(randint(0, 99999999999999))
            try:
                # splitext() returns (root, ext); only the extension is wanted here
                with open(os.path.join(path, img_id + os.path.splitext(img_url)[1]), mode='wb') as f:
                    f.write(content)
                print('Saved', img_id)
            except Exception as e:
                print(e)

    async def start_save(self, url):
        async with aiohttp.ClientSession(headers=headers) as session:
            html = await self.fetch_html(session, url=url)
            await self.parser_data(session=session, html=html)
            # base_url = 'https://anime-pictures.net/pictures/view_posts/0?lang=en'

    async def download_pictures(self, startnum, endnum):
        for page in range(startnum, endnum + 1):
            print('###### Downloading page {} ######'.format(page))
            for url in self.mk_url(page, page):
                await self.start_save(url)

'''Instantiate and run'''
if __name__ == '__main__':
    print('Starting tasks...')
    download = Download()
    loop = asyncio.get_event_loop()
    s_time = time()
    tasks = [
        asyncio.ensure_future(download.download_pictures(1, 2000)),
        asyncio.ensure_future(download.download_pictures(2001, 4000)),
        asyncio.ensure_future(download.download_pictures(4001, 6000))
    ]
    loop.run_until_complete(asyncio.gather(*tasks))
    e_time = time()
    print('Elapsed:', e_time - s_time, 'seconds')
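One caveat about this version: each of the three tasks still fetches its pages strictly one after another, so at most three pages are ever in flight. For finer-grained fan-out you can bound one big gather with a semaphore. A minimal sketch, assuming the Download class above (bounded_save, the cap of 10, and the 100-page range are arbitrary example choices):

import asyncio

async def bounded_save(sem, download, url):
    # the semaphore caps how many pages are being fetched at once
    async with sem:
        await download.start_save(url)

async def main():
    download = Download()
    sem = asyncio.Semaphore(10)      # at most 10 pages in flight (arbitrary cap)
    urls = download.mk_url(1, 100)   # one coroutine per listing page
    await asyncio.gather(*(bounded_save(sem, download, u) for u in urls))

asyncio.run(main())

Sharing one aiohttp.ClientSession across all pages, instead of opening a new one inside start_save on every call, would cut the connection overhead further.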
After all this time, is there really no expert who can answer this?
I've already written it myself.
疾风怪盗 replied, quoting 风尘岁月 (2020-9-18 20:34):
> After all this time, is there really no expert who can answer this?
> I've already written it myself.
I've never heard of coroutines; I only know processes and threads.
If you've written it yourself, share the code!

风尘岁月 replied, quoting 疾风怪盗 (2020-9-18 20:38):
> I've never heard of coroutines; I only know processes and threads.
> If you've written it yourself, share the code!
Alright.
(╯▽╰)