tu.py
import scrapy
from ..items import DeepimgproItem


class ImgSpider(scrapy.Spider):
    name = 'img'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://pic.netbian.com/4kmeinv/']

    def parse(self, response):
        # Extract each image's title and the URL of its detail page
        li_list = response.xpath('//*[@id="main"]/div[3]/ul/li')
        for li in li_list:
            title = li.xpath('./a/b/text()').extract_first() + '.jpg'
            detail_url = 'https://pic.netbian.com' + li.xpath('./a/@href').extract_first()
            item = DeepimgproItem()
            item['title'] = title
            # Request the detail page; the full-size image URL is extracted there
            yield scrapy.Request(url=detail_url, callback=self.detail_parse, meta={'item': item})

    # Parse the detail page
    def detail_parse(self, response):
        item = response.meta['item']
        img_src = 'https://pic.netbian.com' + response.xpath('//*[@id="img"]/img/@src').extract_first()
        item['img_src'] = img_src
        yield item
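For reference, the spider and pipeline in this project assume an items.py along these lines (a minimal sketch reconstructed from the fields the spider sets; the actual file is not shown in the post):

import scrapy


class DeepimgproItem(scrapy.Item):
    title = scrapy.Field()    # image name, later used as the file name
    img_src = scrapy.Field()  # full-size image URL extracted from the detail page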
Pipeline class
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html

# useful for handling different item types with a single interface
import scrapy
from itemadapter import ItemAdapter
from scrapy.pipelines.images import ImagesPipeline


class DeepimgproPipeline(ImagesPipeline):
    # def process_item(self, item, spider):
    #     return item

    def get_media_requests(self, item, info):
        img_src = item['img_src']
        # Pass the image title along in the request's meta so that
        # file_path() can use it as the file name
        print(item['title'], 'downloaded and saved!')
        yield scrapy.Request(url=img_src, meta={'title': item['title']})

    def file_path(self, request, response=None, info=None, *, item=None):
        # Return the image's file name, taken from the meta set in get_media_requests()
        title = request.meta['title']
        return title

    def item_completed(self, results, item, info):
        return item
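For the ImagesPipeline subclass above to run, settings.py also has to register the pipeline and set an image store directory. A minimal sketch, assuming the project is named deepimgpro (the paths, priority, and user-agent string are illustrative, not from the post):

ITEM_PIPELINES = {
    'deepimgpro.pipelines.DeepimgproPipeline': 300,
}
IMAGES_STORE = './imgs'      # folder where ImagesPipeline writes the downloaded files
ROBOTSTXT_OBEY = False
USER_AGENT = 'Mozilla/5.0'   # many sites reject Scrapy's default user agent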
Can anyone tell me why the code above downloads the full-size images, while the code below only gets thumbnails?
tu.py
import scrapy
from ..items import TupianItem


class TuSpider(scrapy.Spider):
    name = 'tu'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://pic.netbian.com/4kmeinv/']

    def parse(self, response):
        li_list = response.xpath('//*[@id="main"]/div[3]/ul/li')
        for li in li_list:
            # Take the image src directly from the listing page
            img_src = 'https://pic.netbian.com' + li.xpath('.//img/@src').extract_first()
            item = TupianItem()
            item['src'] = img_src
            yield item
Pipeline class
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html

# useful for handling different item types with a single interface
import scrapy
from itemadapter import ItemAdapter
from scrapy.pipelines.images import ImagesPipeline


class mediaPileline(ImagesPipeline):
    # Override three methods of ImagesPipeline

    # Request the image data
    def get_media_requests(self, item, info):
        img_src = item['src']
        print(img_src)
        yield scrapy.Request(img_src)

    # Specify the storage path (only the file name needs to be returned)
    def file_path(self, request, response=None, info=None, *, item=None):
        imgName = request.url.split('/')[-1]
        print(imgName, 'saved')
        return imgName

    def item_completed(self, results, item, info):
        return item
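Likewise, the second project presumably declares its item and enables its pipeline roughly as follows (a sketch; the project name tupian and the priority value are assumptions):

# items.py
import scrapy


class TupianItem(scrapy.Item):
    src = scrapy.Field()  # image URL taken from the listing page

# settings.py
ITEM_PIPELINES = {
    'tupian.pipelines.mediaPileline': 300,
}
IMAGES_STORE = './imgs'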