Some exceptions when scraping the Doutula (doutula.com) meme site (begging the experts to tell this newbie where it went wrong)
import requests
import parsel
import re
import concurrent.futures

headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3756.400 QQBrowser/10.5.4043.400'}


def send_request(url):
    '''Request the data'''
    response = requests.get(url=url, headers=headers, verify=False)
    return response


def parse_data(data):
    '''Parse the data'''
    selector = parsel.Selector(data)
    result_list = selector.xpath('//a[@class="col-xs-6 col-sm-3"]')
    for result in result_list:
        title = result.xpath('./img/@data-original').extract_first()
        src_url = result.xpath('./img/@alt').extract_first()
        # build the file name with its extension
        all_title = title + '.' + src_url.split('.')[-1]
        yield all_title, src_url


def sava_data(file_name, data):
    '''Save the data'''
    with open('img\\' + file_name, mode='wb') as f:
        f.write(data)
        print('保存完成:', file_name)


def main(page):
    '''Page through the list pages'''
    for page in range(1, page + 1):
        print('============正在爬取第{}页数据============'.format(page))
        thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=3)
        res = send_request('https://www.doutula.com/photo/list/?page={}'.format(str(page)))
        src_url = parse_data(res.text)
        for file, url in src_url:
            image_response = send_request(url)
            thread_pool.submit(save_data, file, image_response.content)
        thread_pool.shutdown()


if __name__ == '__main__':
    main(10)
The exception output:
C:\Users\Administrator\AppData\Local\Programs\Python\Python36\python.exe E:/python_fruit/表情包/表情包_ronot-多线程.py
============正在爬取第1页数据============
C:\Users\Administrator\AppData\Local\Programs\Python\Python36\lib\site-packages\urllib3\connectionpool.py:986: InsecureRequestWarning: Unverified HTTPS request is being made to host 'www.doutula.com'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
InsecureRequestWarning,
Traceback (most recent call last):
File "C:\Users\Administrator\AppData\Local\Programs\Python\Python36\lib\site-packages\requests\models.py", line 380, in prepare_url
scheme, auth, host, port, path, query, fragment = parse_url(url)
File "C:\Users\Administrator\AppData\Local\Programs\Python\Python36\lib\site-packages\urllib3\util\url.py", line 392, in parse_url
return six.raise_from(LocationParseError(source_url), None)
File "<string>", line 3, in raise_from
urllib3.exceptions.LocationParseError: Failed to parse: 人呢?
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "E:/python_fruit/表情包/表情包_ronot-多线程.py", line 64, in <module>
main(10)
File "E:/python_fruit/表情包/表情包_ronot-多线程.py", line 48, in main
image_response = send_request(url)
File "E:/python_fruit/表情包/表情包_ronot-多线程.py", line 16, in send_request
response = requests.get(url = url, headers = headers, verify = False)
File "C:\Users\Administrator\AppData\Local\Programs\Python\Python36\lib\site-packages\requests\api.py", line 76, in get
return request('get', url, params=params, **kwargs)
File "C:\Users\Administrator\AppData\Local\Programs\Python\Python36\lib\site-packages\requests\api.py", line 61, in request
return session.request(method=method, url=url, **kwargs)
File "C:\Users\Administrator\AppData\Local\Programs\Python\Python36\lib\site-packages\requests\sessions.py", line 516, in request
prep = self.prepare_request(req)
File "C:\Users\Administrator\AppData\Local\Programs\Python\Python36\lib\site-packages\requests\sessions.py", line 459, in prepare_request
hooks=merge_hooks(request.hooks, self.hooks),
File "C:\Users\Administrator\AppData\Local\Programs\Python\Python36\lib\site-packages\requests\models.py", line 314, in prepare
self.prepare_url(url, params)
File "C:\Users\Administrator\AppData\Local\Programs\Python\Python36\lib\site-packages\requests\models.py", line 382, in prepare_url
raise InvalidURL(*e.args)
requests.exceptions.InvalidURL: Failed to parse: 人呢?
Process finished with exit code 1
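A quick way to see what is going wrong is to print what parse_data yields before downloading anything; this is a debugging sketch that reuses send_request and parse_data from the script above. If the second value of each pair is not an http(s) URL, the extraction rule is the problem.

# Debugging sketch (reuses send_request and parse_data defined above):
# print the pairs before downloading anything.
res = send_request('https://www.doutula.com/photo/list/?page=1')
for all_title, src_url in parse_data(res.text):
    print(repr(all_title), '->', repr(src_url))
    # If src_url does not start with http, the xpath rule is pulling the
    # wrong attribute and requests will raise InvalidURL on it.
    if not src_url.startswith('http'):
        print('not a URL - check the xpath attributes in parse_data')
        break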
The URL extraction in parse_data is wrong. I haven't tested the rest yet.

Stubborn posted on 2020-6-5 22:12:
The URL extraction in parse_data is wrong. I haven't tested the rest yet.

So how should I handle it? Just pass over those entries?

风尘岁月 posted on 2020-6-6 10:59:
So how should I handle it? Just pass over those entries?

The URL you are extracting is wrong. Can't you print it out and look at what you are actually extracting? Fix the extraction rule.
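For reference, here is one possible fix, as a sketch assuming that on the Doutula list page @data-original carries the real image URL and @alt carries the caption, which is what the "Failed to parse: 人呢?" message suggests: swap the two xpath expressions in parse_data.

def parse_data(data):
    '''Parse the list page and yield (file name, image URL) pairs.'''
    selector = parsel.Selector(data)
    result_list = selector.xpath('//a[@class="col-xs-6 col-sm-3"]')
    for result in result_list:
        # @alt is the caption text, @data-original is the actual image URL
        title = result.xpath('./img/@alt').extract_first()
        src_url = result.xpath('./img/@data-original').extract_first()
        if not title or not src_url:
            # skip entries that are missing either attribute
            continue
        # file name = caption + the extension taken from the URL
        all_title = title + '.' + src_url.split('.')[-1]
        yield all_title, src_url

Two smaller things worth checking as well: the save function is defined as sava_data but submitted to the pool as save_data, which would raise a NameError at thread_pool.submit(...) once a download succeeds, and captions such as "人呢?" may contain characters like '?' that Windows does not allow in file names, so the caption may need sanitizing before it is passed to open().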
extract_first()
That's scrapy usage.

兢兢 posted on 2020-6-6 13:42:
extract_first()
That's scrapy usage.

I don't know how to use the scrapy framework.
title = result.xpath('./img/@data-original')
src_url = result.xpath('./img/@alt')
extract_first() just takes the first element of the list that xpath() returns.
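You don't actually need the scrapy framework for that: extract_first() also exists in parsel itself (scrapy's selectors are built on parsel). A small sketch with a made-up HTML snippet to show the difference:

import parsel

# made-up snippet in the same shape as a Doutula list entry (for illustration only)
html = '<a class="col-xs-6 col-sm-3"><img data-original="https://example.com/a.jpg" alt="人呢?"></a>'
sel = parsel.Selector(text=html)

print(sel.xpath('//img/@data-original').extract())        # list of every match: ['https://example.com/a.jpg']
print(sel.xpath('//img/@data-original').extract_first())  # first match only: 'https://example.com/a.jpg'
print(sel.xpath('//img/@nothing').extract_first())        # None when there is no match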