Scraping meizitu with requests and bs4.
Result: nearly half of the images fail to download.
Running it again, some of the previously failed images then download successfully.
The network connection is fine, and the image source URLs open directly in a browser.
Help, please…
import requests
import os
import re
from bs4 import BeautifulSoup


def openurl(url):
    """Fetch a list page; returns the Response, or None on failure."""
    try:
        headers = {'Host': 'www.meizitu.com',
                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko'}
        res = requests.get(url, headers=headers)
        res.raise_for_status()
        return res
    except requests.RequestException:
        return None  # note: findimgurl() does not check for None


def findimgurl(url):
    """Extract the image source URLs from one list page."""
    res = openurl(url)
    soup = BeautifulSoup(res.text, "html.parser")
    data = soup.find_all(name='div', attrs={'class': "pic"})
    data2 = []
    for each in data:
        data2.append(each.find_all(name='a', attrs={'target': "_blank"}))
    imgurl = []
    for each2 in data2:
        a = re.search(r'src.+jpg', str(each2))
        imgurl.append(a.group(0)[5:])  # drop the leading 'src="'
    return imgurl


def save(url):
    """Download a single image into the current directory."""
    try:
        # Flatten the last four URL path segments into one file name
        path = url.split('/')[-4] + url.split('/')[-3] + url.split('/')[-2] + url.split('/')[-1]
        if not os.path.exists(path):
            print(path)
            headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko'}
            img = requests.get(url, headers=headers, timeout=30)
            print('Status code ' + str(img.status_code) + ', downloading...')
            with open(path, "wb") as f:
                f.write(img.content)  # the with block closes the file
            print("Saved\n")
        else:
            print(path)
            print("File already exists\n")
    except Exception:
        print("Save failed with an exception\n")


def main():
    i = input('Number of pages to download: ')
    folder = 'meizitu_limg'  # folder for the downloaded images
    if not os.path.exists(folder):
        os.mkdir(folder)
        print('\nFolder ' + folder + ' created ...\n')
    os.chdir(folder)
    for x in range(1, int(i) + 1):
        print('\nPage ' + str(x) + ': ready, starting download ...\n\n')
        root = 'page_' + str(x)
        if not os.path.exists(root):
            os.mkdir(root)
            print('\nFolder ' + root + ' created ...\n')
        os.chdir(root)
        url = 'https://www.meizitu.com/a/list_1_' + str(x) + '.html'  # page 1 is https://www.meizitu.com/a/list_1_1.html
        imgurl = findimgurl(url)
        print('Page ' + str(x) + ': found ' + str(len(imgurl)) + ' images, downloading ...\n\n')
        for eachurl in imgurl:
            save(eachurl)
        print('\nPage ' + str(x) + ': all images downloaded\n\n')
        os.chdir('..')


if __name__ == "__main__":
    main()
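One thing I'm not sure about: the regex r'src.+jpg' in findimgurl is greedy, so if the stringified tags contain anything else ending in jpg after the real src attribute, the captured URL comes out malformed and that download would fail. A minimal sketch of reading the src attribute directly from the parsed tags instead (not necessarily the cause; it assumes the same div class="pic" page structure as above):

def findimgurl_direct(url):
    """Collect image URLs by reading each img tag's src attribute,
    instead of regexing over the tag's string form."""
    res = openurl(url)
    soup = BeautifulSoup(res.text, "html.parser")
    imgurl = []
    for div in soup.find_all('div', attrs={'class': 'pic'}):
        for img in div.find_all('img'):
            src = img.get('src')  # None if the tag has no src attribute
            if src and src.endswith('.jpg'):
                imgurl.append(src)
    return imgurl

This also skips tags with no usable src instead of crashing on a.group(0) when the regex finds nothing.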
Please, guys, help me take a look.
Personally I don't think it's a timeout problem; 30 seconds should be plenty.
Here are the three kinds of output I see:
2016a0742limg.jpg
Status code 200, downloading...
Saved

2016a0741limg.jpg
Save failed with an exception

2016a0740limg.jpg
File already exists
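The "Save failed with an exception" case is the opaque one: the bare except in save() hides which exception actually fired (a timeout? a connection reset? a 403 from the server?). A small sketch for surfacing the real error; fetch_with_report is a hypothetical helper name, not part of the script above:

import traceback
import requests

def fetch_with_report(url, timeout=30):
    """Fetch one image URL; on failure, print the full exception
    instead of swallowing it the way the bare except does."""
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko'}
    try:
        img = requests.get(url, headers=headers, timeout=timeout)
        img.raise_for_status()  # make 4xx/5xx responses raise too
        return img.content
    except Exception:
        traceback.print_exc()  # shows whether it is a timeout, reset, 403, ...
        return None

If the traces turn out to be 403s, the site may be rejecting hotlink-style requests, and adding a Referer header such as 'Referer': 'https://www.meizitu.com/' to the download request could be worth trying (an assumption, not something confirmed here).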
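Also, since a second run often picks up the images that failed the first time, the failures look transient, so retrying each image a few times within one run might work around them while the real cause is unknown. A minimal sketch; download_with_retry is a hypothetical wrapper, not in the script above:

import time
import requests

def download_with_retry(url, path, retries=3, timeout=30):
    """Attempt one download up to `retries` times, pausing between tries."""
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko'}
    for attempt in range(1, retries + 1):
        try:
            img = requests.get(url, headers=headers, timeout=timeout)
            img.raise_for_status()
            with open(path, 'wb') as f:
                f.write(img.content)
            return True  # saved successfully
        except Exception as e:
            print('Attempt ' + str(attempt) + ' failed: ' + repr(e))
            time.sleep(2)  # brief pause before retrying
    return False  # all attempts failed

save() could call this in place of its direct requests.get call.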