import re
import requests
from bs4 import BeautifulSoup


def get_html(url):
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36 QIHU 360EE',
        'Referer': 'https://www.so.com/s?src=360chrome_newtab_search&q=%E5%B8%8C%E5%B3%B6%E3%81%82%E3%81%84%E3%82%8A',
        'Sec-Fetch-Mode': 'navigate'
    }
    html = requests.get(url=url, headers=header)
    # print(html.text)
    return html


# def get_list(html):
#     html = html.text
#     soup = BeautifulSoup(html, 'html.parser')
#     img_re = re.compile('https://p\d[.]ssl[.]qhimgs1[.]com/sdr/400__/.*?[.]jpg')
#     img_list = soup.find_all(img_re, soup)
#     print(img_list)
# a = re.compile(r'<img style=".*?src="(.*?[.]jpg)"', re.S)
# link_list = re.findall(a, html.text)
# return link_list


def get_list(html):
    link_list = re.findall(r'img":"(.*?\.jpg)', html.text)
    # print(link_list)
    return link_list


def main():
    url = 'https://image.so.com/i?src=360pic_normal&z=1&i=0&cmg=15484592.3836743514792807400.1594087443636.3574&q=%E5%B8%8C%E5%B2%9B%E3%81%82%E3%81%84%E3%82%8A'
    html = get_html(url)
    img_list = get_list(html)
    # print(img_list)
    for img_url in img_list:
        img_name = '美眉/' + img_url.split('/')[-1]
        print(img_name)
        with open(img_name, 'wb') as f:
            # print('开始爬取图片')
            # html = get_html(img_list)
            # print(html.content)
            f.write(html.content)
    print('爬取成功')


if __name__ == "__main__":
    main()
The images this crawler downloads won't open properly. What's the problem, and how do I fix it?
Last edited by Twilight6 on 2020-7-12 14:48.
This version works. If you print the links you scraped, you'll see they look like this:

'http:\\/\\/f1.huatiku.com\\/attachment\\/forum\\/201808\\/20\\/111505racut3fuqpw7kskc.jpg'

Just use sub from the re module to replace the \ with an empty string. The other change below is that get_html(img_url) is now actually called for each image link before writing; the original loop wrote the content of the search page itself into every .jpg file, which is why the images would not open. Even so, it only manages to grab a few images before the site's anti-scraping kicks in.
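As a quick standalone check (the variable name raw is just for illustration), applying that substitution to the escaped link above gives back a normal URL:

import re

raw = 'http:\\/\\/f1.huatiku.com\\/attachment\\/forum\\/201808\\/20\\/111505racut3fuqpw7kskc.jpg'
print(re.sub(r'\\', '', raw))
# http://f1.huatiku.com/attachment/forum/201808/20/111505racut3fuqpw7kskc.jpg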
import re
import requests


def get_html(url):
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36 QIHU 360EE',
        'Referer': 'https://www.so.com/s?src=360chrome_newtab_search&q=%E5%B8%8C%E5%B3%B6%E3%81%82%E3%81%84%E3%82%8A',
        'Sec-Fetch-Mode': 'navigate'
    }
    html = requests.get(url=url, headers=header)
    return html


def get_list(html):
    # the image links sit in JSON embedded in the page, e.g. "img":"http:\/\/...jpg"
    link_list = re.findall(r'img":"(.*?\.jpg)', html.text)
    return link_list


def main():
    url = 'https://image.so.com/i?src=360pic_normal&z=1&i=0&cmg=15484592.3836743514792807400.1594087443636.3574&q=%E5%B8%8C%E5%B2%9B%E3%81%82%E3%81%84%E3%82%8A'
    html = get_html(url)
    img_list = get_list(html)
    for img_url in img_list:
        # the extracted links still carry the JSON escapes (\/), strip the backslashes
        img_url = re.sub(r'\\', '', img_url)
        # save into the current directory, file name prefixed with 美眉
        img_name = '美眉' + img_url.split('/')[-1]
        with open(img_name, 'wb') as f:
            # request the image itself instead of rewriting the search page content
            html = get_html(img_url)
            f.write(html.content)
    print('爬取成功')


if __name__ == "__main__":
    main()
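On the anti-scraping point: without knowing how image.so.com throttles clients, a common guess is that pausing between downloads and reusing one connection reduces the chance of being blocked. The sketch below assumes a one-second delay, a requests.Session, and a generic Referer; none of these are confirmed for this site.

import re
import time

import requests

HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
    'Referer': 'https://image.so.com/',  # guessed from the search page host, not verified
}


def download_all(img_list):
    # reuse one connection for every request instead of opening a new one each time
    session = requests.Session()
    session.headers.update(HEADERS)
    for img_url in img_list:
        img_url = re.sub(r'\\', '', img_url)  # same un-escaping as above
        img_name = '美眉' + img_url.split('/')[-1]
        resp = session.get(img_url, timeout=10)
        if resp.status_code == 200:
            with open(img_name, 'wb') as f:
                f.write(resp.content)
        time.sleep(1)  # assumed pause between downloads to look less like a bot

It could be dropped into main() above in place of the for loop, as download_all(img_list).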