马上注册,结交更多好友,享用更多功能^_^
您需要 登录 才可以下载或查看,没有账号?立即注册
x
第一次发帖,多多包涵
如果有什么地方做的不好,请指出来,轻喷
倒数第二行url是专栏的网址,如果想爬取别的专栏,可以更改url

import requests
from bs4 import BeautifulSoup
import re
import os
# Directory where downloaded images are saved.
# exist_ok=True: without it, makedirs raises FileExistsError every run
# after the first, so the script could only ever be executed once.
os.makedirs(r'E://图片', exist_ok=True)
# Browser-like User-Agent so the request is not rejected as a bot.
header = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.67 Safari/537.36 Edg/87.0.664.47'}
def getdata(url, header):
    """Fetch the page at *url* and return all of its <img> tags.

    url:    page URL to download.
    header: dict of HTTP headers sent with the request (user-agent).
    Returns the list of <img> tag objects found by BeautifulSoup.
    """
    response = requests.get(url, headers=header)
    soup = BeautifulSoup(response.text, "html.parser")
    return soup.find_all('img')
def find_img(urls, header):
    """Download every image tag in *urls* that carries a data-src attribute.

    urls:   iterable of <img> tag objects (as returned by getdata).
    header: dict of HTTP headers used for the download requests.
    Returns 0.

    Images are written to E://图片 as 贞德<n>.jpg, where n is the 1-based
    position of the tag in *urls* (non-matching tags still consume a number,
    matching the original numbering scheme).
    """
    for i, tag in enumerate(urls, start=1):
        # The page inlines the real URL in a data-src="..." attribute; the
        # greedy match up to "height" plus the [:-2] trim strips the closing
        # quote and space before the height attribute.
        matches = re.findall(r'data-src="(.*)height', str(tag))
        if not matches:
            continue
        url = 'https:' + matches[0][:-2]
        print(url)
        img_data = requests.get(url, headers=header).content
        # Context manager guarantees the file is closed even if the
        # write fails (the original leaked the handle on error).
        with open(r'E://图片/贞德{0}.jpg'.format(i), 'wb') as f:  # "贞德i.jpg" is the image file name
            f.write(img_data)
    return 0
# URL of the column (专栏) to scrape; change this to crawl a different column.
url = 'https://www.bilibili.com/read/cv1705485'

# Guard so that importing this module does not trigger network requests;
# the scrape only runs when the file is executed as a script.
if __name__ == '__main__':
    find_img(getdata(url, header), header)