新手爬取妹子图,绝对领域!
# 导入爬虫所需要的模块 (modules required by the crawler)
import os
import time

import requests
from bs4 import BeautifulSoup
#获取网页信息
def get_page(url):
    """Fetch *url* and return its HTML text, or None on failure.

    Returns None both for non-200 responses and for any network error.
    """
    headers = {
        # Pretend to be a desktop Chrome browser so the site serves normal pages.
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36'
    }
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.text
        return None
    # BUG FIX: bare `RequestException` was an undefined name (NameError when a
    # network error actually occurred); it lives in requests.exceptions.
    except requests.exceptions.RequestException:
        return None
#解析主页,返回所有套图标题和链接
def parse_page(html):
    """Parse the index page and yield the URL of each photo-set page.

    BUG FIX: the original bare ``yield`` produced ``None`` for every anchor,
    so the caller never received a usable link; yield the anchor's href.
    """
    soup = BeautifulSoup(html, 'lxml')
    # Photo-set links on the index page carry rel="bookmark".
    for anchor in soup.find_all('a', {'rel': 'bookmark'}):
        yield anchor.get('href')
#解析主页套图内图片链接
def parse_title(html):
    """Yield the ``src`` URL of every image on a photo-set page."""
    document = BeautifulSoup(html, 'lxml')
    # Images in the gallery are marked with border="0".
    pictures = document.find_all('img', {'border': '0'})
    yield from (picture.get('src') for picture in pictures)
def main(pagenumber):
    """Download every photo set listed on index page *pagenumber*.

    Creates ./photo/<set-name>/ folders and stores each image as a
    sequentially numbered .png file.
    """
    # BUG FIX: the original concatenated 'page/1' + str(pagenumber), which
    # produced page/10, page/11, ... instead of page/1, page/2, ...
    url = 'https://www.jdlingyu.mobi/collection/meizitu/page/' + str(pagenumber)
    html = get_page(url)
    if html is None:  # network error or non-200 response: skip this page
        return
    for link in parse_page(html):
        detail_html = get_page(link)
        if detail_html is None:  # skip sets whose page could not be fetched
            continue
        # Use the last path component of the link as the folder name so the
        # slashes in a full URL do not create unwanted nested directories.
        set_name = link.rstrip('/').split('/')[-1]
        folder_path = './photo/' + set_name + '/'
        if not os.path.exists(folder_path):  # create the folder only once
            os.makedirs(folder_path)
        for index, img_url in enumerate(parse_title(detail_html)):
            down = requests.get(img_url)
            img_name = folder_path + str(index) + '.png'
            # Write the image bytes; `with` closes the file automatically, so
            # the original explicit flush()/close() calls were redundant.
            with open(img_name, 'wb') as file:
                file.write(down.content)
if __name__ == '__main__':
    # BUG FIX: range(2) iterated pages 0 and 1, but the site numbers index
    # pages from 1. Crawl pages 1..2 (adjust the range as desired).
    for page in range(1, 3):
        main(page)
        time.sleep(1)  # be polite: pause between page fetches
刚入坑不久,尝试写了一个妹子图的爬虫。已经实现了翻页多页爬取,按标题分文件夹存放图片文件。思路是先爬主页上套图的标题和链接,再解析链接中的图片,利用生成器在下载图片的过程中创建以标题命名的文件夹。爬单页耗时55-60秒左右,感觉代码还可以优化,求指点。
页:
[1]