|
发表于 2018-9-8 15:45:01
|
显示全部楼层
import requests
import re
import os
import time
dir_path = r'd://nhentai' # directory where files are saved; must be created manually beforehand
# Desktop-browser User-Agent string sent with every request so the site
# serves normal HTML instead of blocking a bare python-requests client.
ua = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
# Shared request headers used by every requests.get() call in this module.
headers = {
    'User-Agent': ua
}
def urlretrieve(url, path, proxies=True):  # custom image-download helper
    """Download *url* and write the response body to *path*.

    Args:
        url: URL of the resource to fetch.
        path: Filesystem path the raw bytes are written to.
        proxies: When truthy, route the request through the local proxy
            at 127.0.0.1:1080 for both http and https.

    Returns:
        True on completion (kept for backward compatibility with callers
        that check the return value).
    """
    # Build the proxy mapping once instead of duplicating the whole
    # download/write sequence in two branches, and do not clobber the
    # boolean parameter with a dict as the original did.
    proxy_map = None
    if proxies:
        # requests expects proxy URLs to carry a scheme; a bare
        # host:port string is ambiguous/deprecated.
        proxy_map = {
            'http': 'http://127.0.0.1:1080',
            'https': 'http://127.0.0.1:1080',
        }
    # timeout keeps one stalled connection from hanging the whole crawl
    web = requests.get(url, headers=headers, proxies=proxy_map, timeout=30)
    with open(path, 'wb') as fp:
        fp.write(web.content)
    return True
def download_img(imgs, page_id):
    """Download the full-size counterpart of every thumbnail URL in *imgs*.

    Thumbnail URLs look like
    'https://t.nhentai.net/galleries/1257176/22t.png'; the full-size image
    lives at 'https://i.nhentai.net/galleries/1257176/22.png'.

    Args:
        imgs: Iterable of thumbnail URL strings scraped from a gallery page.
        page_id: Gallery id used as the per-work subdirectory name.
    """
    # One directory per work. makedirs(exist_ok=True) is race-free and also
    # creates dir_path itself if missing (os.mkdir crashed when the parent
    # directory was absent, and the check ran needlessly once per image).
    page_id_path = os.path.join(dir_path, page_id)
    os.makedirs(page_id_path, exist_ok=True)
    for img in imgs:
        img_re = re.match(r'.+?/(\d+)/(\d+)t(.+)', img)
        if img_re is None:
            # Unexpected URL layout: skip this entry instead of raising
            # AttributeError on .group() of None.
            continue
        link_id = img_re.group(1)     # media id in the CDN path (not the work id)
        img_id = img_re.group(2)      # image/page number within the work
        img_suffix = img_re.group(3)  # file extension, e.g. '.png'
        url = 'https://i.nhentai.net/galleries/%s/%s%s' % (link_id, img_id, img_suffix)  # full-size image URL
        img_path = os.path.join(page_id_path, img_id + img_suffix)  # destination file
        success = urlretrieve(url, img_path, proxies=False)  # custom downloader defined above
        if success:
            print('下载%s到%s成功' % (url, img_path))
def parse_search_page(url):
    """Fetch one search-results page and extract the gallery links on it.

    Args:
        url: Full search URL including the query string and page number.

    Returns:
        List of gallery path strings such as '/g/2444092'. Returns an
        empty list on a non-200 response — the original fell through and
        returned None, which made the caller's for-loop raise TypeError.
    """
    web = requests.get(url, headers=headers, timeout=30)
    if web.status_code == 200:
        return re.findall(r'<a href="(.*?)/" class="cover"', web.text)
    return []
def parse_id_page(links):
    """Visit each gallery page, scrape its thumbnails, and download them.

    Args:
        links: Iterable of gallery paths such as '/g/2444092'.
    """
    for link in links:
        # link looks like '/g/2444092'
        url = 'https://nhentai.net' + link
        page_id = link.split('/')[-1]
        # Send the same browser User-Agent as every other request in this
        # module — this call inconsistently omitted headers before.
        web = requests.get(url, headers=headers, timeout=30)
        if web.status_code == 200:
            imgs = re.findall(r'<div class="thumb-container">.+?data-src="(.+?)"', web.text, re.S)
            download_img(imgs, page_id)  # thumbnails parsed; hand off to the downloader
            time.sleep(5)  # throttle: pause 5 seconds after each finished work
def main():
    """Prompt for a search keyword and a page count, then crawl each page.

    Returns:
        False when the interactive input is invalid or aborted; otherwise
        None after all requested pages have been processed.
    """
    try:
        search = input('请输入你要搜索的关键字,如fate:')
        page = int(input('请输入你要下载的分页数,如2:'))
    except Exception as e:
        # Bad or aborted input: report it and bail out instead of crashing.
        print(e)
        return False
    # Local stdlib import keeps the top-of-file import block untouched.
    from urllib.parse import quote
    for i in range(1, page + 1):
        # quote() the keyword so spaces/CJK characters form a valid query
        # string — raw %s interpolation produced broken URLs for them.
        url = 'https://nhentai.net/search/?q=%s&page=%d' % (quote(search), i)
        links = parse_search_page(url)  # list of gallery paths for this page
        parse_id_page(links)            # visit each gallery and download its images
if __name__ == '__main__':
    main()
|
|