马上注册,结交更多好友,享用更多功能^_^
您需要 登录 才可以下载或查看,没有账号?立即注册
x
爬取妹子图
https://www.mm131.net/xinggan/5164.html
爬取到的都是同一张广告图片,应该是触发了反爬机制。
论坛发不了图片,2333,所以把图片转码后发出来了。
import urllib.request as ur
import os
import random
# Shared HTTP headers: spoof a desktop Chrome User-Agent so the site
# serves normal pages instead of blocking the default urllib UA.
headers = {}
headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
# Pool of HTTP proxies; get_proxy() picks one at random per request.
# NOTE(review): these are public proxy addresses and are likely stale — verify.
proxy_ip = [{'http':'125.123.122.52:9999'}, {'http':'61.189.242.243:55484'}, {'http':'120.26.199.103:8118'}, {'http':'117.90.4.17:9000'}, {'http':'125.123.66.53:9999'}, {'http':'125.123.127.188:9999'}, {'http':'180.117.128.145:8118'}, {'http':'183.129.244.16:12154'}]
def get_proxy():
    """Build a urllib opener that routes requests through a random proxy.

    Returns:
        An ``OpenerDirector`` whose ``ProxyHandler`` uses one randomly
        chosen entry from the module-level ``proxy_ip`` list.
    """
    # random.choice avoids the previous hard-coded randint(0, 7) bound,
    # so the proxy pool can grow or shrink without breaking this function.
    ip = random.choice(proxy_ip)
    proxy_support = ur.ProxyHandler(ip)
    opener = ur.build_opener(proxy_support)
    return opener
def save_pic(pic_link, pic_name, dir_name, head=headers):
    """Download one picture and write it to '\\<dir_name>\\<pic_name>.jpg'.

    Args:
        pic_link: direct URL of the image.
        pic_name: file name (without extension) to save under.
        dir_name: folder name; get_name() creates it at the drive root.
        head: HTTP headers dict; defaults to the module-level ``headers``.
    """
    req = ur.Request(url=pic_link, headers=head)
    opener = get_proxy()
    # Close the HTTP response explicitly instead of leaking the connection.
    with opener.open(req) as resp:
        pic_content = resp.read()
    # NOTE(review): the leading '\\' targets the current drive's root,
    # matching the folder created in get_name() — confirm this is intended.
    path = '\\' + dir_name + '\\' + pic_name + '.jpg'
    with open(path, 'wb') as file:
        file.write(pic_content)
    return
def get_name(url, head=headers):
    """Fetch the gallery page at *url* and return the gallery title.

    Side effect: creates a folder named '\\<title>' (at the current drive
    root) if it does not already exist, for save_pic() to write into.

    Args:
        url: first page of the gallery.
        head: HTTP headers dict; defaults to the module-level ``headers``.

    Returns:
        The gallery title extracted from the page's <h5> heading.
    """
    req = ur.Request(url=url, headers=head)
    opener = get_proxy()
    # Close the HTTP response explicitly instead of leaking the connection.
    with opener.open(req) as resp:
        temp = resp.read().decode('gbk')
    # The title sits between the '<div class="content">' marker (+30 skips
    # the intervening heading markup) and the closing '</h5>' tag.
    a = temp.find('<div class="content">') + 30
    b = temp.find('</h5>', a)
    name = temp[a:b]
    folder = '\\%s'%name
    if not os.path.exists(folder):
        os.mkdir(folder)
    return name
class pic():
    """Iterator over the image pages of one mm131 gallery.

    Each call to __next__ fetches the current page, extracts the direct
    image URL from the page HTML, advances to the next page, and raises
    StopIteration once no further pagination link exists.
    """
    def __init__(self, url, head, name, h=headers):
        # head is the site prefix (e.g. 'https://www.mm131.net/xinggan/');
        # url is stored relative to it so later pages can be re-joined.
        self.head = head
        self.url = url.replace(self.head, '')
        self.name = name        # gallery title, used to locate the <img> alt text
        self.page = 1           # 1-based page counter within the gallery
        self.islast = False     # set once no "next page" link is found
        self.h = h              # request headers
    def __iter__(self):
        return self
    def __next__(self):
        """Return (image_url, page_name) for the current page, then advance."""
        if self.islast:
            raise StopIteration
        url = self.head + self.url
        req = ur.Request(url=url, headers=self.h)
        opener = get_proxy()
        temp = opener.open(req).read().decode('gbk')
        # The image tag's alt text is '<title>(图N)'; the image URL begins 7
        # characters past the end of that text and runs up to '" />'.
        page_name = self.name + '(图%d)'%self.page
        temp_pic = temp.find(page_name) + len(page_name) + 7
        temp_pic_end = temp.find('" />', temp_pic)
        pic_link = temp[temp_pic:temp_pic_end]
        self.page += 1
        # Look for the pagination link carrying the next page number; if it
        # is absent (find returns -1) this was the last page.
        a = temp.find('\' class="page-en">%d</a>'%self.page)
        if a == -1:
            self.islast = True
        # Relative URL of the next page sits between the preceding quote
        # and the link marker found above.
        b = temp.rfind('\'', 0, a) + 1
        self.url = temp[b:a]
        return (pic_link, page_name)
def downloader(url = 'https://www.mm131.net/xinggan/5164.html', head = 'https://www.mm131.net/xinggan/'):
    """Download every image of the gallery at *url* into a folder named
    after the gallery title (created by get_name()).

    Args:
        url: first page of the gallery.
        head: site prefix stripped from / re-joined to page links.
    """
    gallery_name = get_name(url)
    gallery = pic(url, head, gallery_name)
    # enumerate replaces the hand-rolled page counter.
    for page, (link, pic_name) in enumerate(gallery, start=1):
        print('正在寻找第%d张图片...'%page)
        print('正在下载第%d张图片...'%page)
        save_pic(link, pic_name, gallery_name)
        print('第%d张图片下载完成'%page)
if __name__ == '__main__':
    # Run the scraper with the default gallery URL when executed directly.
    downloader()
该网站有 umuuid 参数加密的反爬虫机制,我还没找到破解的方法。
|