import os
import urllib.request

#定义网页打开并进行爬虫隐藏
def url_open(url):
    """Fetch *url* and return the raw response body as ``bytes``.

    A browser-like User-Agent header is attached so the request is less
    likely to be rejected as an automated crawler.
    """
    req = urllib.request.Request(url)
    req.add_header('User-Agent',
                   'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36')
    # BUG FIX: the original called urlopen(url) with the bare URL, so the
    # User-Agent header added above was never sent.  Pass the Request object,
    # and close the response deterministically with a context manager.
    with urllib.request.urlopen(req) as response:
        return response.read()
# Extract the page-number text from the item page.
def get_page(url):
    """Return the substring between the '/item/4600_2.html' marker
    (plus a fixed 19-character offset) and the next '<' character.

    The page at *url* is fetched via ``url_open`` and decoded as UTF-8;
    the extracted text is expected to be a page number.
    """
    page_text = url_open(url).decode('utf-8')
    start = page_text.find('/item/4600_2.html') + 19
    end = page_text.find('<', start)
    return page_text[start:end]
# Find image addresses on the page at the given URL.
def find_img(url):
    """Collect image addresses from the page at *url* and return them
    as a list of strings.

    Scans the decoded HTML for each occurrence of the marker 'img_src'
    and grabs the text from just past the marker up to (and including)
    the following '.jpg'.
    """
    html = url_open(url).decode('utf-8')
    img_addrs = []
    a = html.find('img_src')
    while a != -1:
        # Search only the next 255 characters so an unrelated '.jpg'
        # much further down the page is not matched.  find() returns -1
        # when nothing is found in the window.
        b = html.find('.jpg', a, a + 255)
        if b != -1:
            # BUG FIX: the original wrote html[a+9, b+4], which indexes
            # the string with a tuple and raises TypeError at runtime.
            # A slice was intended.
            img_addrs.append(html[a + 9:b + 4])
        else:
            # No '.jpg' nearby: skip past this marker and keep scanning.
            b = a + 9
        a = html.find('img_src', b)
    return img_addrs
# Download each image address and save it to disk.
def save_img(floder, img_addrs):
    """Download every URL in *img_addrs* and write it to the current
    working directory, named after the last path component of the URL.

    NOTE(review): the ``floder`` parameter (misspelling of "folder") is
    never used — the caller is expected to have ``os.chdir``'d into the
    target directory already.  Kept for interface compatibility.
    """
    for each in img_addrs:
        # split('/')[-1] takes the final path component as the file name.
        file_name = each.split('/')[-1]
        # BUG FIX: the original opened the literal string 'file_name'
        # (quoted), so every image overwrote a single file named
        # "file_name".  Open the variable instead.
        with open(file_name, 'wb') as f:
            f.write(url_open(each))
# Top-level driver: create the target folder and download the images,
# using the helper functions defined above.
def download(folder='爬虫图片', pages=20):
    """Download images for *pages* consecutive page numbers, counting
    down from the number reported by ``get_page``.

    folder -- directory to create and save images into.
    pages  -- how many pages to process.
    """
    # ROBUSTNESS FIX: os.mkdir raised FileExistsError when the folder was
    # left over from a previous run; makedirs(exist_ok=True) is idempotent.
    os.makedirs(folder, exist_ok=True)
    os.chdir(folder)
    url = 'https://www.meitulu.com/item/4600.html'
    page_num = int(get_page(url))
    for i in range(pages):
        page_num = page_num - 1
        # BUG FIX: the original split a sample URL on its own prefix and
        # took element [0], which is always '' — producing bare names like
        # '19.jpg'.  Build the URL from the base explicitly instead.
        img_url = ('https://mtl.gzhuibei.com/images/img/4600/'
                   + str(page_num) + '.jpg')
        img_addrs = find_img(img_url)
        # BUG FIX: the original passed the undefined name 'floder'
        # (NameError at runtime); the local variable is 'folder'.
        save_img(folder, img_addrs)
if __name__ == '__main__':
    # Script entry point.  (A stray '|' character that followed this
    # block in the original was a syntax error and has been removed.)
    download()