|
马上注册,结交更多好友,享用更多功能^_^
您需要 登录 才可以下载或查看,没有账号?立即注册
x
- import urllib.request#访问网页必定要引入的模块
- import os
- import random
- def url_open(url):
- req=urllib.request.Request(url)
- req.add_header('User-Agent','Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3947.100 Safari/537.36')
- proxies=['119.6.144.70:81','111.1.36.9:80','203:144:144:162:8080']
- proxy=random.choice(proxies)
- proxy_support=urllib.request.ProxyHandler({'http':proxy})
- opener=urllib.request.build_opener(proxy_support)
- urllib.request.install_opener(opener)
- response=urllib.request.urlopen(url)
- html=response.read()
- print(url)
- return html
- def get_page(url):
- html=url_open(url).decode('utf-8')
- a=html.find('current.comment-page')+23
- b=html.find(']',a)
- return html[a:b]
- def find_imgs(url):
- html=url_open(url).decode('utf-8')
- img_adds=[]
- a=html.find('img src=')
- while a!=-1:
- b=html.find('.jpg',a,a+255)
- if b!=-1:
- img_addrs.append(html[a+9:b+4])
- else:
- b=a+9
- a=html.find('img src=',b)
- for each in img_addrs:
- print(each)
-
- def save_imgs(img_addrs):
- for each in img_addrs:
- filename=each.split('/'),[-1]
- with open (filename,'wb') as f:
- omg=url_open(each)
- f.write(img)
- def download_mm(folder='ooxx',page=10):
- os.mkdir(folder)
- os.chdir(folder)
- url="http://jandan.net/ooxx/"
- page_num=int(get_page(url))
- for i in range(pages):
- page_num-=1
- page_url=url+'page'+str(page_num)+'#comments'
- img_addrs=find_imgs(page_url)
- save_imgs(folder,img_addrs)
- if __name__=='__main__':
- download_mm()
复制代码
这个是小甲鱼爬妹子图片的上课的代码,我照着敲得,显示错误有问题,求帮忙修改一下代码?修改有问题的地方麻烦特殊标记一下好让我知道哪里有问题谢谢,错误的原因最好也说下 |
|