|
马上注册,结交更多好友,享用更多功能^_^
您需要 登录 才可以下载或查看,没有账号?立即注册
x
- import urllib.request
- import os
- import re
- def url_open():
- req = urllib.request.Request(url)
- req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3013.3 Safari/537.36')
- response = urllib.request.urlopen(url)
- html = response.read()
- return html
-
- def get_page(url):
- html = url_open().decode('utf-8')
- p = r'current-comment-page">\[(\d{1,})\]'
- cp = re.findall(p, html)
- s = ''.join(cp)
- return s
- def find_imgs(url):
- html = url_open().decode('utf-8')
- p=r'<img src="//(.+\.jpg)'
- img_addrs = re.findall(p, html)
- for each in img_addrs:
- print(each)
- return img_addrs
-
- def save_imgs(folder, img_addrs):
- p=r'/(.+\.jpg)'
- for each in img_addrs:
- filename = re.findall(p,each)
- #filename = each.split('/')[-1]
- with open(filename, 'wb') as f:
- img = url_open(each)
- f.write(img)
- def download_pic(folder='img', pages=10):
- # os.mkdir(folder)
- os.chdir(folder)
- url = "http://jandan.net/pic/"
- page_num = int(get_page(url))
- for i in range(pages):
- page_num -= i
- page_url = url + 'page-' + str(page_num) + '#comments'
- img_addrs = find_imgs(page_url)
- save_imgs(folder, img_addres)
- if __name__ == '__main__':
- download_pic()
复制代码
Traceback (most recent call last):
File "E:\workspace\py\download_pic.py", line 74, in <module>
download_pic()
File "E:\workspace\py\download_pic.py", line 65, in download_pic
page_num = int(get_page(url))
File "E:\workspace\py\download_pic.py", line 14, in get_page
html = url_open().decode('utf-8')
File "E:\workspace\py\download_pic.py", line 6, in url_open
req = urllib.request.Request(url)
NameError: name 'url' is not defined
眼花花不知道在哪里写错,,正则也用的怪怪的 |
|