|
|
马上注册,结交更多好友,享用更多功能^_^
您需要 登录 才可以下载或查看,没有账号?立即注册
x
import urllib.request
import sys
import os
def url_open(url):
req = urllib.request.Request(url)
req.add_header('User-Agent',
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36')
resp = urllib.request.urlopen(url)
html = resp.read()
# print(html)
return html
def get_page(url): # 获取图片页码
html = url_open(url).decode('utf-8') # 解码,之后查找图片页码做准备
a = html.find('current-comment-page') + 23 # 返回页码数的首位置
b = html.find(']', a) # 返回页码数的末位置
# print(html[a:b])
return html[a:b] # 拼接返回页码
def find_imgs(url): # 在当前URL查找图片
html = url_open(url).decode('utf-8')
img_addrs = []
a = html.find('img src=') # 图片真实地址
while a != -1:
b = html.find('.jpg', a, a + 200)
if b != -1:
img_addrs.append(html[a + 9:b + 4])
else:
b = a + 9
a = html.find('img src=', b)
return img_addrs
def save_imgs(img_addrs):
for each in img_addrs:
filename = each.split('/')[-1]
with open(filename, 'wb') as f:
img = url_open("http:"+each)
f.write(img)
def download(folder='Girls', pages=20):
os.mkdir(folder) # 创建文件夹
os.chdir(folder) # 切换到文件夹目录
url = 'http://jandan.net/ooxx'
page_num = int(get_page(url)) # 获取当前该图片页码
for i in range(pages): # 依次下载指定页数的图片
page_num -= 1
page_url = url + '/page-' + str(page_num) + '#comments' # 图片的URL
# print(page_url)
img_addrs = find_imgs(page_url)
save_imgs(img_addrs)
if __name__ == '__main__':
download()
================= RESTART: G:\py3.6\Python_works\spider33.py =================
Traceback (most recent call last):
File "G:\py3.6\Python_works\spider33.py", line 65, in <module>
download()
File "G:\py3.6\Python_works\spider33.py", line 54, in download
page_num = int(get_page(url)) # 获取当前该图片页码
File "G:\py3.6\Python_works\spider33.py", line 17, in get_page
html = url_open(url).decode('utf-8') # 解码,之后查找图片页码做准备
File "G:\py3.6\Python_works\spider33.py", line 10, in url_open
resp = urllib.request.urlopen(url)
File "G:\py3.6\lib\urllib\request.py", line 223, in urlopen
return opener.open(url, data, timeout)
File "G:\py3.6\lib\urllib\request.py", line 532, in open
response = meth(req, response)
File "G:\py3.6\lib\urllib\request.py", line 642, in http_response
'http', request, response, code, msg, hdrs)
File "G:\py3.6\lib\urllib\request.py", line 570, in error
return self._call_chain(*args)
File "G:\py3.6\lib\urllib\request.py", line 504, in _call_chain
result = func(*args)
File "G:\py3.6\lib\urllib\request.py", line 650, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 403: Forbidden
>>>
把
resp = urllib.request.urlopen(url)
改成
resp = urllib.request.urlopen(req)
大概是网站的反爬虫机制阻止了你的程序
|
|