|
马上注册,结交更多好友,享用更多功能^_^
您需要 登录 才可以下载或查看,没有账号?立即注册
x
Traceback (most recent call last):
File "G:\python33\lib\urllib\request.py", line 1182, in do_open
h.request(req.get_method(), req.selector, req.data, headers)
File "G:\python33\lib\http\client.py", line 1088, in request
self._send_request(method, url, body, headers)
File "G:\python33\lib\http\client.py", line 1126, in _send_request
self.endheaders(body)
File "G:\python33\lib\http\client.py", line 1084, in endheaders
self._send_output(message_body)
File "G:\python33\lib\http\client.py", line 922, in _send_output
self.send(msg)
File "G:\python33\lib\http\client.py", line 857, in send
self.connect()
File "G:\python33\lib\http\client.py", line 834, in connect
self.timeout, self.source_address)
File "G:\python33\lib\socket.py", line 512, in create_connection
raise err
File "G:\python33\lib\socket.py", line 503, in create_connection
sock.connect(sa)
TimeoutError: [WinError 10060] 由于连接方在一段时间后没有正确答复或连接的主机没有反应,连接尝试失败。
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "F:\python\python练习\056论一只爬虫的自我修养4:OOXX(源代码)\课堂演示\download_mm.py", line 72, in <module>
download_mm()
File "F:\python\python练习\056论一只爬虫的自我修养4:OOXX(源代码)\课堂演示\download_mm.py", line 63, in download_mm
page_num = int(get_page(url))
File "F:\python\python练习\056论一只爬虫的自我修养4:OOXX(源代码)\课堂演示\download_mm.py", line 24, in get_page
html = url_open(url).decode('utf-8')
File "F:\python\python练习\056论一只爬虫的自我修养4:OOXX(源代码)\课堂演示\download_mm.py", line 17, in url_open
response = urllib.request.urlopen(url)
File "G:\python33\lib\urllib\request.py", line 161, in urlopen
return opener.open(url, data, timeout)
File "G:\python33\lib\urllib\request.py", line 463, in open
response = self._open(req, data)
File "G:\python33\lib\urllib\request.py", line 481, in _open
'_open', req)
File "G:\python33\lib\urllib\request.py", line 441, in _call_chain
result = func(*args)
File "G:\python33\lib\urllib\request.py", line 1210, in http_open
return self.do_open(http.client.HTTPConnection, req)
File "G:\python33\lib\urllib\request.py", line 1184, in do_open
raise URLError(err)
urllib.error.URLError: <urlopen error [WinError 10060] 由于连接方在一段时间后没有正确答复或连接的主机没有反应,连接尝试失败。>
为什么报错啊,求教,刚开始跟小甲鱼学爬虫,大神勿怪!
代码如下:
import urllib.request
import os
import random
def url_open(url):
    """Fetch *url* through a randomly chosen HTTP proxy and return raw bytes.

    Raises urllib.error.URLError on connection failure -- NOTE(review): the
    proxies below are hard-coded free proxies and are very likely dead; a dead
    proxy is exactly what produces the reported WinError 10060 timeout.
    """
    req = urllib.request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.65 Safari/537.36')

    proxies = ['119.6.144.70:81', '111.1.36.9:80', '203.144.144.162:8080']
    proxy = random.choice(proxies)
    proxy_support = urllib.request.ProxyHandler({'http': proxy})
    opener = urllib.request.build_opener(proxy_support)

    # BUG FIX: the original called urlopen(url) with the bare string, so the
    # Request object -- and its User-Agent header -- was never actually sent.
    # Open `req` through the proxy opener directly (no global install_opener
    # side effect), and bound the wait with an explicit timeout so a dead
    # proxy fails fast instead of hanging until the OS-level timeout.
    response = opener.open(req, timeout=30)
    html = response.read()
    return html
def get_page(url):
    """Return the current page number (a string of digits) scraped from *url*.

    The page embeds its number in a fragment like
    ``current-comment-page">[2093]`` -- we locate the marker and slice out
    the digits between '[' and ']'.

    Raises ValueError if the marker is absent (page layout changed).
    """
    html = url_open(url).decode('utf-8')
    marker = 'current-comment-page'
    a = html.find(marker)
    if a == -1:
        # Fail loudly: the original's `find(...) + 23` silently produced a
        # garbage slice when the marker was missing.
        raise ValueError("marker 'current-comment-page' not found in page")
    # Skip past the marker plus the 3 chars `">[` to reach the first digit
    # (this is what the original magic offset of 23 meant: 20 + 3).
    a += len(marker) + 3
    b = html.find(']', a)
    return html[a:b]
def find_imgs(url):
    """Scan the page at *url* and collect every ``.jpg`` address found in an
    ``img src=`` attribute. Returns a list of address strings."""
    page = url_open(url).decode('utf-8')
    addrs = []

    pos = page.find('img src=')
    while pos != -1:
        # Look for the '.jpg' extension within a bounded window after the tag.
        end = page.find('.jpg', pos, pos + 255)
        if end == -1:
            # No jpg nearby (gif, external widget, ...): resume scanning just
            # past this 'img src=' occurrence.
            resume_at = pos + 9
        else:
            # pos + 9 skips the 9 characters of `img src="`; end + 4 keeps
            # the '.jpg' extension in the slice.
            addrs.append(page[pos + 9:end + 4])
            resume_at = end
        pos = page.find('img src=', resume_at)

    return addrs
def save_imgs(folder, img_addrs):
    """Download every address in *img_addrs* into the current directory.

    *folder* is accepted for interface compatibility but is not used here:
    the caller has already chdir'd into it. The file name is the last path
    component of each address.
    """
    for addr in img_addrs:
        name = addr.split('/')[-1]
        with open(name, 'wb') as out:
            out.write(url_open(addr))
def download_mm(folder='OOXX', pages=10):
    """Download the newest *pages* pages of images into *folder*.

    folder -- directory created under the CWD; becomes the CWD afterwards.
    pages  -- how many consecutive pages to fetch, counting back from the
              newest page number reported by the site.
    """
    # exist_ok avoids the FileExistsError the original os.mkdir raised
    # whenever the script was re-run.
    os.makedirs(folder, exist_ok=True)
    os.chdir(folder)

    url = "http://jandan.net/ooxx/"
    page_num = int(get_page(url))

    for i in range(pages):
        # BUG FIX: the original did `page_num -= i` inside the loop, which
        # subtracts 0, 1, 2, ... cumulatively and so fetches pages
        # newest, newest-1, newest-3, newest-6, ... skipping pages.
        # Subtracting the loop offset from the fixed start gives the
        # intended consecutive sequence.
        page_url = url + 'page-' + str(page_num - i) + '#comments'
        img_addrs = find_imgs(page_url)
        save_imgs(folder, img_addrs)


if __name__ == '__main__':
    download_mm()
|
|