Requesting the URLs one page at a time works fine, but as soon as I put them in a loop the problem appears:
URL: https://www.kuaidaili.com/free/inha/3 — only the trailing page number changes from page to page; the rest of the URL stays the same.
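For illustration, the per-page URLs can be built like this (just a minimal sketch of the pattern described above; the page count of 3 is arbitrary):

base_url = "https://www.kuaidaili.com/free/inha/"
for page_num in range(1, 4):
    # https://www.kuaidaili.com/free/inha/1, .../2, .../3
    print(base_url + str(page_num))

The full script is below.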
import urllib.request
import re
import os
import random

def url_open(url):
    req = urllib.request.Request(url)
    req.add_header("user-agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36")
    # pick one of the free proxies at random and install it as the global opener
    proxies = ["125.108.123.66:9000", "183.166.103.45:9999", "115.195.84.31:8118", "61.178.149.237:59042"]
    proxy = random.choice(proxies)
    proxy_support = urllib.request.ProxyHandler({'http': proxy})
    opener = urllib.request.build_opener(proxy_support)
    urllib.request.install_opener(opener)
    response = urllib.request.urlopen(req)
    html = response.read().decode("utf-8")
    return html

def split_addrs_port(temp_str):
    # temp_str looks like '"IP">36.249.48.41<' or '"PORT">9999<';
    # keep only the part between '>' and '<'
    [a, b] = temp_str.split(">", 1)
    [c, d] = b.split("<", 1)
    return c

def get_addrs_port(url):
    html = url_open(url)
    # match the IP and PORT table cells (the quantifiers are greedy,
    # but that does not change the result here)
    compile_ip = re.compile(r'("IP">(([0-1]?\d?\d|2[0-4]\d|25[0-5])\.){3}([0-1]?\d?\d|2[0-4]\d|25[0-5])<)')
    compile_port = re.compile(r'"PORT">\d{0,5}<')
    url_addrs_temp = compile_ip.findall(html)
    url_port_temp = compile_port.findall(html)
    list_addrs_port = []
    for i in range(len(url_addrs_temp)):
        url_addrs = split_addrs_port(str(url_addrs_temp[i][0]))
        url_port = split_addrs_port(str(url_port_temp[i]))
        url_addrs_port = url_addrs + ":" + url_port
        list_addrs_port.append(url_addrs_port)
    return list_addrs_port

def save_addrs_port(addrs_port):
    with open("ip.docx", "a") as f:
        for each in addrs_port:
            f.write(str(each) + "\n")

def download_url(folder="proxy_support", pages="2"):
    '''os.chdir("C:\\Users\\Chysial\\Desktop")
    os.mkdir(folder)
    os.chdir(folder)'''
    for i in range(int(pages)):
        page_num = i + 1
        url = "https://www.kuaidaili.com/free/inha/"
        pages_url = url + str(page_num)
        url_addrs_port = get_addrs_port(pages_url)
        '''save_addrs_port(url_addrs_port)'''
        print(url_addrs_port)

if __name__ == "__main__":
    download_url()
Result:
['36.249.48.41:9999', '113.124.92.190:9999', '113.195.202.209:9999', '163.204.245.192:9999', '220.249.149.241:9999', '115.218.2.38:9000', '220.176.33.151:9999', '58.32.34.219:8118', '123.163.96.8:9999', '116.196.87.86:20183', '113.120.35.180:9999', '182.149.83.171:9999', '111.160.169.54:41820', '118.113.246.159:9999', '60.13.42.67:9999']
Traceback (most recent call last):
File "C:/python/5.爬虫/proxy_support.py", line 55, in <module>
download_url()
File "C:/python/5.爬虫/proxy_support.py", line 50, in download_url
url_addrs_port = get_addrs_port(pages_url)
File "C:/python/5.爬虫/proxy_support.py", line 22, in get_addrs_port
html = url_open(url)
File "C:/python/5.爬虫/proxy_support.py", line 13, in url_open
response = urllib.request.urlopen(req)
File "C:\Users\Chysial\AppData\Local\Programs\Python\Python36\lib\urllib\request.py", line 223, in urlopen
return opener.open(url, data, timeout)
File "C:\Users\Chysial\AppData\Local\Programs\Python\Python36\lib\urllib\request.py", line 532, in open
response = meth(req, response)
File "C:\Users\Chysial\AppData\Local\Programs\Python\Python36\lib\urllib\request.py", line 642, in http_response
'http', request, response, code, msg, hdrs)
File "C:\Users\Chysial\AppData\Local\Programs\Python\Python36\lib\urllib\request.py", line 570, in error
return self._call_chain(*args)
File "C:\Users\Chysial\AppData\Local\Programs\Python\Python36\lib\urllib\request.py", line 504, in _call_chain
result = func(*args)
File "C:\Users\Chysial\AppData\Local\Programs\Python\Python36\lib\urllib\request.py", line 650, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 503: Service Temporarily Unavailable
I've been searching for ages and can't figure out where it goes wrong — please take a look! (Note: my regular expressions are a bit sloppy in that I didn't make them non-greedy, but that doesn't affect the results; please help me find out why the error occurs first.)
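One thing I'm considering trying (just a guess on my part, not a confirmed cause — maybe the site or the free proxy rejects rapid back-to-back requests) is to space the requests out and catch the HTTPError so I can see which page fails. The name download_url_with_delay and the 3-second delay are made up for illustration; get_addrs_port is the function from the code above:

import time
import urllib.error

def download_url_with_delay(pages="2", delay_seconds=3):
    # guess: wait between pages in case the 503 is rate limiting by the site
    # or a flaky free proxy, and report which page fails instead of crashing
    for i in range(int(pages)):
        page_url = "https://www.kuaidaili.com/free/inha/" + str(i + 1)
        try:
            print(get_addrs_port(page_url))  # get_addrs_port from the code above
        except urllib.error.HTTPError as e:
            print("page", i + 1, "failed with", e.code, e.reason)
        time.sleep(delay_seconds)  # pause before requesting the next page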