Scraping Tieba images:
import urllib.request as u_request
import re
import ssl

def open_url(url):
    header = {}
    header['User-Agent'] = 'Mozilla/5.0 (Macintosh; M1 Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
    req = u_request.Request(url, headers=header)
    # urlopen now seems to raise "urlopen error [SSL: CERTIFICATE_VERIFY_FAILED]",
    # so the default HTTPS certificate check is replaced with an unverified context
    ssl._create_default_https_context = ssl._create_unverified_context
    html = u_request.urlopen(req).read().decode('utf-8')
    return html

def get_img(html):
    # Personally I think this could be broadened to (jpg|png|jpeg); my regex is not great,
    # and I never hit images in other formats so I had no chance to test it,
    # but it should work (see the sketch after this snippet)
    p = r'<img class="BDE_Image" src="([^"]+\.jpg)"'
    image_list = re.findall(p, html)
    for each in image_list:
        filename = each.split('/')[-1]
        u_request.urlretrieve(each, filename, None)

if __name__ == '__main__':
    url = 'https://tieba.baidu.com/p/6251730285'
    get_img(open_url(url))
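On the comment about other image formats: here is a minimal sketch of the broadened pattern, run against a made-up HTML snippet (the example.com URLs are placeholders, not from any real page).

import re

# Made-up sample HTML, only to exercise the pattern
html = ('<img class="BDE_Image" src="https://example.com/a.jpg">'
        '<img class="BDE_Image" src="https://example.com/b.png">'
        '<img class="BDE_Image" src="https://example.com/c.jpeg">')

# Same structure as the pattern above, but the extension alternation accepts jpg, jpeg or png
p = r'<img class="BDE_Image" src="([^"]+\.(?:jpg|jpeg|png))"'
print(re.findall(p, html))
# ['https://example.com/a.jpg', 'https://example.com/b.png', 'https://example.com/c.jpeg']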
Scraping IP addresses:
import urllib.request as u_request
import re
import ssl

def open_url(url):
    header = {}
    header['User-Agent'] = 'Mozilla/5.0 (Macintosh; M1 Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
    req = u_request.Request(url, headers=header)
    ssl._create_default_https_context = ssl._create_unverified_context
    html = u_request.urlopen(req).read().decode('utf-8')
    return html

def get_ip(html):
    # Based on teacher 小甲鱼's version, with port-number capture added;
    # my regex is not great, so there is probably a cleaner solution
    p = r'((?:(?:[0,1]?\d?\d|2[0-4]\d|25[0-5])\.){3}(?:[0,1]?\d?\d|2[0-4]\d|25[0-5]))</td>\n<td>(\d+)</td>'
    iplist = re.findall(p, html)
    for each in iplist:
        string = each[0] + ':' + each[1]
        print(string)

if __name__ == '__main__':
    # The original address is blocked (and no longer lists any proxy IPs); this one is
    # blocked now too, but it does contain IPs and ports. Without special means the
    # page may be unreachable.
    url = 'http://cn-proxy.com/archives/218'
    get_ip(open_url(url))
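Once an ip:port pair has been scraped, it can be fed to urllib as a proxy. A minimal sketch, assuming a placeholder proxy address ('1.2.3.4:8080' is made up); whether the request actually goes through depends on the proxy being alive.

import urllib.request as u_request

def open_via_proxy(url, proxy):
    # proxy is an 'ip:port' string like the ones printed by get_ip()
    handler = u_request.ProxyHandler({'http': 'http://' + proxy,
                                      'https': 'http://' + proxy})
    opener = u_request.build_opener(handler)
    opener.addheaders = [('User-Agent', 'Mozilla/5.0')]
    return opener.open(url, timeout=10).read().decode('utf-8')

# '1.2.3.4:8080' is a placeholder; substitute one of the scraped addresses
# print(open_via_proxy('http://www.example.com', '1.2.3.4:8080')[:200])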