|
马上注册,结交更多好友,享用更多功能^_^
您需要 登录 才可以下载或查看,没有账号?立即注册
x
我用anaconda3按照课堂代码输入一片后,总是出现
ValueError: unknown url type: '//ws3.sinaimg.cn/mw600/7b386511gy1fjjhvsl2zmj20zk0npn3s.jpg'的错误,仔细检查之后没有发现原因出在哪里,
- # -*- coding: utf-8 -*-
- """
- Created on Thu Sep 14 20:51:52 2017
- @author: Administrator
- """
- import urllib.request
- import os
- #import random
- # proxies = ['119.6.144.70:81','111.1.36.9:80','203.144.144.162:8080']
- # proxy = random.choice(proxies)
- # proxy_support = urllib.request.ProxyHandler({'http':proxy})
- # opener = urllib.request.build_opener(proxy_support)
- # urllib.request.install_opener(opener)
- def url_open(url):
- req = urllib.request.Request(url)
- req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0 Name')
- response = urllib.request.urlopen(url)
- html = response.read()
- return html
- def get_page(url):
- html = url_open(url).decode('utf-8')
- a = html.find('current-comment-page') + 23
- b = html.find(']',a)
- return html[a:b]
-
-
- def find_imgs(url):
- html = url_open(url).decode('utf-8')
- img_addrs = []
-
- a = html.find('img src=')
-
- while a != -1:
- b = html.find('.jpg',a, a+255)
- if b != -1:
- img_addrs.append(html[a+9:b+4])
- else:
- b = a + 9
-
- a = html.find('img src=', b)
- return img_addrs
-
- def save_imgs(folder,img_addrs):
- for each in img_addrs:
- filename = each.split('/')[-1]
- with open(filename, 'wb') as f:
- img = url_open(each)
- f.write(img)
-
- def download_mm(folder='OOXX',pages=10):
- os.makedirs(folder)
- os.chdir(folder)
-
- url = 'http://jandan.net/ooxx/'
- page_num = int(get_page(url))
-
- for i in range(pages):
- page_num -= i
- page_url = url + 'page' + str(page_num) + '#comments'
- img_addrs = find_imgs(page_url)
- save_imgs(folder,img_addrs)
-
-
- if __name__ == '__main__':
- download_mm()
复制代码
另外,好像加入代理模式之后,还会出现其他错误
'//ws3.sinaimg.cn/mw600/7b386511gy1fjjhvsl2zmj20zk0npn3s.jpg'
正确的是'http://ws3.sinaimg.cn/mw600/7b386511gy1fjjhvsl2zmj20zk0npn3s.jpg'
- import urllib.request
- import sys
- import os
- def url_open(url):
- req = urllib.request.Request(url)
- req.add_header('User-Agent',
- 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36')
- resp = urllib.request.urlopen(url)
- html = resp.read()
- # print(html)
- return html
- def get_page(url): # 获取图片页码
- html = url_open(url).decode('utf-8') # 解码,之后查找图片页码做准备
- a = html.find('current-comment-page') + 23 # 返回页码数的首位置
- b = html.find(']', a) # 返回页码数的末位置
- # print(html[a:b])
- return html[a:b] # 拼接返回页码
- def find_imgs(url): # 在当前URL查找图片
- html = url_open(url).decode('utf-8')
- img_addrs = []
- a = html.find('img src=') # 图片真实地址
- while a != -1:
- b = html.find('.jpg', a, a + 200)
- if b != -1:
- img_addrs.append(html[a + 9:b + 4])
- else:
- b = a + 9
- a = html.find('img src=', b)
- return img_addrs
- def save_imgs(img_addrs):
- for each in img_addrs:
- filename = each.split('/')[-1]
- with open(filename, 'wb') as f:
- img = url_open("http:"+each)
- f.write(img)
- def download(folder='Girls', pages=20):
- os.mkdir(folder) # 创建文件夹
- os.chdir(folder) # 切换到文件夹目录
- url = 'http://jandan.net/ooxx'
- page_num = int(get_page(url)) # 获取当前该图片页码
- for i in range(pages): # 依次下载指定页数的图片
- page_num -= 1
- page_url = url + '/page-' + str(page_num) + '#comments' # 图片的URL
- # print(page_url)
- img_addrs = find_imgs(page_url)
- save_imgs(img_addrs)
- if __name__ == '__main__':
- download()
复制代码
|
|