import urllib.request
import os
import base64

def url_open(url):
    # add a User-Agent header so the request looks like a normal browser
    req = urllib.request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36')
    # fetch the page
    response = urllib.request.urlopen(req)
    html = response.read()
    return html

# get the current page number
def get_page(url):
    html = url_open(url).decode('utf-8')
    # locate the page number (not sure how the offset works)
    a = html.find('current-comment-page') + 23
    b = html.find(']', a)
    return html[a:b]

# collect the image addresses on a page
def find_imgs(url):
    html = url_open(url).decode('utf-8')
    img_addrs = []
    # locate the image URLs (not sure about this offset either)
    a = html.find('img src=')
    # find() returns -1 when 'img src=' is not found
    while a != -1:
        b = html.find('.jpg', a, a + 255)
        # find() returns -1 when '.jpg' is not found in that range
        if b != -1:
            img_addrs.append('http:' + html[a+9:b+4])
        else:
            # no .jpg nearby: resume searching from just past the // position
            b = a + 9
        a = html.find('img src=', b)
    return img_addrs

# save the images
def save_imgs(folder, img_addrs):
    for each in img_addrs:
        # split on '/' and keep the last segment as the filename
        filename = each.split('/')[-1]
        with open(filename, 'wb') as f:
            # fetch the image as binary data
            img = url_open(each)
            # and write it to disk
            f.write(img)

def download_mm(folder='ooxx', pages=20):
    # create the target folder and switch into it
    os.mkdir(folder)
    os.chdir(folder)
    url = 'http://jandan.net/ooxx/'
    # read the current page number off the front page
    page_num = int(get_page(url))
    for i in range(pages):
        page_num -= i
        # base64-encode the date prefix
        num = '20200826-'
        bytes_num = num.encode('utf-8')
        str_num = base64.b64encode(bytes_num).decode('utf-8')
        # build the URL of the current page
        page_url = url + str_num + str(page_num) + '#comments'
        # collect the image addresses on that page
        img_addrs = find_imgs(page_url)
        # save them into the folder
        save_imgs(folder, img_addrs)

# call the main function
if __name__ == '__main__':
    download_mm()
Can anyone take a look at this for me? Why does it only crawl one page? pages is set to 20, yet it errors out after finishing the first page.
Also, how are those offsets calculated? The site I'm scraping is jandan, http://jandan.net/ooxx. Thanks!
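On the offsets: 'current-comment-page' is 20 characters long, and in the page source it appears as current-comment-page">[126], so +23 skips those 20 characters plus the three characters ">[ and lands on the first digit of the page number; find(']', a) then marks where the number ends. Likewise 'img src=' is 8 characters, so a+9 skips it plus the opening quote, and b+4 stretches the slice to include all four characters of '.jpg'.

As for stopping after one page: jandan's paginated URLs look like http://jandan.net/ooxx/MjAyMDA4MjYtMTI2#comments, where MjAyMDA4MjYtMTI2 is base64 of the whole string '20200826-126'. The code above encodes only the '20200826-' prefix and appends the page number as plain text, so the page_url it builds is malformed; every constructed page then fails or falls back to whatever jandan serves for a bad URL, and without the traceback it is hard to say more, but this is the first thing to fix. page_num -= i is also off: it subtracts 0, 1, 2, ... on successive iterations instead of stepping back one page at a time. A minimal sketch of the fix, assuming the date prefix is still valid for the pages requested (page_token is a hypothetical helper, not part of the original code):

import base64

def page_token(page_num, date_prefix='20200826-'):
    # encode the WHOLE "date-page" string:
    # "20200826-126" -> "MjAyMDA4MjYtMTI2"
    raw = date_prefix + str(page_num)
    return base64.b64encode(raw.encode('utf-8')).decode('utf-8')

# inside download_mm, the loop would then read:
# for i in range(pages):
#     page_url = url + page_token(page_num - i) + '#comments'
#     img_addrs = find_imgs(page_url)
#     save_imgs(folder, img_addrs)

You can sanity-check the token against the address bar: base64.b64encode(b'20200826-126') gives b'MjAyMDA4MjYtMTI2'. Following the site's own previous-comment-page link, as in the reply below, sidesteps the token format entirely.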
I usually use xpath (for reference):
import requests
from lxml import etree
import os

def main():
    dir_name = 'pics'
    if not os.path.exists(dir_name):
        os.mkdir(dir_name)
    os.chdir(dir_name)
    # num = int(input('How many pages to download? '))
    num = 3
    url = 'http://jandan.net/ooxx'
    headers = {'user-agent': 'firefox'}
    result = []
    # fetch the front page just to pick up the "previous page" link
    r = requests.get(url, headers=headers)
    html = etree.HTML(r.text)
    nx_page = html.xpath('//a[@class="previous-comment-page"]/@href')[0]
    for _ in range(num):
        r = requests.get('http:' + nx_page, headers=headers)
        html = etree.HTML(r.text)
        result.extend(html.xpath('//img[@referrerpolicy="no-referrer"]/@src'))
        nx_page = html.xpath('//a[@class="previous-comment-page"]/@href')[0]
    pic_num = len(result)
    print(f'{pic_num} images in total')
    dl_counter = 1
    for item in result:
        pic_name = item.split('/')[-1]
        try:
            r = requests.get('http:' + item, headers=headers, timeout=5)
        except Exception as e:
            print(e)
            # skip this image; otherwise the stale previous response gets written
            continue
        with open(pic_name, 'wb') as f:
            f.write(r.content)
        print(f'Downloaded {pic_name} ({dl_counter} so far).')
        dl_counter += 1

if __name__ == '__main__':
    main()
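Two notes on this version: the @href and @src values come back protocol-relative (they begin with //), which is why 'http:' is prepended before each request; and the first request is only used to grab the previous-comment-page link, so the images on the newest page itself are never collected.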