Lecture 055: On the Self-Cultivation of a Web Crawler, Part 4: OOXX
import urllib.request
import os
import base64
def url_open(url):
    # add a request header so the site does not reject the crawler
    req = urllib.request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36')
    # fetch the page
    response = urllib.request.urlopen(req)
    html = response.read()
    return html
# get the current page number
def get_page(url):
    html = url_open(url).decode('utf-8')
    # search (not sure how this offset is computed)
    a = html.find('current-comment-page') + 23
    b = html.find(']', a)
    return html[a:b]
# collect the image addresses on a page
def find_imgs(url):
    html = url_open(url).decode('utf-8')
    img_addrs = []
    # search (not sure how this offset is computed)
    a = html.find('img src=')
    # find() returns -1 when 'img src=' is not found
    while a != -1:
        b = html.find('.jpg', a, a+255)
        # find() returns -1 when '.jpg' is not found nearby
        if b != -1:
            img_addrs.append('http:' + html[a+9:b+4])
        else:
            # no .jpg here, resume searching just past this 'img src='
            b = a + 9
        a = html.find('img src=', b)
    return img_addrs
# save the images
def save_imgs(folder, img_addrs):
    for each in img_addrs:
        # split on '/' and keep the last piece as the filename
        filename = each.split('/')[-1]
        with open(filename, 'wb') as f:
            # fetch the image as binary data
            img = url_open(each)
            # write it to disk
            f.write(img)
def download_mm(folder='ooxx', pages=20):
    # create the download folder
    os.mkdir(folder)
    # switch into it so the images land there
    os.chdir(folder)
    url = 'http://jandan.net/ooxx/'
    # get the current page number
    page_num = int(get_page(url))
    for i in range(pages):
        page_num -= i
        # base64-encode the date prefix
        num = '20200826-'
        bytes_num = num.encode('utf-8')
        str_num = base64.b64encode(bytes_num).decode('utf-8')
        # build the address of the current page
        page_url = url + str_num + str(page_num) + '#comments'
        # collect the image addresses on that page
        img_addrs = find_imgs(page_url)
        # save them into the folder
        save_imgs(folder, img_addrs)

# entry point
if __name__ == '__main__':
    download_mm()
Can anyone help me take a look? Why does this only crawl one page? pages is set to 20, yet it finishes one page and then throws an error. Also, how are those offsets calculated? The site being crawled is jandan, http://jandan.net/ooxx. Any pointers appreciated.
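For what it's worth, the offsets appear to come straight from the lengths of the literal strings being matched, assuming the page source looks like <span class="current-comment-page">[105]</span> for the page number and <img src="//wx1.sinaimg.cn/....jpg"> for each image. A quick check:

print(len('current-comment-page">['))  # 23, skips ahead to the first digit of the page number
print(len('img src="'))                # 9, skips ahead to the leading // of the image URL
print(len('.jpg'))                     # 4, so b + 4 keeps the extension inside the slice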
I usually use xpath instead (for reference):

import requests
from lxml import etree
import os
def main():
    dir_name = 'pics'
    if not os.path.exists(dir_name):
        os.mkdir(dir_name)
    os.chdir(dir_name)
    # num = int(input('How many pages do you want to download? '))
    num = 3
    url = 'http://jandan.net/ooxx'
    headers = {'user-agent': 'firefox'}
    result = []
    r = requests.get(url, headers=headers)
    html = etree.HTML(r.text)
    # xpath() returns a list, so take the first match
    nx_page = html.xpath('//a[@class="previous-comment-page"]/@href')[0]
    for item in range(num):
        r = requests.get('http:' + nx_page, headers=headers)
        html = etree.HTML(r.text)
        result.extend(html.xpath('//img[@referrerpolicy="no-referrer"]/@src'))
        nx_page = html.xpath('//a[@class="previous-comment-page"]/@href')[0]
    pic_num = len(result)
    print(f'{pic_num} images in total')
    dl_counter = 1
    for item in result:
        pic_name = item.split('/')[-1]
        try:
            r = requests.get('http:' + item, headers=headers, timeout=5)
        except Exception as e:
            print(e)
            # skip this image if the request failed
            continue
        with open(pic_name, 'wb') as f:
            f.write(r.content)
        print(f'Downloaded {pic_name}, {dl_counter} so far.')
        dl_counter += 1

if __name__ == '__main__':
    main()

Once you're collecting data at scale you probably need proxy IPs too, right?

Good stuff, learned a lot.
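On the proxy question above: requests accepts a proxies mapping per request, so routing the crawler through a proxy is easy to bolt on. A minimal sketch, with a placeholder proxy address that would need to be replaced by a real one:

import requests

# placeholder proxy address, not a working endpoint
proxies = {
    'http': 'http://127.0.0.1:8888',
    'https': 'http://127.0.0.1:8888',
}
r = requests.get('http://jandan.net/ooxx',
                 headers={'user-agent': 'firefox'},
                 proxies=proxies,
                 timeout=5)
print(r.status_code)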
OP, I got a lot out of reading your code today. I tried it, and the problem should be in the encryption part; those few lines are wrong. The page number has to be base64-encoded together with the date prefix, i.e. encode '20201213-' + str(page_num) as one string:

num = '20201213-'
# encode the date prefix and the page number together
bytes_num = (num + str(page_num)).encode('utf-8')
str_num = base64.b64encode(bytes_num).decode('utf-8')
# print(base64.b64decode(str_num))  # check what was actually encoded
page_url = url + str_num + '#comments'
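To see what the corrected lines produce, here is a quick round trip with an assumed page number of 105 (any current page number works the same way):

import base64

page_num = 105                    # assumed for illustration
num = '20201213-'
str_num = base64.b64encode((num + str(page_num)).encode('utf-8')).decode('utf-8')
print(str_num)                    # MjAyMDEyMTMtMTA1
print(base64.b64decode(str_num))  # b'20201213-105'
print('http://jandan.net/ooxx/' + str_num + '#comments')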