I've only just started with crawlers, so this one is written pretty roughly, but I still want to mark the occasion: it downloads the images from jandan.net/ooxx page by page.
If any veterans pass by, please point out what's lacking and what could be improved.
import requests
import os
import base64
import datetime

def url_open(url):
    # Fetch a URL with a browser User-Agent and return the raw response bytes
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}
    response = requests.get(url=url, headers=headers)
    return response.content
# Get the first base64-encoded page address
def get_page(url):
    html = url_open(url).decode()
    # The current page number sits at a fixed offset after 'current-comment-page'
    a = html.find('current-comment-page') + 23
    b = html.find(']', a)
    num = html[a:b]
    # The page token is 'YYYYMMDD-<page number>'; strftime zero-pads month AND day
    # (the original if/else only padded the month)
    now = datetime.date.today().strftime('%Y%m%d')
    num = now + '-' + num
    first_url = 'http://jandan.net/ooxx/' + base64.b64encode(num.encode('utf-8')).decode() + '#comments'
    return first_url
# Extract the real addresses of all images on a page
def get_img_url(url):
    html = url_open(url).decode()
    img_list = []
    a = html.find('img src=')
    while a != -1:
        # Look at most 255 characters ahead for the '.jpg' suffix
        b = html.find('.jpg', a, a + 255)
        if b != -1:
            # Skip the 11 characters of 'img src="//' to get the bare host/path
            img_url = 'http://' + html[a+11:b+4]
            img_list.append(img_url)
        else:
            b = a + 11
        a = html.find('img src=', b)
    return img_list
# Derive the next encoded address from the current one
def ch_url(url):
    temp = url.split('/')[4].split('#')[0]
    page = base64.b64decode(temp.encode('utf-8'))
    now = datetime.date.today().strftime('%Y%m%d')
    # Page numbers count down, so the next page is the current number minus one
    next_page = int(page.decode().split('-')[1]) - 1
    new_page = now + '-' + str(next_page)
    new_page = base64.b64encode(new_page.encode('utf-8')).decode()
    new_url = 'http://jandan.net/ooxx/' + new_page + '#comments'
    return new_url
# Collect all the encoded page addresses into a list
def get_all(url, pages):
    url_list = []
    first_url = get_page(url=url)
    url_list.append(first_url)
    for i in range(pages - 1):
        new_url = ch_url(url=first_url)
        url_list.append(new_url)
        first_url = new_url
    return url_list
# Save the images to disk, named after the last path segment of the URL
def save_img(img_list):
    for each in img_list:
        img = url_open(each)
        filename = each.split('/')[-1]
        with open(filename, 'wb') as f:
            f.write(img)
# Main function
def download_mm(folder='ooxx', pages=10):
    os.makedirs(folder, exist_ok=True)   # don't crash if the folder already exists
    os.chdir(folder)
    url = 'http://jandan.net/ooxx'
    url_list = get_all(url=url, pages=pages)
    for each in url_list:
        img_list = get_img_url(each)
        save_img(img_list)

if __name__ == '__main__':
    download_mm(folder=input('Folder to save into: '),
                pages=int(input('Number of pages to crawl: ')))
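
For anyone wondering about the "encrypted" addresses: the token that get_page builds and ch_url decodes is just base64 over a 'YYYYMMDD-<page number>' string, nothing more. A quick REPL check (the date and page number here are made-up values):

import base64

token = base64.b64encode('20191231-55'.encode('utf-8')).decode()
print(token)                             # MjAxOTEyMzEtNTU=
print(base64.b64decode(token).decode())  # 20191231-55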
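
Since I'm asking for suggestions anyway: one improvement I'm considering is replacing the manual find() loop in get_img_url with a single regular expression. A rough sketch, not tested against the live site; it takes the already-decoded page HTML rather than the URL, and assumes the same double-quoted, protocol-relative src attributes (img src="//host/path.jpg") that the find() version relies on:

import re

def get_img_url_re(html):
    # Capture the host/path of every protocol-relative .jpg image tag
    pattern = re.compile(r'img src="//(.+?\.jpg)"')
    return ['http://' + m for m in pattern.findall(html)]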