Kind experts, please help me out.
Also, could you take a look at how I can speed up the crawling?
# 1. requests: sends requests and fetches data from the server. Must be installed separately.
# 2. beautifulsoup: parses the page source. Must be installed separately.
import requests
from bs4 import BeautifulSoup
import time

headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.63 Safari/537.36"
}
aa = "https://mmzztt.com"  # the target site
resp = requests.get(aa, headers=headers)
resp.encoding = 'utf-8'
# parse
qq = BeautifulSoup(resp.text, "html.parser")
ww = qq.find('ul', attrs={'class': 'uk-grid uk-grid-match uk-child-width-1-4 uk-child-width-1-6@s g-list'}).find_all('a', attrs={"uk-inline u-thumb-f"})  # get the hot-tag links
c = 1
for a in ww:
    heji = a.get("href")  # take the album URL from the hot-tag link
    resp1 = requests.get(heji, headers=headers)  # send the request
    resp1.encoding = 'utf-8'
    qq1 = BeautifulSoup(resp1.text, "html.parser")
    yeshu = qq1.find('nav', attrs={"uk-container uk-padding-small m-pagination"}).find_next('li').find_next('li').find_next('li').find_next('li').find_next('li').text  # get the total number of pages
    yeshu = int(yeshu)  # convert the page count to an integer
    print(yeshu)  # print how many pages there are
    c = 1  # starting value of the page-number suffix
    n = 1  # starting value of the image counter
    while yeshu > 0:
        heji1 = heji + "page/%s/" % c  # append the page-number suffix to the URL
        # print(heji1)
        yeshu -= 1  # count down the remaining pages
        c += 1  # advance the page suffix by 1
        resp2 = requests.get(heji1, headers=headers)  # send the request
        resp2.encoding = 'utf-8'
        qq2 = BeautifulSoup(resp1.text, "html.parser")
        ww1 = qq2.find('ul', attrs={'class': 'uk-grid uk-grid-match uk-child-width-1-2@s uk-child-width-1-3@m g-list'}).find_all('img')  # get the image URLs
        # mingz = qq2.find('ul', attrs={'class': 'uk-grid uk-grid-match uk-child-width-1-2@s uk-child-width-1-3@m g-list'}).find_all('div', attrs={'class': 'uk-card-body uk-padding-remove'})  # get the image names
        for img in ww1:
            tupian = img.get('data-srcset')  # get the image URL
            # save the data
            time.sleep(1)
            print(tupian)
            f = open("%s.jpg" % n, mode="wb")  # save the file
            f.write(requests.get(tupian).content)  # pull the image down
            n += 1  # advance the image file counter
            print('success')
When downloading the images, you need to send headers as well.
As for speed: you would need proxy IPs. Without them, even multithreading will just get your IP banned, because a single IP hitting the site that fast looks like abuse. Here is your script with headers added to the image request, plus two small bug fixes: the inner pages were being parsed from resp1 instead of resp2, and the file handle was never closed. A minimal thread-pool-plus-proxy sketch follows the corrected code.
# 1. requests: sends requests and fetches data from the server. Must be installed separately.
# 2. beautifulsoup: parses the page source. Must be installed separately.
import requests
from bs4 import BeautifulSoup
import time

headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.63 Safari/537.36"
}
aa = "https://mmzztt.com"  # the target site
resp = requests.get(aa, headers=headers)
resp.encoding = 'utf-8'
# parse
qq = BeautifulSoup(resp.text, "html.parser")
ww = qq.find('ul', attrs={'class': 'uk-grid uk-grid-match uk-child-width-1-4 uk-child-width-1-6@s g-list'}).find_all('a', attrs={'class': 'uk-inline u-thumb-f'})  # get the hot-tag links
for a in ww:
    heji = a.get("href")  # take the album URL from the hot-tag link
    resp1 = requests.get(heji, headers=headers)  # send the request
    resp1.encoding = 'utf-8'
    qq1 = BeautifulSoup(resp1.text, "html.parser")
    yeshu = qq1.find('nav', attrs={'class': 'uk-container uk-padding-small m-pagination'}).find_next('li').find_next('li').find_next('li').find_next('li').find_next('li').text  # get the total number of pages
    yeshu = int(yeshu)  # convert the page count to an integer
    print(yeshu)  # print how many pages there are
    c = 1  # starting value of the page-number suffix
    n = 1  # starting value of the image counter
    while yeshu > 0:
        heji1 = heji + "page/%s/" % c  # append the page-number suffix to the URL
        yeshu -= 1  # count down the remaining pages
        c += 1  # advance the page suffix by 1
        resp2 = requests.get(heji1, headers=headers)  # send the request
        resp2.encoding = 'utf-8'
        qq2 = BeautifulSoup(resp2.text, "html.parser")  # fixed: parse resp2, not resp1
        ww1 = qq2.find('ul', attrs={'class': 'uk-grid uk-grid-match uk-child-width-1-2@s uk-child-width-1-3@m g-list'}).find_all('img')  # get the image URLs
        for img in ww1:
            tupian = img.get('data-srcset')  # get the image URL
            time.sleep(1)  # throttle: one request per second
            print(tupian)
            with open("%s.jpg" % n, mode="wb") as f:  # save the file and close it properly
                f.write(requests.get(tupian, headers=headers).content)  # fixed: download the image with headers
            n += 1  # advance the image file counter
            print('success')
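For the speed-up itself, below is a minimal sketch of the thread-pool-plus-proxy idea. Everything in it is illustrative, not part of the code above: PROXIES is a placeholder pool you would fill with proxies you actually have, download_image is a hypothetical helper, and image_urls stands in for the data-srcset values the crawler collects. It relies only on the standard-library concurrent.futures module and the proxies= argument that requests.get accepts.

import random
from concurrent.futures import ThreadPoolExecutor

import requests

HEADERS = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.63 Safari/537.36"
}

# Hypothetical proxy pool -- replace with proxies you actually have.
PROXIES = [
    {"http": "http://127.0.0.1:8001", "https": "http://127.0.0.1:8001"},
    {"http": "http://127.0.0.1:8002", "https": "http://127.0.0.1:8002"},
]

def download_image(numbered_url):
    """Fetch one image through a randomly chosen proxy and save it to disk."""
    n, url = numbered_url
    proxy = random.choice(PROXIES)  # rotate proxies so no single IP gets hammered
    resp = requests.get(url, headers=HEADERS, proxies=proxy, timeout=10)
    with open("%s.jpg" % n, mode="wb") as f:
        f.write(resp.content)
    return url

# image_urls stands in for the data-srcset values collected by the crawler above
image_urls = ["https://example.com/1.jpg", "https://example.com/2.jpg"]

# 8 worker threads download in parallel; each picks its own proxy per request
with ThreadPoolExecutor(max_workers=8) as pool:
    for url in pool.map(download_image, enumerate(image_urls, start=1)):
        print("saved", url)

The point of rotating proxies is that each individual IP stays under the site's rate limit, which is what actually lets you raise max_workers. On a single IP, keep the worker count low and keep the time.sleep(1) throttle.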