9. Exercise: write a spider for any Tieba forum and save its pages locally. Workflow:
build the URL list
iterate over it, sending a request and getting a response for each URL
save each page to a file
import requests

class TiebaSpider:
    def __init__(self, tieba_name):
        self.tieba_name = tieba_name
        # the next line splices tieba_name into the URL with two + signs
        self.url_temp = "https://tieba.baidu.com/f?kw=" + tieba_name + "&ie=utf-8&pn={}"
        self.headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/53"}

    def get_url_list(self):  # 1. build the URL list
        url_list = []
        for i in range(1000):
            url_list.append(self.url_temp.format(i * 50))  # Tieba steps pn by 50 per page
        return url_list

    def parse_url(self, url):  # send the request, get the response
        print(url)
        response = requests.get(url, headers=self.headers)
        return response.content.decode()

    def save_html(self, html_str, page_num):  # save the HTML string
        file_path = "{}-第{}页.html".format(self.tieba_name, page_num)  # e.g. "李毅-第4页.html"
        with open(file_path, "w", encoding="utf-8") as f:
            f.write(html_str)

    def run(self):  # main logic
        # 1. build the URL list
        url_list = self.get_url_list()
        # 2. iterate, sending a request and getting the response for each URL
        for url in url_list:
            html_str = self.parse_url(url)
            # 3. save it
            page_num = url_list.index(url) + 1  # page number
            self.save_html(html_str, page_num)

if __name__ == '__main__':
    tieba_spider = TiebaSpider("李毅")
    tieba_spider.run()
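One possible refinement, not part of the original code: url_list.index(url) rescans the list on every iteration, while enumerate yields the same page number without the linear lookup. A minimal sketch of an equivalent run(), assuming the class above:

    def run(self):  # same behaviour, but enumerate supplies the index directly
        for i, url in enumerate(self.get_url_list()):
            html_str = self.parse_url(url)
            self.save_html(html_str, i + 1)  # page numbers start at 1

In practice you would also want to pass timeout= to requests.get and wrap the call in try/except, so one failed request does not abort the whole crawl.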