|
10鱼币
- #-*- codeing = utf-8 -*-
- import urllib.request
- import re
- #import xlwt
- import bs4
def main():
    """Entry point: scrape joke texts from qiushibaike and save them to a txt file."""
    base_url = 'https://www.qiushibaike.com/text/page/'
    jokes = getData(base_url)
    saveData('糗事百科段子.txt', jokes)
-
def url_open(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    Returns an empty string if the request fails (the error code/reason is
    printed instead of raised, matching the original best-effort behavior).
    """
    # A browser-like User-Agent so the site does not reject the scraper.
    head = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}
    req = urllib.request.Request(url, headers=head)
    html = ""
    try:
        # Fix: use a context manager so the response is always closed
        # (the original leaked the connection on success).
        with urllib.request.urlopen(req) as response:
            html = response.read().decode('utf-8')
    except urllib.error.URLError as e:
        if hasattr(e, 'code'):
            print(e.code)
        if hasattr(e, 'reason'):
            print(e.reason)
    return html
def getData(url):
    """Download *url* and return a list of joke texts.

    Each joke lives in a <span> inside a <div class="content"> on the page.
    """
    html = url_open(url)
    bs = bs4.BeautifulSoup(html, 'html.parser')
    t_list = bs.find_all('div', attrs={'class': 'content'})
    text_list = []
    for item in t_list:
        span = item.find('span')
        # Fix: the original crashed with AttributeError when a content div
        # had no <span> child; skip such items instead.
        if span is not None:
            text_list.append(span.text)
    return text_list
def saveData(fileurl, t_list):
    """Append every text in *t_list* to the file at *fileurl*, one per line.

    Fixes over the original: the file is opened once instead of once per
    item, and a newline is written after each entry so the jokes do not
    run together in the output file.
    """
    with open(fileurl, 'a+', encoding='utf-8') as f:
        for each in t_list:
            f.write(each)
            f.write('\n')
- if __name__ == "__main__":
- main()
复制代码
以上代码是我从糗事百科中爬出来的段子,把爬出来的内容写入txt文件,在txt文件中出现乱码
大神们看看我!!!
|
最佳答案
查看完整内容
试了一下,没有遇到乱码问题,楼主尝试重新运行一下
|