# coding: utf-8
import sys
from bs4 import BeautifulSoup
import re                              # regular expressions, for text matching
import urllib.request, urllib.error   # fetch page data from a given URL
import sqlite3                         # SQLite database operations

def main():
    baseurl = 'https://movie.douban.com/top250?start='
    datalist = getdata(baseurl)
    savepath = './豆瓣电影top250.xls'
    # savedata(savepath)
    # askURL(url)

# Crawl the pages
def getdata(baseurl):
    datalist = []
    for each in range(0, 10):   # call the page-fetching function 10 times (25 entries per page)
        url = baseurl + str(each * 25)
        html = askURL(url)
        sp = BeautifulSoup(html, 'html.parser')
        for i in sp.find_all('div', class_="item", limit=10):
            datalist.append(i)
    print(datalist)
    return datalist
    # 2. Parse the data entry by entry

# Get the page content of a single given URL
def askURL(url):   # send the request to the Douban server with mock browser headers
    header = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"}
    # the User-Agent tells the Douban server what kind of machine/browser we are
    # (essentially, what level of content we can accept)
    req = urllib.request.Request(url=url, headers=header)
    html = ""
    try:
        response = urllib.request.urlopen(req)
        html = response.read().decode("utf-8")
        # print(html)
    except urllib.error.URLError as e:
        if hasattr(e, 'code'):
            print(e.code)
        if hasattr(e, 'reason'):
            print(e.reason)
    # return html

def savedata(savepath):
    # 3. Save the data
    pass
The error message is:

Traceback (most recent call last):
  File "C:/Users/Administrator/Desktop/计算机/python/豆瓣爬虫项目/main.py", line 57, in <module>
    main()
  File "C:/Users/Administrator/Desktop/计算机/python/豆瓣爬虫项目/main.py", line 12, in main
    datalist = getdata(baseurl)
  File "C:/Users/Administrator/Desktop/计算机/python/豆瓣爬虫项目/main.py", line 24, in getdata
    sp = BeautifulSoup(html,'html.parser')
  File "C:\Users\Administrator\Desktop\计算机\venv\lib\site-packages\bs4\__init__.py", line 310, in __init__
    elif len(markup) <= 256 and (
TypeError: object of type 'NoneType' has no len()

Process finished with exit code 1
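
The traceback points at the call `BeautifulSoup(html, 'html.parser')`: `html` is `None`, and BeautifulSoup calls `len()` on the markup it receives. The cause is that `askURL` never returns anything — its final `return html` line is commented out, so the function implicitly returns `None`, and that `None` is what `getdata` hands to BeautifulSoup. A minimal sketch of the fix, assuming the rest of the script stays unchanged:

def askURL(url):
    # send the request to Douban with mock browser headers, as in the original
    header = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"}
    req = urllib.request.Request(url=url, headers=header)
    html = ""
    try:
        response = urllib.request.urlopen(req)
        html = response.read().decode("utf-8")
    except urllib.error.URLError as e:
        if hasattr(e, 'code'):
            print(e.code)
        if hasattr(e, 'reason'):
            print(e.reason)
    return html   # uncommented: without this line the function returns None

With the return restored, a failed request still yields the empty string "", which BeautifulSoup parses without error (it simply finds no items); if you would rather skip failed pages explicitly, a guard such as `if not html: continue` inside the loop in `getdata` would do that (this guard is an illustrative addition, not part of the original script).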