The scrape was supposed to collect data from five pages, but each page's results get overwritten by the next one, so only the fifth page's data ends up in the file.
import requests
import re
import csv

for i in range(5):
    url = "https://movie.douban.com/top250?start={}&filter=".format(i * 25)
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36"
    }
    res = requests.get(url, headers=headers)
    page_content = res.text
    # parse the data
    obj = re.compile(r'<li>.*?</a>.*?<span class="title">(?P<name>.*?)'
                     r'</span>.*?<p class="">.*?<br>(?P<year>.*?) .*?<span '
                     r'class="rating_num" property="v:average">(?P<value>.*?)</span>.*?'
                     r'<span>(?P<num>.*?)人评价</span>', re.S)
    result = obj.finditer(page_content)
    f = open("data.csv", mode="w", encoding='utf-8')
    csvwriter = csv.writer(f)
    for i in result:
        # print(i.group("name"))
        # print(i.group("year"))
        # print(i.group("value"))
        # print(i.group("num"))
        dic = i.groupdict()
        dic['year'] = dic['year'].strip()
        csvwriter.writerow(dic.values())
f.close()
res.close()
print('over')

The problem with this code is that data.csv is reopened inside the loop: every pass calls open("data.csv", mode="w"), and mode "w" truncates the file each time it is opened, so each page's rows overwrite the previous page's and only the fifth page is left when the loop finishes (the earlier file objects are also never closed explicitly; only the last one is). The fix is to open the file once before the loop, keep writing every page's rows to the same writer, and close the file only after the loop has finished.
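The truncation is easy to see in isolation; a minimal sketch (demo.txt is just an arbitrary file name used for illustration):

# Every open(..., mode="w") truncates the file, so only the last write survives.
for page in range(5):
    f = open("demo.txt", mode="w", encoding="utf-8")
    f.write("row from page {}\n".format(page))
    f.close()

print(open("demo.txt", encoding="utf-8").read())  # prints only "row from page 4"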
The corrected code is as follows:
import requests
import re
import csv

f = open("data.csv", mode="w", encoding='utf-8')
csvwriter = csv.writer(f)
for i in range(5):
    url = "https://movie.douban.com/top250?start={}&filter=".format(i * 25)
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36"
    }
    res = requests.get(url, headers=headers)
    page_content = res.text
    # parse the data
    obj = re.compile(
        r'<li>.*?</a>.*?<span class="title">(?P<name>.*?)'
        r'</span>.*?<p class="">.*?<br>(?P<year>.*?) .*?<span '
        r'class="rating_num" property="v:average">(?P<value>.*?)</span>.*?'
        r'<span>(?P<num>.*?)人评价</span>', re.S)
    result = obj.finditer(page_content)
    for i in result:
        dic = i.groupdict()
        dic['year'] = dic['year'].strip()
        csvwriter.writerow(dic.values())
f.close()
res.close()
print('over')
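Two optional refinements, not part of the original answer: a with block closes the file automatically even if an exception is raised, and newline="" is what the csv module documentation recommends when passing a file object to csv.writer (it avoids blank lines between rows on Windows). The header row and the page/m variable names below are illustrative additions; the regex and User-Agent are reused from the code above, and the pattern is compiled once instead of once per page. A sketch under those assumptions:

import requests
import re
import csv

obj = re.compile(r'<li>.*?</a>.*?<span class="title">(?P<name>.*?)'
                 r'</span>.*?<p class="">.*?<br>(?P<year>.*?) .*?<span '
                 r'class="rating_num" property="v:average">(?P<value>.*?)</span>.*?'
                 r'<span>(?P<num>.*?)人评价</span>', re.S)
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36"
}

with open("data.csv", mode="w", encoding="utf-8", newline="") as f:
    csvwriter = csv.writer(f)
    csvwriter.writerow(["name", "year", "value", "num"])  # header row
    for page in range(5):
        url = "https://movie.douban.com/top250?start={}&filter=".format(page * 25)
        res = requests.get(url, headers=headers)
        for m in obj.finditer(res.text):
            dic = m.groupdict()
            dic["year"] = dic["year"].strip()
            csvwriter.writerow(dic.values())
        res.close()

print("over")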