Missing data when scraping weather and exporting to CSV
import requests
from bs4 import BeautifulSoup
import csv

header = ('城市', '高温', '低温')  # CSV column names: city, high, low

def parse_page(url):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/108.0.1462.54 Safari/537.36',
    }
    response = requests.get(url, headers=headers)
    text = response.content.decode('utf-8')
    soup = BeautifulSoup(text, 'html5lib')  # html5lib repairs the malformed HTML on these pages
    conMidtab = soup.find('div', class_='conMidtab')
    tables = conMidtab.find_all('table')  # one table per province
    lst = []
    for table in tables:
        trs = table.find_all('tr')[2:]  # skip the two header rows
        for index, tr in enumerate(trs):
            tds = tr.find_all('td')
            city_td = tds[0]  # city cell
            if index == 0:
                city_td = tds[1]  # first data row: tds[0] is the province name
            info = {}
            city = list(city_td.stripped_strings)[0]  # text only
            temp_max_td = tds[-5]  # high temperature
            temp_max = list(temp_max_td.stripped_strings)[0]
            temp_min_td = tds[-2]  # low temperature
            temp_min = list(temp_min_td.stripped_strings)[0]
            info['城市'] = city
            info['高温'] = temp_max + '℃'
            info['低温'] = temp_min + '℃'
            lst.append(info)
    return lst
def writedata(lst):
    with open('weather.csv', 'w', encoding='utf-8', newline='') as file_obj:
        writer = csv.DictWriter(file_obj, fieldnames=header)
        writer.writeheader()
        writer.writerows(lst)
def main():
    lst = []
    urls = ['http://www.weather.com.cn/textFC/hb.shtml',   # North China
            'http://www.weather.com.cn/textFC/db.shtml',   # Northeast China
            'http://www.weather.com.cn/textFC/hd.shtml',   # East China
            'http://www.weather.com.cn/textFC/hz.shtml',   # Central China
            'http://www.weather.com.cn/textFC/hn.shtml',   # South China
            'http://www.weather.com.cn/textFC/xn.shtml',   # Southwest China
            'http://www.weather.com.cn/textFC/xb.shtml',   # Northwest China
            'http://www.weather.com.cn/textFC/gat.shtml']  # Hong Kong, Macao, Taiwan
    for url in urls:
        lst += parse_page(url)
    writedata(lst)

if __name__ == '__main__':
    main()
The data is complete when I don't export to CSV, but when I do export, only part of it ends up in the file. Printing in PyCharm gives 463 cities, yet the exported CSV has only 112 rows.

def writedata(lst):
    print(len(lst))  # this prints just 112
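One bug pattern that produces exactly this symptom (every city prints while parsing, yet the list handed to writedata is short) is a misindented return: if return lst sits inside the for table loop, parse_page returns after the first province table of each page. A minimal sketch of the pitfall, with made-up stand-in data:

def collect_all(tables):
    lst = []
    for table in tables:
        for row in table:
            lst.append(row)
    return lst              # correct: return after the loop over tables

def collect_first_table_only(tables):
    lst = []
    for table in tables:
        for row in table:
            lst.append(row)
        return lst          # bug: returns inside the loop, after the first table

fake_tables = [['a', 'b'], ['c', 'd'], ['e']]  # made-up stand-in for province tables
print(len(collect_all(fake_tables)))               # 5
print(len(collect_first_table_only(fake_tables)))  # 2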
suchocolate replied on 2023-1-5 12:46:
    def writedata(lst):
        print(len(lst))  # this prints just 112

Where did it go wrong?

suchocolate replied on 2023-1-5 12:46:
    def writedata(lst):
        print(len(lst))  # this prints just 112
With the CSV export removed, printing shows the full data:

import requests
from bs4 import BeautifulSoup

def parse_page(url):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/108.0.1462.54 Safari/537.36',
    }
    response = requests.get(url, headers=headers)
    text = response.content.decode('utf-8')
    soup = BeautifulSoup(text, 'html5lib')
    conMidtab = soup.find('div', class_='conMidtab')
    tables = conMidtab.find_all('table')
    for table in tables:
        trs = table.find_all('tr')[2:]  # skip the two header rows
        for index, tr in enumerate(trs):
            tds = tr.find_all('td')
            city_td = tds[0]  # city cell
            if index == 0:
                city_td = tds[1]  # first data row: tds[0] is the province name
            city = list(city_td.stripped_strings)[0]  # text only
            temp_max_td = tds[-5]  # high temperature
            temp_max = list(temp_max_td.stripped_strings)[0]
            temp_min_td = tds[-2]  # low temperature
            temp_min = list(temp_min_td.stripped_strings)[0]
            print(city, temp_max + '℃', temp_min + '℃')

def main():
    # lst = []
    urls = ['http://www.weather.com.cn/textFC/hb.shtml',   # North China
            'http://www.weather.com.cn/textFC/db.shtml',   # Northeast China
            'http://www.weather.com.cn/textFC/hd.shtml',   # East China
            'http://www.weather.com.cn/textFC/hz.shtml',   # Central China
            'http://www.weather.com.cn/textFC/hn.shtml',   # South China
            'http://www.weather.com.cn/textFC/xn.shtml',   # Southwest China
            'http://www.weather.com.cn/textFC/xb.shtml',   # Northwest China
            'http://www.weather.com.cn/textFC/gat.shtml']  # Hong Kong, Macao, Taiwan
    for url in urls:
        # call the parsing function
        parse_page(url)

if __name__ == '__main__':
    main()

Any experts around? Newbie asking for help.
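Not part of the original thread, but two quick checks can localize where the rows disappear. The sketch below assumes parse_page and writedata as defined in the first program above, and that weather.csv has already been written; it prints how many rows each region page contributes and how many rows actually landed in the file:

import csv

urls = ['http://www.weather.com.cn/textFC/hb.shtml',
        'http://www.weather.com.cn/textFC/db.shtml',
        'http://www.weather.com.cn/textFC/hd.shtml',
        'http://www.weather.com.cn/textFC/hz.shtml',
        'http://www.weather.com.cn/textFC/hn.shtml',
        'http://www.weather.com.cn/textFC/xn.shtml',
        'http://www.weather.com.cn/textFC/xb.shtml',
        'http://www.weather.com.cn/textFC/gat.shtml']

# 1) Per-page row counts: the eight numbers should sum to the expected 463.
total = 0
for url in urls:
    rows = parse_page(url)
    print(url, len(rows))
    total += len(rows)
print('parsed total:', total)

# 2) Rows that actually reached the file; should match the total above.
with open('weather.csv', encoding='utf-8', newline='') as f:
    print('csv rows:', len(list(csv.DictReader(f))))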