问题
报错KeyError: 'foreignList' ??? 能再详细点吗?贴出代码呀! 值错误。foreignLis赋值的类型是不是不符合当前逻辑操作? 代码在这:# -*- coding: utf-8 -*-# 导入模块
import json
import requests
import pandas as pd
import csv
# 抓取数据
## 先把数据都爬下来,查看数据结构,明确要整理保存的数据
def catch_data1():
# url_1包含中国各省市当日实时数据(也有全球数据,但是腾讯改版后好久没更新了)
url_1 = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
response = requests.get(url=url_1).json()
data_1 = json.loads(response['data'])
return data_1
data_1 = catch_data1()
def catch_data2():
# url_2包含全球实时数据及历史数据、中国历史数据及每日新增数据
url_2 = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_other'
data_2 = json.loads(requests.get(url=url_2).json()['data'])
return data_2
data_2 = catch_data2()
lastUpdateTime = data_1["lastUpdateTime"]# 腾讯最近更新时间
directory = 'D:\\x4'
# 获取中国当日实时数据
china_data = data_1["areaTree"]["children"]
## 获取中国各城市当日实时数据
filename = directory + lastUpdateTime.split(' ') + "_china_city_data.csv"
with open(filename, "w+", encoding="utf_8_sig", newline="") as csv_file:
writer = csv.writer(csv_file)
header = ["province", "city_name", "total_confirm", "total_suspect", "total_dead", "total_heal",
"today_confirm", "lastUpdateTime"]
writer.writerow(header)
for j in range(len(china_data)):
province = china_data["name"]# 省份
city_list = china_data["children"]# 该省份下面城市列表
for k in range(len(city_list)):
city_name = city_list["name"]# 城市名称
total_confirm = city_list["total"]["confirm"]# 总确诊病例
total_suspect = city_list["total"]["suspect"]# 总疑似病例
total_dead = city_list["total"]["dead"]# 总死亡病例
total_heal = city_list["total"]["heal"]# 总治愈病例
today_confirm = city_list["today"]["confirm"]# 今日确诊病例
data_row = [province, city_name, total_confirm, total_suspect, total_dead,
total_heal, today_confirm, lastUpdateTime]
writer.writerow(data_row)
## 获取中国各省当日实时数据
filename = directory + lastUpdateTime.split(' ') + "_china_province_data.csv"
with open(filename, "w+", encoding="utf_8_sig", newline="") as csv_file:
writer = csv.writer(csv_file)
header = ["province", "total_confirm", "total_suspect", "total_dead", "total_heal",
"today_confirm", "lastUpdateTime"]
writer.writerow(header)
for i in range(len(china_data)):
province = china_data["name"]# 省份
total_confirm = china_data["total"]["confirm"]# 总确诊病例
total_suspect = china_data["total"]["suspect"]# 总疑似病例
total_dead = china_data["total"]["dead"]# 总死亡病例
total_heal = china_data["total"]["heal"]# 总治愈病例
today_confirm = china_data["today"]["confirm"]# 今日确诊病例
data_row =
writer.writerow(data_row)
# 获取中国历史数据及每日新增数据
chinaDayList = pd.DataFrame(data_2["chinaDayList"])# 中国历史数据
filename = directory + lastUpdateTime.split(' ') + "_china_history_data.csv"
header = ["date", "confirm", "suspect", "dead", "heal", "nowConfirm", "nowSevere", "deadRate", "healRate"]
chinaDayList = chinaDayList# 重排数据框列的顺序
chinaDayList.to_csv(filename, encoding="utf_8_sig", index=False)
chinaDayAddList = pd.DataFrame(data_2["chinaDayAddList"])# 中国每日新增数据
filename = directory + lastUpdateTime.split(' ') + "_china_DayAdd_data.csv"
header = ["date", "confirm", "suspect", "dead", "heal", "deadRate", "healRate"]
chinaDayAddList = chinaDayAddList# 重排数据框列的顺序
chinaDayAddList.to_csv(filename, encoding="utf_8_sig", index=False)
# 湖北与非湖北历史数据
def get_data_1():
with open(filename, "w+", encoding="utf_8_sig", newline="") as csv_file:
writer = csv.writer(csv_file)
header = ["date", "dead", "heal", "nowConfirm", "deadRate", "healRate"]# 定义表头
writer.writerow(header)
for i in range(len(hubei_notHhubei)):
data_row = ["date"], hubei_notHhubei["dead"], hubei_notHhubei["heal"],
hubei_notHhubei["nowConfirm"], hubei_notHhubei["deadRate"],
hubei_notHhubei["healRate"]]
writer.writerow(data_row)
hubei_notHhubei = data_2["dailyHistory"]# 湖北与非湖北历史数据
for w in ["hubei", "notHubei"]:
filename = directory + lastUpdateTime.split(' ') + "_" + w + "_history_data.csv"
get_data_1()
# 获取湖北省与非湖北每日新增数据
hubei_DayAdd = pd.DataFrame(data_2["dailyNewAddHistory"])# 中国历史数据
filename = directory + lastUpdateTime.split(' ') + "_hubei_notHubei_DayAdd_data.csv"
hubei_DayAdd.to_csv(filename, encoding="utf_8_sig", index=False)
# 获取武汉与非武汉每日新增数据
wuhan_DayAdd = data_2["wuhanDayList"]
filename = directory + lastUpdateTime.split(' ') + "_wuhan_notWuhan_DayAdd_data.csv"
with open(filename, "w+", encoding="utf_8_sig", newline="") as csv_file:
writer = csv.writer(csv_file)
header = ["date", "wuhan", "notWuhan", "notHubei"]# 定义表头
writer.writerow(header)
for i in range(len(wuhan_DayAdd)):
data_row = ["date"], wuhan_DayAdd["wuhan"]["confirmAdd"],
wuhan_DayAdd["notWuhan"]["confirmAdd"], wuhan_DayAdd["notHubei"]["confirmAdd"], ]
writer.writerow(data_row)
# 全球实时数据及历史数据
## 获取全球各地区实时数据
global_data = data_2['foreignList']
filename = directory + lastUpdateTime.split(' ') + "_global_data.csv"
with open(filename, "w+", encoding="utf_8_sig", newline="") as csv_file:
writer = csv.writer(csv_file)
header = ["country", "date", "total_confirm", "total_suspect", "total_dead", "total_heal",
"today_confirm", "lastUpdateTime"]
writer.writerow(header)
# 先写入中国的数据
chinadate = lastUpdateTime.split(' ').replace('-', '.')
chinaData = ["中国", chinadate, data_1["chinaTotal"]["confirm"], data_1["chinaTotal"]["suspect"],
data_1["chinaTotal"]["dead"], data_1["chinaTotal"]["heal"],
data_1["chinaAdd"]["confirm"], lastUpdateTime]
writer.writerow(chinaData)
# 再写入其他国家地区的数据
for i in range(len(global_data)):
country = global_data["name"]# 国家或地区
date = global_data["date"]# 日期
total_confirm = global_data["confirm"]# 总确诊病例
total_suspect = global_data["suspect"]# 总疑似病例
total_dead = global_data["dead"]# 总死亡病例
total_heal = global_data["heal"]# 总治愈病例
today_confirm = global_data["confirmAdd"]# 今日确诊病例
data_row =
writer.writerow(data_row)
## 出于需要,转换一下英文名
## 世界各国中英文对照Chinese_to_English.xlsx下载于百度百科,自己添加了“日本本土”和“钻石号邮轮”的英文名,不然merge不出来。
world_name = pd.read_excel("Chinese_to_English.xlsx", sep='\t', encoding="utf-8")
globaldata = pd.read_csv(filename, encoding="utf_8_sig")
globaldata = pd.merge(globaldata, world_name, left_on="country", right_on="中文", how="inner")
header = ["country", "英文", "date", "total_confirm", "total_suspect", "total_dead", "total_heal",
"today_confirm", "lastUpdateTime"]
globaldata = globaldata
globaldata.to_csv(filename, encoding="utf_8_sig", index=False)
## 获取全球历史数据(除中国以外的总量)
globalDailyHistory = data_2["globalDailyHistory"]
filename = directory + lastUpdateTime.split(' ') + "_globalDailyHistory.csv"
with open(filename, "w+", encoding="utf_8_sig", newline="") as csv_file:
writer = csv.writer(csv_file)
header = ["date", "total_dead", "total_heal", "newAddConfirm"]
writer.writerow(header)
for i in range(len(globalDailyHistory)):
date = globalDailyHistory["date"]# 日期
total_dead = globalDailyHistory["all"]["dead"]# 总死亡病例
total_heal = globalDailyHistory["all"]["heal"]# 总治愈病例
newAddConfirm = globalDailyHistory["all"]["newAddConfirm"]# 今日确诊病例
data_row =
writer.writerow(data_row)
## 获取全球总量实时数据(中国以外)
globalNow = data_2["globalStatis"]
filename = directory + lastUpdateTime.split(' ') + "_globalNow.csv"
with open(filename, "w+", encoding="utf_8_sig", newline="") as csv_file:
writer = csv.writer(csv_file)
header = ["nowConfirm", "confirm", "heal", "dead", "lastUpdateTime"]
writer.writerow(header)
data_row = , globalNow["confirm"], globalNow["heal"], globalNow["dead"], lastUpdateTime]
writer.writerow(data_row)
# 获取韩国、意大利、日本本土各城市当日实时数据
global_data = data_2["foreignList"]
dictt = {"韩国": "Korea", "意大利": "Italy", "日本本土": "Japan"}
for j in dictt.keys():
filename = directory + lastUpdateTime.split(' ') + "_" + dictt + "_city_data.csv"
with open(filename, "w+", encoding="utf_8_sig", newline="") as csv_file:
writer = csv.writer(csv_file)
header = ["country", "city_name", "date", "nameMap", "total_confirm", "total_suspect", "total_dead",
"total_heal", "confirmAdd", "lastUpdateTime"]
writer.writerow(header)
for k in range(len(global_data)):
if global_data["name"] == j:
city_list = global_data["children"]# 该国家下面城市列表
for h in range(len(city_list)):
city_name = city_list["name"]# 城市中文名
date = city_list["date"]# 日期
nameMap = city_list["nameMap"]# 城市英文名
total_confirm = city_list["confirm"]# 总确诊病例
total_suspect = city_list["suspect"]# 总疑似病例
total_dead = city_list["dead"]# 总死亡病例
total_heal = city_list["heal"]# 总治愈病例
confirmAdd = city_list["confirmAdd"]# 新增确诊病例
data_row = [j, city_name, date, nameMap, total_confirm, total_suspect, total_dead, total_heal,
confirmAdd, lastUpdateTime]
writer.writerow(data_row)
第110行 代码捏???? txxcat 发表于 2020-4-17 17:57
能再详细点吗?贴出代码呀!
# -*- coding: utf-8 -*-
# 导入模块
import json
import requests
import pandas as pd
import csv
# 抓取数据
## 先把数据都爬下来,查看数据结构,明确要整理保存的数据
def catch_data1():
# url_1包含中国各省市当日实时数据(也有全球数据,但是腾讯改版后好久没更新了)
url_1 = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
response = requests.get(url=url_1).json()
data_1 = json.loads(response['data'])
return data_1
data_1 = catch_data1()
def catch_data2():
# url_2包含全球实时数据及历史数据、中国历史数据及每日新增数据
url_2 = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_other'
data_2 = json.loads(requests.get(url=url_2).json()['data'])
return data_2
data_2 = catch_data2()
lastUpdateTime = data_1["lastUpdateTime"]# 腾讯最近更新时间
directory = 'D:\\x4'
# 获取中国当日实时数据
china_data = data_1["areaTree"]["children"]
## 获取中国各城市当日实时数据
filename = directory + lastUpdateTime.split(' ') + "_china_city_data.csv"
with open(filename, "w+", encoding="utf_8_sig", newline="") as csv_file:
writer = csv.writer(csv_file)
header = ["province", "city_name", "total_confirm", "total_suspect", "total_dead", "total_heal",
"today_confirm", "lastUpdateTime"]
writer.writerow(header)
for j in range(len(china_data)):
province = china_data["name"]# 省份
city_list = china_data["children"]# 该省份下面城市列表
for k in range(len(city_list)):
city_name = city_list["name"]# 城市名称
total_confirm = city_list["total"]["confirm"]# 总确诊病例
total_suspect = city_list["total"]["suspect"]# 总疑似病例
total_dead = city_list["total"]["dead"]# 总死亡病例
total_heal = city_list["total"]["heal"]# 总治愈病例
today_confirm = city_list["today"]["confirm"]# 今日确诊病例
data_row = [province, city_name, total_confirm, total_suspect, total_dead,
total_heal, today_confirm, lastUpdateTime]
writer.writerow(data_row)
## 获取中国各省当日实时数据
filename = directory + lastUpdateTime.split(' ') + "_china_province_data.csv"
with open(filename, "w+", encoding="utf_8_sig", newline="") as csv_file:
writer = csv.writer(csv_file)
header = ["province", "total_confirm", "total_suspect", "total_dead", "total_heal",
"today_confirm", "lastUpdateTime"]
writer.writerow(header)
for i in range(len(china_data)):
province = china_data["name"]# 省份
total_confirm = china_data["total"]["confirm"]# 总确诊病例
total_suspect = china_data["total"]["suspect"]# 总疑似病例
total_dead = china_data["total"]["dead"]# 总死亡病例
total_heal = china_data["total"]["heal"]# 总治愈病例
today_confirm = china_data["today"]["confirm"]# 今日确诊病例
data_row =
writer.writerow(data_row)
# 获取中国历史数据及每日新增数据
chinaDayList = pd.DataFrame(data_2["chinaDayList"])# 中国历史数据
filename = directory + lastUpdateTime.split(' ') + "_china_history_data.csv"
header = ["date", "confirm", "suspect", "dead", "heal", "nowConfirm", "nowSevere", "deadRate", "healRate"]
chinaDayList = chinaDayList# 重排数据框列的顺序
chinaDayList.to_csv(filename, encoding="utf_8_sig", index=False)
chinaDayAddList = pd.DataFrame(data_2["chinaDayAddList"])# 中国每日新增数据
filename = directory + lastUpdateTime.split(' ') + "_china_DayAdd_data.csv"
header = ["date", "confirm", "suspect", "dead", "heal", "deadRate", "healRate"]
chinaDayAddList = chinaDayAddList# 重排数据框列的顺序
chinaDayAddList.to_csv(filename, encoding="utf_8_sig", index=False)
# 湖北与非湖北历史数据
def get_data_1():
with open(filename, "w+", encoding="utf_8_sig", newline="") as csv_file:
writer = csv.writer(csv_file)
header = ["date", "dead", "heal", "nowConfirm", "deadRate", "healRate"]# 定义表头
writer.writerow(header)
for i in range(len(hubei_notHhubei)):
data_row = ["date"], hubei_notHhubei["dead"], hubei_notHhubei["heal"],
hubei_notHhubei["nowConfirm"], hubei_notHhubei["deadRate"],
hubei_notHhubei["healRate"]]
writer.writerow(data_row)
hubei_notHhubei = data_2["dailyHistory"]# 湖北与非湖北历史数据
for w in ["hubei", "notHubei"]:
filename = directory + lastUpdateTime.split(' ') + "_" + w + "_history_data.csv"
get_data_1()
# 获取湖北省与非湖北每日新增数据
hubei_DayAdd = pd.DataFrame(data_2["dailyNewAddHistory"])# 中国历史数据
filename = directory + lastUpdateTime.split(' ') + "_hubei_notHubei_DayAdd_data.csv"
hubei_DayAdd.to_csv(filename, encoding="utf_8_sig", index=False)
# 获取武汉与非武汉每日新增数据
wuhan_DayAdd = data_2["wuhanDayList"]
filename = directory + lastUpdateTime.split(' ') + "_wuhan_notWuhan_DayAdd_data.csv"
with open(filename, "w+", encoding="utf_8_sig", newline="") as csv_file:
writer = csv.writer(csv_file)
header = ["date", "wuhan", "notWuhan", "notHubei"]# 定义表头
writer.writerow(header)
for i in range(len(wuhan_DayAdd)):
data_row = ["date"], wuhan_DayAdd["wuhan"]["confirmAdd"],
wuhan_DayAdd["notWuhan"]["confirmAdd"], wuhan_DayAdd["notHubei"]["confirmAdd"], ]
writer.writerow(data_row)
# 全球实时数据及历史数据
## 获取全球各地区实时数据
global_data = data_2['foreignList']
filename = directory + lastUpdateTime.split(' ') + "_global_data.csv"
with open(filename, "w+", encoding="utf_8_sig", newline="") as csv_file:
writer = csv.writer(csv_file)
header = ["country", "date", "total_confirm", "total_suspect", "total_dead", "total_heal",
"today_confirm", "lastUpdateTime"]
writer.writerow(header)
# 先写入中国的数据
chinadate = lastUpdateTime.split(' ').replace('-', '.')
chinaData = ["中国", chinadate, data_1["chinaTotal"]["confirm"], data_1["chinaTotal"]["suspect"],
data_1["chinaTotal"]["dead"], data_1["chinaTotal"]["heal"],
data_1["chinaAdd"]["confirm"], lastUpdateTime]
writer.writerow(chinaData)
# 再写入其他国家地区的数据
for i in range(len(global_data)):
country = global_data["name"]# 国家或地区
date = global_data["date"]# 日期
total_confirm = global_data["confirm"]# 总确诊病例
total_suspect = global_data["suspect"]# 总疑似病例
total_dead = global_data["dead"]# 总死亡病例
total_heal = global_data["heal"]# 总治愈病例
today_confirm = global_data["confirmAdd"]# 今日确诊病例
data_row =
writer.writerow(data_row)
## 出于需要,转换一下英文名
## 世界各国中英文对照Chinese_to_English.xlsx下载于百度百科,自己添加了“日本本土”和“钻石号邮轮”的英文名,不然merge不出来。
world_name = pd.read_excel("Chinese_to_English.xlsx", sep='\t', encoding="utf-8")
globaldata = pd.read_csv(filename, encoding="utf_8_sig")
globaldata = pd.merge(globaldata, world_name, left_on="country", right_on="中文", how="inner")
header = ["country", "英文", "date", "total_confirm", "total_suspect", "total_dead", "total_heal",
"today_confirm", "lastUpdateTime"]
globaldata = globaldata
globaldata.to_csv(filename, encoding="utf_8_sig", index=False)
## 获取全球历史数据(除中国以外的总量)
globalDailyHistory = data_2["globalDailyHistory"]
filename = directory + lastUpdateTime.split(' ') + "_globalDailyHistory.csv"
with open(filename, "w+", encoding="utf_8_sig", newline="") as csv_file:
writer = csv.writer(csv_file)
header = ["date", "total_dead", "total_heal", "newAddConfirm"]
writer.writerow(header)
for i in range(len(globalDailyHistory)):
date = globalDailyHistory["date"]# 日期
total_dead = globalDailyHistory["all"]["dead"]# 总死亡病例
total_heal = globalDailyHistory["all"]["heal"]# 总治愈病例
newAddConfirm = globalDailyHistory["all"]["newAddConfirm"]# 今日确诊病例
data_row =
writer.writerow(data_row)
## 获取全球总量实时数据(中国以外)
globalNow = data_2["globalStatis"]
filename = directory + lastUpdateTime.split(' ') + "_globalNow.csv"
with open(filename, "w+", encoding="utf_8_sig", newline="") as csv_file:
writer = csv.writer(csv_file)
header = ["nowConfirm", "confirm", "heal", "dead", "lastUpdateTime"]
writer.writerow(header)
data_row = , globalNow["confirm"], globalNow["heal"], globalNow["dead"], lastUpdateTime]
writer.writerow(data_row)
# 获取韩国、意大利、日本本土各城市当日实时数据
global_data = data_2["foreignList"]
dictt = {"韩国": "Korea", "意大利": "Italy", "日本本土": "Japan"}
for j in dictt.keys():
filename = directory + lastUpdateTime.split(' ') + "_" + dictt + "_city_data.csv"
with open(filename, "w+", encoding="utf_8_sig", newline="") as csv_file:
writer = csv.writer(csv_file)
header = ["country", "city_name", "date", "nameMap", "total_confirm", "total_suspect", "total_dead",
"total_heal", "confirmAdd", "lastUpdateTime"]
writer.writerow(header)
for k in range(len(global_data)):
if global_data["name"] == j:
city_list = global_data["children"]# 该国家下面城市列表
for h in range(len(city_list)):
city_name = city_list["name"]# 城市中文名
date = city_list["date"]# 日期
nameMap = city_list["nameMap"]# 城市英文名
total_confirm = city_list["confirm"]# 总确诊病例
total_suspect = city_list["suspect"]# 总疑似病例
total_dead = city_list["dead"]# 总死亡病例
total_heal = city_list["heal"]# 总治愈病例
confirmAdd = city_list["confirmAdd"]# 新增确诊病例
data_row = [j, city_name, date, nameMap, total_confirm, total_suspect, total_dead, total_heal,
confirmAdd, lastUpdateTime]
writer.writerow(data_row)
翁坤1125 发表于 2020-4-17 18:04
代码捏????
在审核 xiaosi4081 发表于 2020-4-17 18:09
在审核
{:10_250:}尴尬啦 xiaosi4081 发表于 2020-4-17 18:09
在审核
就只报了这个错嘛 data_2只包含:
chinaDayList
chinaDayAddList
dailyNewAddHistory
dailyHistory
wuhanDayList
articleList
provinceCompare
cityStatis
nowConfirmStatis
没有'foreignList',所以报错。 txxcat 发表于 2020-4-17 18:37
data_2只包含:
chinaDayList
chinaDayAddList
那具体怎么修改呢 xiaosi4081 发表于 2020-4-17 18:58
那具体怎么修改呢
这个是网站的问题了,恐怕只有删掉没有的部分的代码才能运行,后面还有几个项目也没有,貌似都是国外的部分。
页:
[1]