关于爬虫的问题,大佬捞捞我
我想爬取深交所官网上市公司的年报,只要年度报告,但是爬取出来了很多季度报告和报告摘要,求大佬指导,这个代码应该怎么修改才能只爬出年度报告。import os
import math
import json
import requests
from copy import deepcopy
# Announcement-list endpoint of the SZSE disclosure API.
URL = 'http://www.szse.cn/api/disc/announcement/annList'
HEADER = {
    'Host': 'www.szse.cn',
    'Origin': 'http://www.szse.cn',
    'Referer': 'http://www.szse.cn/disclosure/listed/fixed/index.html',
    'User-Agent': "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 "
                  "(KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36",
    'Content-Type': 'application/json',
    'Connection': 'keep-alive',
    'X-Request-Type': 'ajax',
    'X-Requested-With': 'XMLHttpRequest',
}
# Number of announcements requested per page.
PAGE_SIZE = 30
# Request template; get_pdf_url() deep-copies this and fills in stock/seDate.
PAYLOAD = {
    # FIX for the original question: restrict results to annual reports only.
    # Category ids: 010301 = annual report, 010303 = half-year report,
    # 010305 = Q1 report, 010307 = Q3 report.
    'bigCategoryId': ["010301"],
    'channelCode': ["fixed_disc"],
    'pageNum': 1,
    'pageSize': PAGE_SIZE,
    'seDate': ["", ""],
    'stock': ["000001"],
}
# Static download host; the API's attachPath is appended to this prefix.
PDF_URL_PREFIX = 'http://disc.static.szse.cn/download'
def get_pdf_url(code, begin_date, end_date):
    """
    Collect (file_name, pdf_url) pairs for every announcement of one company.

    NOTE(review): lines of the original were mangled by forum extraction
    (the bracketed list contents were stripped); the assignments and the
    file-name join below are reconstructed from the surrounding code.

    :param code: stock code, e.g. '000001'
    :param begin_date: start of the date range, 'YYYY-MM-DD' ('' = no bound)
    :param end_date: end of the date range, 'YYYY-MM-DD' ('' = no bound)
    :return: list of (file_name, pdf_url) tuples
    """
    payload = deepcopy(PAYLOAD)
    payload['stock'] = [code]
    payload['seDate'] = [begin_date, end_date]

    pdf_urls = []

    def collect(page_data):
        # One entry per announcement, named <title>_<YYYYMMDD>.
        for item in page_data:
            # publishTime looks like 'YYYY-MM-DD hh:mm:ss'; keep digits only.
            publish_day = ''.join(item['publishTime'].split()[0].split('-'))
            file_name = '_'.join([item['title'], publish_day])
            pdf_urls.append((file_name, PDF_URL_PREFIX + item['attachPath']))

    res = requests.post(URL, data=json.dumps(payload), headers=HEADER).json()
    collect(res['data'])
    # announceCount is the total number of hits; fetch the remaining pages.
    page_count = math.ceil(res['announceCount'] / PAGE_SIZE)
    for page in range(2, page_count + 1):
        payload['pageNum'] = page
        res = requests.post(URL, data=json.dumps(payload), headers=HEADER).json()
        collect(res['data'])
    return pdf_urls
def save_pdf(code, path='./', begin_date='', end_date=''):
    """
    Download every PDF found for *code* into <path>/<code>/.

    :param code: stock code; surrounding whitespace is tolerated and stripped
    :param path: parent directory for the per-company folder
    :param begin_date: start date 'YYYY-MM-DD' ('' = no lower bound)
    :param end_date: end date 'YYYY-MM-DD' ('' = no upper bound)
    """
    # The caller's list contains entries like '300500 ' (trailing space);
    # strip so neither the payload nor the directory name carries it.
    code = code.strip()
    pdf_urls = get_pdf_url(code, begin_date, end_date)
    file_path = os.path.join(path, code)
    if not os.path.isdir(file_path):
        os.makedirs(file_path)
    for file_name, url in pdf_urls:
        extension = url.split('.')[-1]
        # '*' is illegal in Windows file names (e.g. *ST titles); drop it.
        # The join() argument list was stripped by forum extraction and is
        # reconstructed here as [file_name, extension].
        file_full_name = os.path.join(
            file_path, '.'.join([file_name, extension])).replace('*', '')
        rs = requests.get(url, stream=True)
        with open(file_full_name, "wb") as fp:
            # Stream in 10 KiB chunks to keep memory flat for large PDFs.
            for chunk in rs.iter_content(chunk_size=10240):
                if chunk:
                    fp.write(chunk)
if __name__ == '__main__':
    # BUG FIX: time and random were used below but never imported.
    import random
    import time

    # Trailing spaces removed from the codes (save_pdf also strips them).
    codes = ['300500', '300499']
    for stock_code in codes:
        save_pdf(stock_code, begin_date='2005-12-27', end_date='2019-12-27')
        # Random pause between companies to avoid hammering the server.
        time.sleep(random.uniform(1, 2))
本帖最后由 YunGuo 于 2021-1-12 04:14 编辑
import requests
import json
import os
def save_pdf(title, ts, download_url, folder_name):
    """
    Download one report PDF into ./<folder_name>/<title>_<ts>.pdf.

    :param title: report title (used in the file name)
    :param ts: report publish time (used in the file name)
    :param download_url: absolute URL of the PDF
    :param folder_name: target directory, created on first use
    :return: None
    """
    print('正在下载:', title)
    target_dir = f'./{folder_name}'
    if not os.path.isdir(target_dir):
        os.makedirs(target_dir)
    response = requests.get(download_url, stream=True)
    # Stream to disk in 10 KiB chunks instead of buffering the whole file.
    with open(f'./{folder_name}/{title}_{ts}.pdf', 'wb') as pdf_file:
        for piece in response.iter_content(chunk_size=10240):
            if piece:
                pdf_file.write(piece)
def get_item(page):
    """
    Fetch one page of the SZSE fixed-disclosure announcement list and
    download every report on it.

    :param page: page number, 1-based
    :return: None
    """
    url = 'http://www.szse.cn/api/disc/announcement/annList'
    headers = {
        "Content-Type": "application/json",
        "Host": "www.szse.cn",
        "Origin": "http://www.szse.cn",
        "Referer": "http://www.szse.cn/disclosure/listed/fixed/index.html",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36",
    }
    data = {
        # Report-type ids: 010301 annual, 010303 half-year, 010305 Q1,
        # 010307 Q3 — change bigCategoryId as needed.
        "bigCategoryId": ["010301"],
        "channelCode": ["fixed_disc"],  # fixed, do not change
        "pageNum": page,
        "pageSize": 30,
        "seDate": ["", ""]  # date range, e.g. ["2017-01-01", "2021-01-01"]
    }
    res = requests.post(url, headers=headers, data=json.dumps(data))
    datas = res.json().get('data')
    # Past the last page (or on an API error) 'data' is missing — stop
    # instead of crashing on iteration.
    if not datas:
        return
    # Use the report-type id as the folder name.
    folder_name = ''.join(data.get('bigCategoryId'))
    for info in datas:
        title = info.get('title').replace('*', '')
        # BUG FIX: split(' ') returns a list; keep only the date part so the
        # file name doesn't end up containing a Python list repr.
        ts = info.get('publishTime').split(' ')[0]
        download_url = 'http://disc.static.szse.cn/download' + info.get('attachPath')
        save_pdf(title, ts, download_url, folder_name)
if __name__ == '__main__':
    # How many pages of the announcement list to walk through.
    page_num = 10
    for page in range(1, page_num + 1):
        print(f'第{page}页'.center(50, '='))
        get_item(page)
    print('=' * 50)
谢谢大佬,我想问一下如果我只想下载300开头的公司我应该怎么修改呢?是只改url就行了吗?然后我下载的文件名可以修改吗? YunGuo 发表于 2021-1-12 04:09
谢谢大佬,我想问一下如果我只想下载300开头的公司我应该怎么修改呢?是只改url就行了吗?然后我下载的文件名可以修改吗 masterbo 发表于 2021-1-12 10:55
谢谢大佬,我想问一下如果我只想下载300开头的公司我应该怎么修改呢?是只改url就行了吗?然后我下载的文件名可以修改吗?
你想怎样设置文件名?可以看代码中的注释。
另外,下载300开头的公司不用改url,获取到数据后加个判断就行了。
import requests
import json
import os
def save_pdf(title, ts, download_url, folder_name, code):
    """
    Download a single report PDF.

    :param title: report title (part of the file name)
    :param ts: report publish time (part of the file name)
    :param download_url: absolute URL of the PDF
    :param folder_name: target directory, created on first use
    :param code: company stock code (prefixed to the file name)
    :return: None
    """
    print('正在下载:', title)
    target_dir = f'./{folder_name}'
    if not os.path.isdir(target_dir):
        os.makedirs(target_dir)
    response = requests.get(download_url, stream=True)
    # File-name layout: [code]title_ts.pdf — drop code/ts from the f-string
    # below if a shorter name is preferred.
    with open(f'./{folder_name}/[{code}]{title}_{ts}.pdf', 'wb') as pdf_file:
        for piece in response.iter_content(chunk_size=10240):
            if piece:
                pdf_file.write(piece)
def get_item(page, code_head):
    """
    Fetch one page of the announcement list and download only the reports
    of companies whose stock code starts with *code_head*.

    :param page: page number, 1-based
    :param code_head: leading digits of the company code, e.g. 300
    :return: None
    """
    url = 'http://www.szse.cn/api/disc/announcement/annList'
    headers = {
        "Content-Type": "application/json",
        "Host": "www.szse.cn",
        "Origin": "http://www.szse.cn",
        "Referer": "http://www.szse.cn/disclosure/listed/fixed/index.html",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36",
    }
    data = {
        # Report-type ids: 010301 annual, 010303 half-year, 010305 Q1,
        # 010307 Q3 — change bigCategoryId as needed.
        "bigCategoryId": ["010301"],
        "channelCode": ["fixed_disc"],  # fixed, do not change
        "pageNum": page,
        "pageSize": 30,
        "seDate": ["", ""]  # date range, e.g. ["2017-01-01", "2021-01-01"]
    }
    res = requests.post(url, headers=headers, data=json.dumps(data))
    datas = res.json().get('data')
    # Past the last page (or on an API error) 'data' is missing — stop
    # instead of crashing on iteration.
    if not datas:
        return
    # Outer folder = report-type id, inner folder = code prefix, for easy
    # classification. Replace folder_name with a fixed string if preferred.
    folder_name = f"{''.join(data.get('bigCategoryId'))}/{str(code_head)}"
    prefix = str(code_head)
    for info in datas:
        code = ''.join(info.get('secCode'))
        # startswith generalizes the original code[:3] comparison so that
        # prefixes of any length work, not just 3 digits.
        if code.startswith(prefix):
            title = info.get('title').replace('*', '')
            # BUG FIX: keep only the date part of publishTime; the original
            # put the whole split() list (a list repr) into the file name.
            ts = info.get('publishTime').split(' ')[0]
            download_url = 'http://disc.static.szse.cn/download' + info.get('attachPath')
            save_pdf(title, ts, download_url, folder_name, code)
if __name__ == '__main__':
    # How many pages of the announcement list to walk through.
    page_num = 1
    # Only download companies whose stock code starts with these digits.
    code_head = 300
    for page in range(1, page_num + 1):
        print(f'第{page}页'.center(50, '='))
        get_item(page, code_head)
    print('=' * 50)
页:
[1]