Where is the problem in this Python crawler that scrapes Bilibili video comments? It errors out and I can't find the cause. Experts, please take a look at the code and help!
import requests
import csv
import hashlib
import time
from urllib.parse import quote
# w_rid: the signed (encrypted) parameter
def GetW(wts, NextPage):
    pagination_str = quote(NextPage)
    l = ["mode=2",
         "oid=113814703972019",
         f"pagination_str={pagination_str}",
         "plat=1",
         "type=1",
         "web_location=1315875",
         f"wts={wts}"
         ]
    y = '&'.join(l)
    string = y + "ea1db124af3c7062474693fa704f4ff8"
    MD5 = hashlib.md5()
    MD5.update(string.encode('utf-8'))
    w_rid = MD5.hexdigest()
    print(w_rid)
    return w_rid
def GetContent(offset):
    headers = {
        "cookie": "bili_ticket=eyJhbGciOiJIUzI1NiIsImtpZCI6InMwMyIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3NDE1Nzk0MzAsImlhdCI6MTc0MTMyMDE3MCwicGx0IjotMX0.FsFshnZPFdht_Fj7Pxqp5XgzHlgnYPnpoGWX53F1imw; bili_ticket_expires=1741579370; buvid3=1D55509D-6660-9458-1C1A-6EC43569BE4130240infoc; b_nut=1741320227; buvid4=6A35DDCB-75FC-4C6D-03B6-B828DEA70BA430240-025030704-QwNGI+jNopUTI1GhLVomaw%3D%3D; _uuid=1D11D2210-2BC9-2529-8C44-C1010A76E8D381028745infoc; CURRENT_FNVAL=4048; buvid_fp=2f0eefefa84c6a4ab0d42472a63a6dd4; b_lsid=D414ECEB_1956F97B63D; csrf_state=130ae0096803953f25dfdd2b87144e35; SESSDATA=cdefeabd%2C1756887034%2C4ca20%2A32CjD7yyeY9-VFI_RWy8IsjId3cPUs-xTs-3CYvAyVNuAehtfvU-_ii9uXRESeWr7O9I4SVlVNQUFBUzR6NkxLcDh6SGhvVWtPbWVqZUJ6U2t6OFgtYVdqVEFJb3ZtSjJmTUQ3VHBlRFh5LXFNSDN0bUlOTlgwMF93RVV4aHh5TXB5N3o0SVRDOUdRIIEC; bili_jct=1018cac8e70a199a4feca14132a9055c; DedeUserID=3546377353169818; DedeUserID__ckMd5=b43dfea08718ba45; sid=mcmifm2h",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36 Edg/133.0.0.0",
    }
    url = 'https://api.bilibili.com/x/v2/reply/wbi/main'
    pagination_str = '{"offset":%s}' % offset
    wts = int(time.time())
    w_rid = GetW(wts=wts, NextPage=pagination_str)
    data = {
        'oid': '114040961501285',
        'type': '1',
        'mode': ' 2',
        'pagination_str': '{"offset":%s}' % offset,
        'plat': '1',
        'seek_rpid': '',
        'web_location': '1315875',
        'w_rid': w_rid,
        'wts': 'wts',
    }
    response = requests.get(url=url, params=data, headers=headers)
    # GET query parameters
    # request method: GET
    # get the data
    json_data = response.json()
    # parse the data
    # take values out of the dict
    replies = json_data['data']['replies']
    for index in replies:
        dit = {
            '昵称': index['member']['uname'],
            '性别': index['member']['sex'],
            '地区': index['reply_control']['location'].replace('IP属地', ''),
            '评论': index['content']['message'],
            '点赞': index['like'],
        }
        print(dit)
        csv_writer.writerow(dit)
    # save the data
    # collect data in bulk
    # pagination_str: empty for the first page, then reuse the value from the previous response
    # wts: timestamp; the current one can be taken from the time module
    # w_rid: the signed (encrypted) parameter
    NextPage = json_data['data']['cursor']['pagination_reply']['next_offset']
    next_offset = json.dumps(NextPage)
    return next_offset
if __name__ == '__main__':
    # send the request
    # cookie: user info, often checked to tell whether an account is logged in
    # user-agent: identifies the browser the request claims to come from
    f = open(file='data.csv', mode='w', encoding='utf-8', newline='')
    # dict-based CSV writer
    csv_writer = csv.DictWriter(f, fieldnames=['昵称', '性别', '地区', '评论', '点赞'])
    csv_writer.writeheader()
    offset = '""'
    for page in range(1, 21):
        offset = GetContent(offset=offset)
Let me help you take a look. The code you posted has a few real bugs; check the following:
1. In GetW, the signed string hardcodes oid=113814703972019, but the request in GetContent sends oid 114040961501285, so the computed w_rid can never match the request.
2. 'mode': ' 2' has a stray space inside the value, which also makes the signature mismatch (GetW signs mode=2).
3. requests.get should send the actual timestamp in wts, not the literal string 'wts'.
4. json.dumps is called at the end of GetContent, but the json module is never imported.
5. Indexing replies directly (e.g. index['reply_control']['location']) raises KeyError whenever a field is missing, so the response structure needs defensive handling.
The corrected code is as follows:
import requests
import csv
import hashlib
import json
import time
from urllib.parse import quote

def GetW(wts, params):
    # w_rid is an MD5 over ALL query parameters, sorted by key and
    # URL-encoded, with the wbi salt appended at the end
    l = [f"{k}={quote(str(v))}" for k, v in sorted(params.items())]
    y = '&'.join(l)
    string = y + "ea1db124af3c7062474693fa704f4ff8"
    MD5 = hashlib.md5()
    MD5.update(string.encode('utf-8'))
    w_rid = MD5.hexdigest()
    print(w_rid)
    return w_rid

def GetContent(offset):
    headers = {
        "cookie": "your_cookie_here",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36 Edg/133.0.0.0"
    }
    url = 'https://api.bilibili.com/x/v2/reply/wbi/main'
    pagination_str = '{"offset":%s}' % offset
    wts = int(time.time())
    data = {
        'oid': '114040961501285',
        'type': '1',
        'mode': '2',
        'pagination_str': pagination_str,
        'plat': '1',
        'seek_rpid': '',
        'web_location': '1315875',
        'wts': wts,
    }
    # sign exactly the parameters that will be sent
    data['w_rid'] = GetW(wts=wts, params=data)
    response = requests.get(url=url, params=data, headers=headers)
    json_data = response.json()
    if json_data.get('code') != 0:
        # non-zero code means the API refused the request
        # (bad signature, risk control, expired cookie, ...)
        print('API error:', json_data.get('code'), json_data.get('message'))
        return offset
    replies = json_data.get('data', {}).get('replies', []) or []
    for index in replies:
        dit = {
            '昵称': index.get('member', {}).get('uname', ''),
            '性别': index.get('member', {}).get('sex', ''),
            '地区': index.get('reply_control', {}).get('location', '').replace('IP属地', ''),
            '评论': index.get('content', {}).get('message', ''),
            '点赞': index.get('like', 0),
        }
        print(dit)
        csv_writer.writerow(dit)
    # the offset for the next page lives in the cursor; re-serialize it as JSON
    NextPage = json_data.get('data', {}).get('cursor', {}).get('pagination_reply', {}).get('next_offset', '')
    next_offset = json.dumps(NextPage)
    print(next_offset)
    return next_offset

if __name__ == '__main__':
    f = open(file='data.csv', mode='w', encoding='utf-8', newline='')
    fieldnames = ['昵称', '性别', '地区', '评论', '点赞']
    csv_writer = csv.DictWriter(f, fieldnames=fieldnames)
    csv_writer.writeheader()
    offset = '""'  # the first page sends a literal empty JSON string as the offset
    for page in range(1, 21):
        offset = GetContent(offset=offset)
    f.close()
Make sure to replace "your_cookie_here" with your own cookies. I hope this helps you find and fix the problem.
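As a quick illustration of the pagination_str handling (the next_offset value below is made up, purely to show the escaping):

import json

offset = '""'                      # page 1: a literal empty JSON string
print('{"offset":%s}' % offset)    # -> {"offset":""}

next_offset = '{"pn":2}'           # hypothetical next_offset from the API
offset = json.dumps(next_offset)   # re-quote and escape it
print('{"offset":%s}' % offset)    # -> {"offset":"{\"pn\":2}"}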
The reply above comes from the FishC AI assistant; if it did not correctly answer your question, please keep asking.

- Make sure your network environment can reach the Bilibili API.
- Check that the cookie you supply is valid and carries the logged-in state Bilibili expects; some API endpoints are only accessible to logged-in users.
- Mind the API rate limits and anti-crawling measures: frequent requests can get your IP temporarily banned or otherwise restricted, so leave a reasonable interval between requests.
- For the data.csv file path, make sure the script has permission to create and write a file there.
- Before running the script for real, test that a single request works, so you know all the parameters are configured correctly.

What should I do if I'm already being blocked by the anti-crawling measures?
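If you are already being throttled, the usual first step is simply to slow down and back off after failures. A minimal sketch, with made-up delay and retry values (Bilibili does not publish its exact limits):

import random
import time

import requests

def polite_get(url, max_retries=3, base_delay=2.0, **kwargs):
    # GET with a small random pause before each attempt and
    # exponential backoff after a failed one
    response = None
    for attempt in range(max_retries):
        time.sleep(base_delay + random.uniform(0, 1))
        response = requests.get(url, timeout=10, **kwargs)
        if response.status_code == 200:
            return response
        # HTTP 412 from Bilibili usually means risk control was triggered
        time.sleep(base_delay * (2 ** attempt))
    return response

In GetContent you would then call polite_get(url, params=data, headers=headers) in place of requests.get. Keeping a valid logged-in cookie and a generous interval between requests helps more than any retry trick.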
- Fixed the wts parameter bug: the request now sends the actual timestamp instead of the string 'wts'
- Cleaned up the pagination_str handling, serializing the next offset with json.dumps
- Rebuilt the signature logic so that w_rid is computed over the same parameters the request sends, as the Bilibili API expects
- Added an API response status check, plus exception handling for the main loop (see the sketch after this list)
- Tidied the code formatting and removed redundant code
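For the exception handling in the last point, a minimal sketch of how the main loop could guard against network and parsing failures, assuming the GetContent above (the exception choices are one reasonable selection, not the only one):

import json

import requests

for page in range(1, 21):
    try:
        offset = GetContent(offset=offset)
    except requests.RequestException as e:
        print('network error, stopping early:', e)
        break
    except (KeyError, json.JSONDecodeError) as e:
        print('unexpected response shape, stopping early:', e)
        break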