What exactly is wrong here?
import requests
from bs4 import BeautifulSoup
import pandas as pd


def pars_soup(soup):
    items = []
    # Each m_feed_item div holds one Q&A entry
    m_feed_item_list = soup.find_all('div', 'm_feed_item')
    for m_feed_item in m_feed_item_list:
        temp = dict()
        id = m_feed_item.get('id')
        name = m_feed_item.find('p').text
        # The first m_feed_txt div is the question; the second, if present, is the answer
        qa = m_feed_item.find_all('div', 'm_feed_txt')
        q = qa[0].text.strip()
        if len(qa) == 2:
            a = qa[1].text.strip()
        else:
            a = ''
        temp['id'] = id
        temp['name'] = name
        temp['question'] = q
        temp['answer'] = a
        items.append(temp)
    return items


url = 'https://sns.sseinfo.com/ajax/feeds.do?type=11&pageSize=10&lastid=-1&show=1&page=1&_=1690729988214'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36 Edg/91.0.864.48',
}
res = requests.get(url, headers=headers)
soup = BeautifulSoup(res.text, features="html.parser")
data = pars_soup(soup)  # parsed list of records
# Store the data in a DataFrame
df = pd.DataFrame(data)
# Write the DataFrame to an Excel file
output_path = 'f://123.xlsx'
df.to_excel(output_path, index=False)
print(f"Data successfully written to: {output_path}")
The program above extracts the content without problems.
But when I enter a keyword in the search box and run the same parsing against the search-result page, nothing is extracted. What exactly is going wrong? Here is the code for the search page:
import requests
from bs4 import BeautifulSoup
import pandas as pd


def pars_soup(soup):
    items = []
    # Each m_feed_item div holds one Q&A entry
    m_feed_item_list = soup.find_all('div', 'm_feed_item')
    for m_feed_item in m_feed_item_list:
        temp = dict()
        id = m_feed_item.get('id')
        name = m_feed_item.find('p').text
        # The first m_feed_txt div is the question; the second, if present, is the answer
        qa = m_feed_item.find_all('div', 'm_feed_txt')
        q = qa[0].text.strip()
        if len(qa) == 2:
            a = qa[1].text.strip()
        else:
            a = ''
        temp['id'] = id
        temp['name'] = name
        temp['question'] = q
        temp['answer'] = a
        items.append(temp)
    return items


url = "https://sns.sseinfo.com/qasearch.do"
headers = {
    "Accept": "*/*",
    "Accept-Encoding": "gzip, deflate, br",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
    "Connection": "keep-alive",
    "Content-Length": "65",
    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
    "Host": "sns.sseinfo.com",
    "Origin": "https://sns.sseinfo.com",
    "Referer": "https://sns.sseinfo.com/search.do?keyword=%E7%A9%BA%E9%97%B4%E8%AE%A1%E7%AE%97&keywordEnd=%E7%A9%BA%E9%97%B4%E8%AE%A1%E7%AE%97",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.188",
    "X-Requested-With": "XMLHttpRequest"
}  # request headers
data = {
    "keyword": "空间计算"
}  # form parameters for the POST body
response = requests.post(url, data=data, headers=headers)  # send the POST request
soup = BeautifulSoup(response.text, features="html.parser")
data = pars_soup(soup)  # parsed list of records
# Store the data in a DataFrame
df = pd.DataFrame(data)
# Write the DataFrame to an Excel file
output_path = 'f://123.xlsx'
df.to_excel(output_path, index=False)
print(f"Data successfully written to: {output_path}")
歌者文明清理员 (2023-7-30 23:40): These two are reversed.

歌者文明清理员 (2023-7-31 00:18): Hmm, they don't seem reversed after all; his second screenshot is https://sns.sseinfo.com/qasearch.do

isdkz (2023-7-31 00:21): Alright, I'm off to bed.

So how do I actually fix this?

You can try the following modification:
import requests
from bs4 import BeautifulSoup
import pandas as pd


def pars_soup(soup):
    items = []
    # Each sns_item div is assumed to hold one search result
    m_feed_item_list = soup.find_all('div', 'sns_item')
    for m_feed_item in m_feed_item_list:
        temp = dict()
        id = m_feed_item.get('id')
        name = m_feed_item.find('div', 'sns_item_title').text.strip()
        # The first sns_item_text paragraph is the question; the second, if present, is the answer
        qa = m_feed_item.find_all('p', 'sns_item_text')
        q = qa[0].text.strip()
        if len(qa) == 2:
            a = qa[1].text.strip()
        else:
            a = ''
        temp['id'] = id
        temp['name'] = name
        temp['question'] = q
        temp['answer'] = a
        items.append(temp)
    return items


url = "https://sns.sseinfo.com/qasearch.do"
data = {
    "keyword": "空间计算"
}
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36 Edg/91.0.864.48",
    "Referer": "https://sns.sseinfo.com/search.do?keyword=%E7%A9%BA%E9%97%B4%E8%AE%A1%E7%AE%97&keywordEnd=%E7%A9%BA%E9%97%B4%E8%AE%A1%E7%AE%97",
    "X-Requested-With": "XMLHttpRequest"
}
response = requests.post(url, data=data, headers=headers)
soup = BeautifulSoup(response.text, features="html.parser")
data = pars_soup(soup)  # parsed list of records
df = pd.DataFrame(data)
output_path = 'f://123.xlsx'
df.to_excel(output_path, index=False)
print(f"Data successfully written to: {output_path}")
Note that this modification is based on the code you provided and makes assumptions about the HTML structure of the search-result page and how the content should be extracted. If the actual page differs from those assumptions, you will need to adjust further.
Also, make sure your network environment and permissions allow you to reach the site and send POST requests.
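If the class names assumed above (sns_item, sns_item_title, sns_item_text) turn out not to match the real page, one way to find the actual container classes is to tally the class attribute of every div in the search response (a rough sketch reusing the same request; the keyword and headers are just the ones from the snippet above):

from collections import Counter

import requests
from bs4 import BeautifulSoup

response = requests.post(
    "https://sns.sseinfo.com/qasearch.do",
    data={"keyword": "空间计算"},
    headers={
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36",
        "X-Requested-With": "XMLHttpRequest",
    },
    timeout=10,
)
soup = BeautifulSoup(response.text, "html.parser")

# Count how often each combination of div classes appears; the repeating
# result containers are usually among the most frequent entries.
class_counts = Counter(
    " ".join(div.get("class", [])) for div in soup.find_all("div")
)
for cls, n in class_counts.most_common(20):
    print(n, cls or "(no class)")

Whichever class shows up roughly once per search result is usually the one to pass to find_all in pars_soup.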