Crawling a dynamically loaded page
Could anyone help? The page https://www.jianshu.com/ loads dynamically: the content further down only appears after you scroll manually. How should I modify this code so it can collect the URLs of all the articles?

import logging
import asyncio
from pyppeteer import launch
from pyppeteer.errors import TimeoutError

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s: %(message)s')

index_url = 'https://www.jianshu.com/'
PAGE = 1
TIMEOUT = 10
# Window width and height
WINDOW_WIDTH, WINDOW_HEIGHT = 1366, 768
HEADLESS = False

browser, tab = None, None

# Initialize the browser and tab
async def init():
    global browser, tab
    browser = await launch(headless=HEADLESS,
                           args=['--disable-infobars', f'--window-size={WINDOW_WIDTH},{WINDOW_HEIGHT}'])
    tab = await browser.newPage()
    # Hide the webdriver flag so the page is less likely to detect automation
    await tab.evaluateOnNewDocument('Object.defineProperty(navigator, "webdriver", {get: () => undefined})')
    await tab.setViewport({'width': WINDOW_WIDTH, 'height': WINDOW_HEIGHT})

# Generic page-scraping helper
async def scrape_url(url, selector):
    logging.info(f'scraping {url}')
    try:
        await tab.goto(url)
        await tab.waitForSelector(selector, options={'timeout': TIMEOUT * 1000})
    except TimeoutError:
        logging.error(f'error while scraping {url}', exc_info=True)

# Scrape the list (index) page
async def scrape_index():
    await scrape_url(url=index_url, selector='.title')

# Extract the detail-page URLs from the list page
async def parse_url():
    return await tab.querySelectorAllEval('.title', 'nodes => nodes.map(node => node.href)')

async def main():
    await init()
    try:
        await scrape_index()
        detail = await parse_url()
        logging.info(f'url is {detail}')
    finally:
        await browser.close()

if __name__ == '__main__':
    asyncio.get_event_loop().run_until_complete(main())
To crawl the dynamic page and collect the URLs of all the articles, you can use Selenium WebDriver with the Chrome browser to simulate a user scrolling down the page. Here is a revised example:
import logging
import time

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s: %(message)s')

index_url = 'https://www.jianshu.com/'

# Chrome browser options
chrome_options = Options()
chrome_options.add_argument('--disable-infobars')
chrome_options.add_argument('--window-size=1366,768')
# Uncomment to run headless if you don't need to see the browser window
# chrome_options.add_argument('--headless')

# Initialize the WebDriver and open the page
driver = webdriver.Chrome(options=chrome_options)
driver.get(index_url)

# Simulate scrolling until the page stops growing
while True:
    # Jump to the bottom of the page
    driver.find_element(By.TAG_NAME, 'body').send_keys(Keys.END)
    time.sleep(1)  # give the page time to load the next batch
    # Stop once the viewport has actually reached the bottom
    if driver.execute_script('return window.innerHeight + window.pageYOffset >= document.body.offsetHeight'):
        break

# Collect the href of every article title
urls = []
elements = driver.find_elements(By.CSS_SELECTOR, '.title')
for element in elements:
    urls.append(element.get_attribute('href'))

# Log the result and close the browser
logging.info(f'URLs: {urls}')
driver.quit()
Note that this solution relies on the Selenium library and the Chrome browser driver; make sure the library is installed and a ChromeDriver matching your Chrome version is available. The element lookups above use the Selenium 4 find_element(By...) style, since the old find_element_by_* methods have been removed. You can also adjust the sleep interval and the other settings as needed.
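If you would rather stay with pyppeteer as in your original script, the same scroll-until-nothing-new-loads idea works there too. This is only a minimal sketch under stated assumptions: scroll_to_load and its max_scrolls cap are names I made up, and the one-second wait is a guess you may need to tune; on a feed like jianshu's, which can grow almost indefinitely, some cap is advisable anyway.

import asyncio

# Hypothetical helper, not part of the original script: scroll the tab until
# the page height stops growing or a safety cap is reached.
async def scroll_to_load(tab, max_scrolls=10):
    for _ in range(max_scrolls):
        previous_height = await tab.evaluate('() => document.body.scrollHeight')
        # Jump to the current bottom so the page requests the next batch
        await tab.evaluate('() => window.scrollTo(0, document.body.scrollHeight)')
        await asyncio.sleep(1)  # rough wait for the lazy-loaded content
        new_height = await tab.evaluate('() => document.body.scrollHeight')
        if new_height == previous_height:
            break  # nothing new was appended; assume we reached the end

You would call it between scrape_index() and parse_url() in main(), e.g. await scroll_to_load(tab), so that the '.title' nodes of the newly loaded articles exist before querySelectorAllEval runs.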
If that solves the problem, please mark it as the best answer.

Capture the traffic in your browser yourself and see where the data actually comes from, then work your way down from there.

The data extracted with Reqable contains far too much; I only want the phone number and the number's home region (归属地):

            "https://thirdwx.qlogo.cn/mmopen/vi_32/DYAIOgq83er6n6kl9MnttwOYHhOvudQ8XAk1PDxRtJI9R7MTUClMxictGTOMSicHFialB849rb6ib48291xIp196UQ/132",
"mobilePhone": "13606068124",
"birthdate": null,
"teacherName": null,
"tag": null,
"saleGrade": null,
"wechatUserId": 61,
"saleUserId": 171213,
"disTime": "2023-12-30 20:12:02",
"wxAddType": "1",
"remark": "删",
"intentionId": 305185,
"teacherQwUserId": null,
"intention": null,
"intentionTime": null,
"nickname": "***",
"wxName": "***",
"type": 2,
"addAsk": false,
"phoneOne": false,
"phoneTwo": false,
"workPhoneNum": 0,
"msgNum": 0,
"unionId": "oJJijuFMRwSO4d_pciJRAEYpFwHA",
"modelLabel": null,
"brandLabel": "未知",
"gradeLabel": null,
"saleGradeLabel": null,
"platformLabel": "IOS手机",
"sourcePlatformLabel": "IOS手机",
"distributionLabel": null,
"genderLabel": null,
"tagLabel": null,
"wxAddTypeLabel": "主动添加",
"intentionLabel": null,
"isMyAccount": true,
"workNum": 0,
"dateWorkNum": 0,
"bpVO": {
"id": 303752,
"userId": 305377,
"t": 33,
"a": 0,
"b": 2,
"c": 1,
"d": 30,
"isDeleted": false,
"createTime": "2023-12-30 20:12:03"
},
"qwList": null,
"msgCount": null,
"invitation": null,
"toDoInfo": null,
"repeatCount": null,
"portrait": null,
"learnNum": 1,
"wareNum": 11
}
],
"total": 16237,
"size": 100,
"current": 1,
"orders": [],
"optimizeCountSql": true,
"searchCount": true,
"countId": null,
"maxLimit": null,
"pages": 163
}
}
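The fragment above is truncated at the top, but the total/size/current/pages fields suggest a paginated response with the rows nested in a list. Below is a minimal sketch of pulling out just the phone numbers; the list key 'records' and the file name reqable_capture.json are both assumptions, so check them against your actual capture. Also note that no home-region field appears in the sample at all, so the 归属地 would have to be looked up separately, for example from the phone-number prefix.

import json

# Assumptions: the rows live under data['records'] (common for paginated
# responses shaped like this one) and the capture was saved as
# reqable_capture.json; adjust both to match your real data.
with open('reqable_capture.json', encoding='utf-8') as f:
    payload = json.load(f)

for record in payload['data']['records']:
    # Only the phone number is present in the sample; the home region is not,
    # so it would need a separate lookup (e.g. by phone prefix).
    print(record.get('mobilePhone'))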