Python crawler: capturing the requests shown in the browser's Network panel
I'm hoping to find a better way to capture what shows up under Network. My current approach:
from browsermobproxy import Server
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
server = Server('E:/python3.6.nongye/processing.tools/datatable/browsermob-proxy-2.1.4/bin/browsermob-proxy.bat')
server.start()
proxy = server.create_proxy()
chrome_options = Options()
chrome_options.add_argument('--proxy-server={0}'.format(proxy.proxy))
driver = webdriver.Chrome(chrome_options=chrome_options)
# URL to visit
# base_url = "https://www.baidu.com/"
base_url = "https://zb.caupdcloud.com/#/IndicatorSystem"
proxy.new_har("zb", options={'captureHeaders': True, 'captureContent': True})
driver.get(base_url)
# Pause a few seconds here so the page finishes loading, otherwise the requests won't be captured
time.sleep(9)
driver.find_element_by_xpath("/html/body/div/div/div/div/div/a").click()
time.sleep(4)
driver.find_element_by_xpath('//*[@id="nav-bar"]/li/a/h1').click()
time.sleep(20)
driver.delete_all_cookies()
# driver.get_network_conditions()  # only reads emulated network conditions; may raise if set_network_conditions() was never called
driver.find_element_by_xpath('//*[@id="system-wrap"]/ul/li/p').click()
time.sleep(4)
for iii in range(20, 22):
    # go back through the nav bar, then open the iii-th entry under #system-wrap
    driver.find_element_by_xpath('//*[@id="nav-bar"]/li/a/h1').click()
    time.sleep(4)
    content_xpath = '//*[@id="system-wrap"]/ul/li[' + str(iii) + ']/p'
    driver.find_element_by_xpath(content_xpath).click()
    time.sleep(4)
result = proxy.har
for entry in result['log']['entries']:
    _url = entry['request']['url']
    print(_url)
    # Locate the data API by its URL; here it's the getTree endpoint
    # (the original comment referenced http://git.liuyanlin.cn/get_ht_list)
    if "https://zb.caupdcloud.com/zght/getTree?targetSystem=" in _url:
        _response = entry['response']
        _content = _response['content']
        # content returned by the API
        print(_content)
server.stop()
driver.quit()
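A side note on reading the body back out of the HAR: with captureContent enabled, the text sits in entry['response']['content']['text'] and, per the HAR format, may be base64-encoded (flagged by content['encoding']). A minimal sketch of a decoding helper (har_body is just an illustrative name, not part of browsermob-proxy):

import base64
import json

def har_body(entry):
    # pull the response body out of a HAR entry, decoding base64 if flagged
    content = entry['response']['content']
    text = content.get('text', '')
    if content.get('encoding') == 'base64':
        text = base64.b64decode(text).decode('utf-8', errors='replace')
    return text

# e.g. data = json.loads(har_body(entry)) if the getTree endpoint returns JSON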
The problem with this approach is that every click also gets recorded, which I don't want. I only need the requests made by one specific page. Is there a way to clear out all the other requests?

You mean you only want the response of a single resource, right? What exactly are you trying to scrape?
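On clearing the earlier entries: browsermob-proxy starts a fresh HAR on every proxy.new_har() call, so anything captured before the reset no longer shows up in the next proxy.har read. A minimal sketch along those lines, reusing the XPath and target URL from the script above (an untested outline, not a drop-in fix):

# reset the HAR right before the click whose requests you want
proxy.new_har("zb_page", options={'captureHeaders': True, 'captureContent': True})
driver.find_element_by_xpath(content_xpath).click()
time.sleep(4)

# proxy.har now only contains entries recorded since the reset above
for entry in proxy.har['log']['entries']:
    if "https://zb.caupdcloud.com/zght/getTree?targetSystem=" in entry['request']['url']:
        print(entry['response']['content'])

If the browser serves the page from cache, the request may never reach the proxy at all, so disabling the cache or forcing a reload can also be necessary.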