爬取json
xpath怎样写可以把__INITIAL_STATE__后面的json字符串提取出来呢from selenium import webdriverfrom time import sleep
import requests
from lxml import html
etree = html.etree
import json
from selenium.webdriver.common.by import By
bro = webdriver.Chrome('D:/技能/chromedriver.exe')
bro.get('https://xiaoyuan.zhaopin.com/job/CC407288330J40383568908')
sleep(2)
cookies = bro.get_cookies()
print(cookies)
bro.quit()
#解析cookie
dic = {}
for cookie in cookies:
dic] = cookie['value']
#解析页面源码
url = 'https://xiaoyuan.zhaopin.com/api/sou?S_SOU_FULL_INDEX=java&S_SOU_POSITION_SOURCE_TYPE=&pageIndex=1&S_SOU_POSITION_TYPE=2&S_SOU_WORK_CITY=&S_SOU_JD_INDUSTRY_LEVEL=&S_SOU_COMPANY_TYPE=&S_SOU_REFRESH_DATE=&order=12&pageSize=30&_v=0.43957010&at=8d9987f50aed40bc8d1362e9c44a7fba&rt=ed9c026545294384a20a4473e1e2ecd3&x-zp-page-request-id=0933f66d64684fd6b0bc0756ed6791b6-1675650906506-860845&x-zp-client-id=b242c663-f23a-4571-aca7-de919a057afe'
header = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36",
}
url1 = 'https://xiaoyuan.zhaopin.com/api/sou?S_SOU_FULL_INDEX=java&S_SOU_POSITION_SOURCE_TYPE=&pageIndex=1&S_SOU_POSITION_TYPE=2&S_SOU_WORK_CITY=&S_SOU_JD_INDUSTRY_LEVEL=&S_SOU_COMPANY_TYPE=&S_SOU_REFRESH_DATE=&order=&pageSize=30&_v=0.06047107&at=9790578095794e1b9cc693485ef05237&rt=6448b0d50c2d460eb823575593f5a909&cityId=&jobTypeId=&jobSource=&industryId=&companyTypeId=&dateSearchTypeId=&x-zp-page-request-id=fcf1dcda72444dc6b8a17609bdb3a02f-1676083831807-687308&x-zp-client-id=b242c663-f23a-4571-aca7-de919a057afe'
response = requests.get(url=url1,headers=header,cookies=dic).json()#json后要加括号
list = response['data']['data']['list']
for i in list:
name = i['name']
number = i['number']
job_url = 'https://xiaoyuan.zhaopin.com/job/' + number
page = requests.get(url=job_url, headers=header,cookies=dic).text
tree = etree.HTML(page)
#解析详情页数据
imformation = tree.xpath('/html/body/script/text()')
print(imformation)
break我是这样写的,但输出的是一个空列表 from selenium import webdriver
from time import sleep
import requests
from lxml import html
etree = html.etree
import json
from selenium.webdriver.common.by import By
bro = webdriver.Chrome('D:/技能/chromedriver.exe')
bro.get('https://xiaoyuan.zhaopin.com/job/CC407288330J40383568908')
sleep(2)
cookies = bro.get_cookies()
print(cookies)
bro.quit()
#解析cookie
dic = {}
for cookie in cookies:
dic] = cookie['value']
#解析页面源码
url = 'https://xiaoyuan.zhaopin.com/api/sou?S_SOU_FULL_INDEX=java&S_SOU_POSITION_SOURCE_TYPE=&pageIndex=1&S_SOU_POSITION_TYPE=2&S_SOU_WORK_CITY=&S_SOU_JD_INDUSTRY_LEVEL=&S_SOU_COMPANY_TYPE=&S_SOU_REFRESH_DATE=&order=12&pageSize=30&_v=0.43957010&at=8d9987f50aed40bc8d1362e9c44a7fba&rt=ed9c026545294384a20a4473e1e2ecd3&x-zp-page-request-id=0933f66d64684fd6b0bc0756ed6791b6-1675650906506-860845&x-zp-client-id=b242c663-f23a-4571-aca7-de919a057afe'
header = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36",
}
url1 = 'https://xiaoyuan.zhaopin.com/api/sou?S_SOU_FULL_INDEX=java&S_SOU_POSITION_SOURCE_TYPE=&pageIndex=1&S_SOU_POSITION_TYPE=2&S_SOU_WORK_CITY=&S_SOU_JD_INDUSTRY_LEVEL=&S_SOU_COMPANY_TYPE=&S_SOU_REFRESH_DATE=&order=&pageSize=30&_v=0.06047107&at=9790578095794e1b9cc693485ef05237&rt=6448b0d50c2d460eb823575593f5a909&cityId=&jobTypeId=&jobSource=&industryId=&companyTypeId=&dateSearchTypeId=&x-zp-page-request-id=fcf1dcda72444dc6b8a17609bdb3a02f-1676083831807-687308&x-zp-client-id=b242c663-f23a-4571-aca7-de919a057afe'
response = requests.get(url=url1,headers=header,cookies=dic).json()#json后要加括号
list = response['data']['data']['list']
for i in list:
name = i['name']
number = i['number']
job_url = 'https://xiaoyuan.zhaopin.com/job/' + number
page = requests.get(url=job_url, headers=header,cookies=dic).text
tree = etree.HTML(page)
#解析详情页数据
imformation = tree.xpath('/html/body/script/text()') # 这里应该是 6
print(imformation)
break isdkz 发表于 2023-2-11 11:39
那该怎么才能把__INITIAL_STATE__后面的json字符串单独取出来呢,就是只要json字符串,不要__INITIAL_STATE__= 哈岁NB 发表于 2023-2-11 12:31
那该怎么才能把__INITIAL_STATE__后面的json字符串单独取出来呢,就是只要json字符串,不要__INITIAL_STA ...
from selenium import webdriver
from time import sleep
import requests
from lxml import html
etree = html.etree
import json
from selenium.webdriver.common.by import By
bro = webdriver.Chrome('D:/技能/chromedriver.exe')
bro.get('https://xiaoyuan.zhaopin.com/job/CC407288330J40383568908')
sleep(2)
cookies = bro.get_cookies()
print(cookies)
bro.quit()
#解析cookie
dic = {}
for cookie in cookies:
dic] = cookie['value']
#解析页面源码
url = 'https://xiaoyuan.zhaopin.com/api/sou?S_SOU_FULL_INDEX=java&S_SOU_POSITION_SOURCE_TYPE=&pageIndex=1&S_SOU_POSITION_TYPE=2&S_SOU_WORK_CITY=&S_SOU_JD_INDUSTRY_LEVEL=&S_SOU_COMPANY_TYPE=&S_SOU_REFRESH_DATE=&order=12&pageSize=30&_v=0.43957010&at=8d9987f50aed40bc8d1362e9c44a7fba&rt=ed9c026545294384a20a4473e1e2ecd3&x-zp-page-request-id=0933f66d64684fd6b0bc0756ed6791b6-1675650906506-860845&x-zp-client-id=b242c663-f23a-4571-aca7-de919a057afe'
header = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36",
}
url1 = 'https://xiaoyuan.zhaopin.com/api/sou?S_SOU_FULL_INDEX=java&S_SOU_POSITION_SOURCE_TYPE=&pageIndex=1&S_SOU_POSITION_TYPE=2&S_SOU_WORK_CITY=&S_SOU_JD_INDUSTRY_LEVEL=&S_SOU_COMPANY_TYPE=&S_SOU_REFRESH_DATE=&order=&pageSize=30&_v=0.06047107&at=9790578095794e1b9cc693485ef05237&rt=6448b0d50c2d460eb823575593f5a909&cityId=&jobTypeId=&jobSource=&industryId=&companyTypeId=&dateSearchTypeId=&x-zp-page-request-id=fcf1dcda72444dc6b8a17609bdb3a02f-1676083831807-687308&x-zp-client-id=b242c663-f23a-4571-aca7-de919a057afe'
response = requests.get(url=url1,headers=header,cookies=dic).json()#json后要加括号
list = response['data']['data']['list']
for i in list:
name = i['name']
number = i['number']
job_url = 'https://xiaoyuan.zhaopin.com/job/' + number
page = requests.get(url=job_url, headers=header,cookies=dic).text
tree = etree.HTML(page)
#解析详情页数据
information = tree.xpath('/html/body/script/text()').removeprefix('__INITIAL_STATE__=') # 改了这里
print(information) isdkz 发表于 2023-2-11 12:38
好的,感谢感谢 isdkz 发表于 2023-2-11 12:38
运行怎么报了'lxml.etree._ElementUnicodeResult' object has no attribute 'removeprefix'这个错误,这是为什么 呀,上网查也没有查到
哈岁NB 发表于 2023-2-11 15:57
运行怎么报了'lxml.etree._ElementUnicodeResult' object has no attribute 'removeprefix'这个错误,这 ...
代码有改过吗?我这里运行没有问题,你那个报错是因为只有字符串才有 removeprefix 方法,这说明你获取到的不是字符串 本帖最后由 哈岁NB 于 2023-2-11 16:29 编辑
from selenium import webdriver
from time import sleep
import requests
from lxml import html
etree = html.etree
bro = webdriver.Chrome('D:/技能/chromedriver.exe')
bro.get('https://xiaoyuan.zhaopin.com/job/CC407288330J40383568908')
sleep(2)
cookies = bro.get_cookies()
print(cookies)
bro.quit()
#解析cookie
dic = {}
for cookie in cookies:
dic] = cookie['value']
#解析页面源码
url = 'https://xiaoyuan.zhaopin.com/api/sou?S_SOU_FULL_INDEX=java&S_SOU_POSITION_SOURCE_TYPE=&pageIndex=1&S_SOU_POSITION_TYPE=2&S_SOU_WORK_CITY=&S_SOU_JD_INDUSTRY_LEVEL=&S_SOU_COMPANY_TYPE=&S_SOU_REFRESH_DATE=&order=12&pageSize=30&_v=0.43957010&at=8d9987f50aed40bc8d1362e9c44a7fba&rt=ed9c026545294384a20a4473e1e2ecd3&x-zp-page-request-id=0933f66d64684fd6b0bc0756ed6791b6-1675650906506-860845&x-zp-client-id=b242c663-f23a-4571-aca7-de919a057afe'
header = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36",
}
url1 = 'https://xiaoyuan.zhaopin.com/api/sou?S_SOU_FULL_INDEX=java&S_SOU_POSITION_SOURCE_TYPE=&pageIndex=1&S_SOU_POSITION_TYPE=2&S_SOU_WORK_CITY=&S_SOU_JD_INDUSTRY_LEVEL=&S_SOU_COMPANY_TYPE=&S_SOU_REFRESH_DATE=&order=&pageSize=30&_v=0.06047107&at=9790578095794e1b9cc693485ef05237&rt=6448b0d50c2d460eb823575593f5a909&cityId=&jobTypeId=&jobSource=&industryId=&companyTypeId=&dateSearchTypeId=&x-zp-page-request-id=fcf1dcda72444dc6b8a17609bdb3a02f-1676083831807-687308&x-zp-client-id=b242c663-f23a-4571-aca7-de919a057afe'
response = requests.get(url=url1,headers=header,cookies=dic).json()#json后要加括号
list = response['data']['data']['list']
for i in list:
name = i['name']
number = i['number']
job_url = 'https://xiaoyuan.zhaopin.com/job/' + number
page = requests.get(url=job_url, headers=header,cookies=dic).text
tree = etree.HTML(page)
#解析详情页数据
information = tree.xpath('/html/body/script/text()').removeprefix('__INITIAL_STATE__=')
print(information)isdkz 发表于 2023-2-11 16:16
代码有改过吗?我这里运行没有问题,你那个报错是因为只有字符串才有 removeprefix 方法,这说明你获取到 ...
没改过,返回的类型是<class 'lxml.etree._ElementUnicodeResult'>这个 isdkz 发表于 2023-2-11 16:16
代码有改过吗?我这里运行没有问题,你那个报错是因为只有字符串才有 removeprefix 方法,这说明你获取到 ...
好像是我的python3.9才有,我用的3.8{:10_266:} 哈岁NB 发表于 2023-2-11 16:36
好像是我的python3.9才有,我用的3.8
倒是忘了这茬了,removeprefix 这个方法是后面的版本才有的,如果不能用这个方法就直接用索引吧
from selenium import webdriver
from time import sleep
import requests
from lxml import html
etree = html.etree
bro = webdriver.Chrome('D:/技能/chromedriver.exe')
bro.get('https://xiaoyuan.zhaopin.com/job/CC407288330J40383568908')
sleep(2)
cookies = bro.get_cookies()
print(cookies)
bro.quit()
#解析cookie
dic = {}
for cookie in cookies:
dic] = cookie['value']
#解析页面源码
url = 'https://xiaoyuan.zhaopin.com/api/sou?S_SOU_FULL_INDEX=java&S_SOU_POSITION_SOURCE_TYPE=&pageIndex=1&S_SOU_POSITION_TYPE=2&S_SOU_WORK_CITY=&S_SOU_JD_INDUSTRY_LEVEL=&S_SOU_COMPANY_TYPE=&S_SOU_REFRESH_DATE=&order=12&pageSize=30&_v=0.43957010&at=8d9987f50aed40bc8d1362e9c44a7fba&rt=ed9c026545294384a20a4473e1e2ecd3&x-zp-page-request-id=0933f66d64684fd6b0bc0756ed6791b6-1675650906506-860845&x-zp-client-id=b242c663-f23a-4571-aca7-de919a057afe'
header = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36",
}
url1 = 'https://xiaoyuan.zhaopin.com/api/sou?S_SOU_FULL_INDEX=java&S_SOU_POSITION_SOURCE_TYPE=&pageIndex=1&S_SOU_POSITION_TYPE=2&S_SOU_WORK_CITY=&S_SOU_JD_INDUSTRY_LEVEL=&S_SOU_COMPANY_TYPE=&S_SOU_REFRESH_DATE=&order=&pageSize=30&_v=0.06047107&at=9790578095794e1b9cc693485ef05237&rt=6448b0d50c2d460eb823575593f5a909&cityId=&jobTypeId=&jobSource=&industryId=&companyTypeId=&dateSearchTypeId=&x-zp-page-request-id=fcf1dcda72444dc6b8a17609bdb3a02f-1676083831807-687308&x-zp-client-id=b242c663-f23a-4571-aca7-de919a057afe'
response = requests.get(url=url1,headers=header,cookies=dic).json()#json后要加括号
list = response['data']['data']['list']
for i in list:
name = i['name']
number = i['number']
job_url = 'https://xiaoyuan.zhaopin.com/job/' + number
page = requests.get(url=job_url, headers=header,cookies=dic).text
tree = etree.HTML(page)
#解析详情页数据
information = tree.xpath('/html/body/script/text()')
print(information) isdkz 发表于 2023-2-11 17:21
倒是忘了这茬了,removeprefix 这个方法是后面的版本才有的,如果不能用这个方法就直接用索引吧
好的,感谢感谢
页:
[1]