The site I'm scraping is https://www.shixi.com/search/index?key=大数据, and I want to collect six fields from every page of results.
I wrote this code by following the Douban TOP250 scraping example, and I'm hoping someone can point out what's wrong.
import requests
import bs4

def open_url(url):
    # Send a browser-like User-Agent so the site doesn't reject the request.
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36'}
    res = requests.get(url, headers=headers, verify=False)
    return res
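
# Suggested addition, not in the original post: verify=False skips TLS
# certificate checks, so requests emits an InsecureRequestWarning on every
# call; this one-liner silences those warnings.
requests.packages.urllib3.disable_warnings()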
def find_occupation(res):
    soup = bs4.BeautifulSoup(res.text, 'html.parser')

    # Job title
    post = []
    targets = soup.find_all("dt", class_="job-name")
    for each in targets:
        post.append(each.text)

    # Company name
    company = []
    targets = soup.find_all("div", class_="comany-info-title")
    for each in targets:
        company.append(each.a.text)

    # Work location
    area = []
    targets = soup.find_all("span", class_="job-address")
    for each in targets:
        area.append(each.text)

    # Education requirement
    education = []
    targets = soup.find_all("dd", class_="job-des")
    for each in targets:
        education.append(each.text)

    # Salary
    pay = []
    targets = soup.find_all("div", class_="comany-info-des")
    for each in targets:
        pay.append(each.text)

    # Posting time
    time = []
    targets = soup.find_all("span", class_="job-time")
    for each in targets:
        parts = each.text.split('\n')
        try:
            time.append(parts[1].strip() + parts[2].strip())
        except IndexError:
            # Keep the lists aligned: append a placeholder instead of
            # skipping, otherwise time[i] drifts out of step with post[i]
            # and the join below raises IndexError.
            time.append('')

    result = []
    for i in range(len(post)):
        result.append(post[i] + company[i] + area[i] + education[i] + pay[i] + time[i] + '\n')
    return result
# Find out how many result pages there are in total.
def find_depth(res):
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    # The last page number sits just before the "next" button; calling
    # previous_sibling twice skips the text node between the <li> tags.
    depth = soup.find('li', class_='next').previous_sibling.previous_sibling.text
    return int(depth)
def main():
    host = "https://www.shixi.com/search/index?key=大数据"
    res = open_url(host)
    depth = find_depth(res)

    result = []
    for i in range(depth):
        # This site paginates by page number (&page=1, &page=2, ...), not by
        # an item offset like Douban's start=25*i, so the original
        # '&page=' + str(10 * i) skipped nine out of every ten pages.
        url = host + '&page=' + str(i + 1)
        res = open_url(url)
        result.extend(find_occupation(res))

    with open("大数据.txt", "w", encoding="utf-8") as f:
        for each in result:
            f.write(each)

if __name__ == "__main__":
    main()
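
One more defensive variant, in case it helps: the positional join in find_occupation still assumes all six lists come back the same length. The sketch below is my own suggestion, not part of the original post; it reuses the same class names but joins the fields with tabs, and a new function name (find_occupation_zip) so it can sit alongside the original.

def find_occupation_zip(res):
    # Alternative to find_occupation: collect each field as a list, then
    # zip() the six lists row by row. zip() truncates to the shortest list,
    # so a card with a missing field costs a few rows at the end of the page
    # instead of raising IndexError mid-run.
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    posts = [t.text.strip() for t in soup.find_all("dt", class_="job-name")]
    companies = [t.a.text.strip() for t in soup.find_all("div", class_="comany-info-title")]
    areas = [t.text.strip() for t in soup.find_all("span", class_="job-address")]
    educations = [t.text.strip() for t in soup.find_all("dd", class_="job-des")]
    pays = [t.text.strip() for t in soup.find_all("div", class_="comany-info-des")]
    times = [' '.join(t.text.split()) for t in soup.find_all("span", class_="job-time")]
    rows = zip(posts, companies, areas, educations, pays, times)
    return ['\t'.join(row) + '\n' for row in rows]

Swapping this in only requires changing the call in main() from find_occupation(res) to find_occupation_zip(res); the output file then has one tab-separated line per job.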