Scraping 58同城: the code runs without errors but fetches nothing
The code runs, but no data comes out: when it finishes, the CSV file is empty. The problem is probably in the select locators, but I just can't crack it.
Thanks for any help!
import urllib.request
from bs4 import BeautifulSoup
import pandas as pd

# Fetch a page
def request_Data(url):
    # Build the request object
    req = urllib.request.Request(url)
    page_data_list = []
    with urllib.request.urlopen(req) as response:
        data = response.read()
        htmlstr = data.decode()
        L = parse_HTMLData(htmlstr)
        page_data_list.extend(L)
    return page_data_list

# Parse the page
def parse_HTMLData(htmlstr):
    sp = BeautifulSoup(htmlstr, 'html.parser')
    # List of house entries
    house_list = sp.select('body > div.main-wrap > div.content-wrap > div.content-side-left> li:nth-child')
    # Records on the current page
    page_list = []
    for house in house_list:
        # One row of data
        rows_list = []
        # Title
        title = house.select('body > div.main-wrap > div.content-wrap > div.content-side-left> li')
        title = (title.text).strip()
        rows_list.append(title)
        # Info block
        infos = house.select('body > div.main-wrap > div.content-wrap > div.content-side-left > li > div.list-info > p')
        # Layout (户型)
        house_type = (infos.text).strip()
        rows_list.append(house_type)
        # Area
        house_area = (infos.text).strip()
        rows_list.append(house_area)
        # Orientation
        house_face = (infos.text).strip()
        rows_list.append(house_face)
        # Floor
        house_floor = (infos.text).strip()
        rows_list.append(house_floor)
        # District
        addr_dist = house.select('body > div.main-wrap > div.content-wrap > div.content-side-left > li > div.list-info > p:nth-child(3) > span > a:nth-child(2)')
        # Selector as copied from the browser devtools:
        # body > div.main-wrap > div.content-wrap > div.content-side-left > ul > li:nth-child(1) > div.list-info > p:nth-child(3) > span > a:nth-child(2)
        rows_list.append(addr_dist)
        # Community name
        addr_name = house.select('body > div.main-wrap > div.content-wrap > div.content-side-left > li > div.list-info > p:nth-child(3) > span > a:nth-child(1)')
        addr_name = (addr_name.text).strip()
        rows_list.append(addr_name)
        # Total price
        total_price = house.select('body > div.main-wrap > div.content-wrap > div.content-side-left > li.sendsoj.hove > div.price > p.sum > b')
        total_price = (total_price.text).strip()
        rows_list.append(total_price)
        # Unit price
        price = house.select('body > div.main-wrap > div.content-wrap > div.content-side-left > li.sendsoj.hove > div.price > p.unit')
        price = (price.text).strip()
        rows_list.append(price)
        page_list.append(rows_list)
    return page_list

url_temp = 'http://sh.ganji.com/ershoufang/pn{}/'
data_list = []
for i in range(1, 11):  # the site has 70 pages in total
    url = url_temp.format(i)
    print(url)
    print('+++++第{}页++++++'.format(i))
    try:
        L = request_Data(url)
        data_list.extend(L)
    except Exception as e:
        # Stop looping
        print('不再有数据,结束循环')
        break
print(data_list)

# Save the data
# Column names
colsname = ['标题', '户型', '面积', '朝向', '楼层', '城区', '小区名', '总价', '单价']
df = pd.DataFrame(data_list, columns=colsname)
df.to_csv('house_data.csv', index=False, encoding='gbk')
Check it step by step; if all else fails, switch to xpath parsing.

Don't think about select yet. First check whether you fetched any data at all, then worry about the locators.
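A minimal sketch of that check, reusing the htmlstr variable from the code above (the li.sendsoj selector here is only a guess borrowed from the OP's own total-price selector, not verified against the live page):

from bs4 import BeautifulSoup

print(len(htmlstr))  # a short response usually means a captcha/redirect page, not the listing
sp = BeautifulSoup(htmlstr, 'html.parser')
# Count matches before trusting a selector; 0 means the locator finds nothing on this page
print(len(sp.select('li.sendsoj')))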
print(htmlstr)

Have you printed this? I ran your code, and what comes out is a page asking for a captcha, so no data is being fetched at all.

# _*_ coding: utf-8 _*_
# Developer: suchocolate
# Date: 9/15/2020 22:14
# File name: 58tc.py
# Development tool: PyCharm
import requests
from lxml import etree
from openpyxl import Workbook


def main():
    wb = Workbook()
    ws = wb.active
    head = ['标题', '户型', '面积', '朝向', '楼层', '城区', '小区名', '总价(万)', '单价']
    ws.append(head)
    base_url = 'https://sh.58.com/ershoufang/pn'
    headers = {'user-agent': 'firefox'}
    for num in range(1, 11):
        url = base_url + str(num)
        r = requests.get(url, headers=headers)
        html = etree.HTML(r.text)
        result = html.xpath('//li[@class="sendsoj"]')
        for item in result:
            data = []
            # Listing title
            title = item.xpath('./div/h2/a/text()')
            data.extend(title)
            # 户型/面积/朝向/楼层 are separate <span>s in the info block and come
            # back together as one list, so run the query once rather than
            # repeating the identical query for each field
            specs = item.xpath('./div/p/span/text()')
            data.extend(specs)
            # 城区 and 小区名 are the two <a> links in the address line
            addr = item.xpath('./div/p/span/a/text()')
            data.extend(addr)
            # Total price (万)
            total_price = item.xpath('./div/p/b/text()')
            data.extend(total_price)
            # Unit price (may need cleanup: this also picks up stray text nodes)
            uni_price = item.xpath('./div/p/text()')
            data.extend(uni_price)
            ws.append(data)
    wb.save('test.xlsx')


if __name__ == '__main__':
    main()

https://sh.58.com/ershoufang/pn1/
Try switching to this URL, or just use the requests module directly. There is a 302 redirect involved, and I'm not sure whether urllib.request handles redirects automatically; I've always used requests, and requests does follow redirects automatically.
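A quick way to see whether a redirect is happening, as a minimal sketch using the standard requests API with the URL suggested above (the user-agent header is the same one used in the script earlier in the thread):

import requests

r = requests.get('https://sh.58.com/ershoufang/pn1/', headers={'user-agent': 'firefox'})
print(r.status_code)  # status of the final response after redirects
print(r.history)      # any intermediate 30x responses that requests followed
print(r.url)          # final URL; landing on a verify/captcha address means the request was blocked

For what it's worth, urllib.request also follows 301/302 redirects by default (its default opener includes HTTPRedirectHandler), so the captcha page is the more likely culprit here.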