PySpark error
dataZscore:
# Z-score standardization of each column
dataZscore = (dataTransformed - dataTransformed.mean(axis=0)) / dataTransformed.std(axis=0)
dataZscore.columns = ['Z' + i for i in dataTransformed.columns]
dataZscore.head()
         ZL        ZR         ZF         ZM        ZC
0  1.435707 -0.944948  14.034016  26.761154  0.315041
1  1.307152 -0.911894   9.073213  13.126864  0.315041
2  1.328381 -0.889859   8.718869  12.653481  0.315041
3  0.658476 -0.416098   0.781585  12.540622  0.315041
4  0.386032 -0.922912   9.923636  13.898736  0.315041
# K-means
# Import packages
import time
import pyspark.ml.clustering as clu

# Record the start time
start = time.time()

# From the plot above, the best number of clusters is 5
# Initialize the K-means model
kmodel = clu.KMeans(k=5, initMode='k-means||', initSteps=10, maxIter=300, seed=0)
kmodel.fit(dataZscore)

# # Record the end time
# end = time.time()
# print("K-means clustering run time: %.3fs" % (end - start))
# # Clustering evaluation metric, the CH score (larger is better):
# score = calinski_harabaz_score(dataZscore, labels)
# print("CH score:", score)
It keeps throwing this error and I can't figure out how to fix it:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-87-01ba619cc4ab> in <module>()
16 # kmodel = KMeans(k=5 ,maxIter=300, initSteps=10, initMode='k-means||', seed=0)
17 kmodel = clu.KMeans(k=5, initMode='k-means||', initSteps=10,maxIter=300, seed=0)
---> 18 kmodel.fit(dataZscore)
19 # kmodel = KMeans(k=5 ,maxIter=300)
20 # kmodel.fit(dataZscore)
~/hadoop/spark/python/pyspark/ml/base.py in fit(self, dataset, params)
159 return self.copy(params)._fit(dataset)
160 else:
--> 161 return self._fit(dataset)
162 else:
163 raise ValueError("Params must be either a param map or a list/tuple of param maps, "
~/hadoop/spark/python/pyspark/ml/wrapper.py in _fit(self, dataset)
333
334 def _fit(self, dataset):
--> 335 java_model = self._fit_java(dataset)
336 model = self._create_model(java_model)
337 return self._copyValues(model)
~/hadoop/spark/python/pyspark/ml/wrapper.py in _fit_java(self, dataset)
330 """
331 self._transfer_params_to_java()
--> 332 return self._java_obj.fit(dataset._jdf)
333
334 def _fit(self, dataset):
~/anaconda3/lib/python3.6/site-packages/pandas/core/generic.py in __getattr__(self, name)
4370 if self._info_axis._can_hold_identifiers_and_holds_name(name):
4371 return self
-> 4372 return object.__getattribute__(self, name)
4373
4374 def __setattr__(self, name, value):
AttributeError: 'DataFrame' object has no attribute '_jdf'
'DataFrame' object has no attribute '_jdf'
Reference: https://stackoverflow.com/questions/55604506/error-attributeerror-dataframe-object-has-no-attribute-jdf
A pandas DataFrame has no _jdf attribute; try converting it to a Spark DataFrame first:
from pyspark import SparkContext
from pyspark.sql import SQLContext

sc = SparkContext.getOrCreate()
sqlContext = SQLContext(sc)
spark_df = sqlContext.createDataFrame(dataZscore)
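(Side note: on Spark 2.x and later the SQLContext entry point is deprecated in favor of SparkSession; a minimal sketch of the same conversion, assuming a standard PySpark install:)

from pyspark.sql import SparkSession

# Get or create the active session, then convert the pandas DataFrame
spark = SparkSession.builder.getOrCreate()
spark_df = spark.createDataFrame(dataZscore)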
Twilight6 replied at 2022-4-29 20:58
Reference: https://stackoverflow.com/questions/55604506/error-attributeerror-dataframe-object-has-no- ...
That fixed it, but now I get a new error:
IllegalArgumentException Traceback (most recent call last)
<ipython-input-53-ce53578cdb27> in <module>()
12 # kmodel = KMeans(k=5 ,maxIter=300, initSteps=10, initMode='k-means||', seed=0)
13 kmodel = clu.KMeans(k=5, initMode='k-means||', initSteps=10,maxIter=300, seed=0)
---> 14 kmodel.fit(spark_df)
15 # kmodel = KMeans(k=5 ,maxIter=300)
16 # kmodel.fit(dataZscore)
~/hadoop/spark/python/pyspark/ml/base.py in fit(self, dataset, params)
159 return self.copy(params)._fit(dataset)
160 else:
--> 161 return self._fit(dataset)
162 else:
163 raise ValueError("Params must be either a param map or a list/tuple of param maps, "
~/hadoop/spark/python/pyspark/ml/wrapper.py in _fit(self, dataset)
333
334 def _fit(self, dataset):
--> 335 java_model = self._fit_java(dataset)
336 model = self._create_model(java_model)
337 return self._copyValues(model)
~/hadoop/spark/python/pyspark/ml/wrapper.py in _fit_java(self, dataset)
330 """
331 self._transfer_params_to_java()
--> 332 return self._java_obj.fit(dataset._jdf)
333
334 def _fit(self, dataset):
~/hadoop/spark/python/lib/py4j-0.10.9-src.zip/py4j/java_gateway.py in __call__(self, *args)
1303 answer = self.gateway_client.send_command(command)
1304 return_value = get_return_value(
-> 1305 answer, self.gateway_client, self.target_id, self.name)
1306
1307 for temp_arg in temp_args:
~/hadoop/spark/python/pyspark/sql/utils.py in deco(*a, **kw)
115 # Hide where the exception came from that shows a non-Pythonic
116 # JVM exception message.
--> 117 raise converted from None
118 else:
119 raise
IllegalArgumentException: features does not exist. Available: ZL, ZR, ZF, ZM, ZC
澍梵. replied at 2022-4-29 22:07
That fixed it, but now I get a new error ...
I'm not sure about this one.
Twilight6 replied at 2022-4-29 22:09
I'm not sure about this one.
Alright, thanks anyway!
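For reference, the second error (IllegalArgumentException: features does not exist) means that a Spark ML estimator reads its input from a single vector column, named "features" by default, which the converted frame does not have yet. A minimal sketch of the usual fix, packing the Z-score columns with VectorAssembler (column names taken from the error message; variable names assumed from the thread):

from pyspark.ml.feature import VectorAssembler
import pyspark.ml.clustering as clu

# Pack the five Z-score columns into one vector column named "features",
# the default featuresCol that Spark ML estimators look for
assembler = VectorAssembler(inputCols=['ZL', 'ZR', 'ZF', 'ZM', 'ZC'],
                            outputCol='features')
assembled_df = assembler.transform(spark_df)

kmodel = clu.KMeans(k=5, initMode='k-means||', initSteps=10, maxIter=300, seed=0)
model = kmodel.fit(assembled_df)

For the commented-out CH-score evaluation, note that calinski_harabaz_score is scikit-learn's (old) spelling and expects in-memory arrays plus cluster labels; a Spark-native alternative is the silhouette metric in pyspark.ml.evaluation.ClusteringEvaluator.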