|
马上注册,结交更多好友,享用更多功能^_^
您需要 登录 才可以下载或查看,没有账号?立即注册
x
import numpy as np

# Training data generated by the linear model y = 3*x0 + 2*x1 + x2,
# where the bias feature x0 == 1 is prepended inside the solvers below.
x_train = np.array([
    [1, 2], [2, 1], [2, 3], [3, 5], [1, 3],
    [4, 2], [7, 3], [4, 5], [11, 3], [8, 7],
])
y_train = np.array([[7, 8, 10, 14, 8, 13, 20, 16, 28, 26]])
x_test = np.array([[1, 4], [2, 2], [2, 5], [5, 3], [1, 5], [4, 1]])
### Least squares: coefficients = (X.T * X)^-1 * X.T * Y
def function1(x_train, y_train):
    """Ordinary least squares fit via the normal equations.

    Solves (X^T X) theta = X^T y with ``np.linalg.solve`` instead of
    forming the explicit inverse -- mathematically identical to
    ``(X^T X)^-1 X^T y`` but numerically better conditioned and cheaper.

    Parameters
    ----------
    x_train : (m, n) array of feature rows (without the bias column).
    y_train : (1, m) row vector of targets.

    Returns
    -------
    (n + 1, 1) column vector of coefficients; row 0 is the intercept.
    """
    m, n = x_train.shape
    # Prepend the constant feature x0 = 1 so the intercept is learned
    # as an ordinary coefficient.
    x = np.hstack((np.ones((m, 1)), x_train))
    theat = np.linalg.solve(x.T.dot(x), x.T.dot(y_train.T))
    return theat
### Gradient descent
# rate: learning rate -- too large diverges, too small converges slowly
# theat: initial coefficients; any starting point works
# num: number of training samples (defaults to x_train's row count)
# minnum: convergence threshold on the per-step update magnitude
def function2(x_train, y_train, rate, theat, num=None, minnum=0.000001):
    """Fit linear-regression coefficients by batch gradient descent.

    Iterates ``theta -= rate/num * X^T (X theta - y)`` until every
    component of the update is smaller than ``minnum``.

    Parameters
    ----------
    x_train : (m, n) array of feature rows (without the bias column).
    y_train : (1, m) row vector of targets.
    rate : learning rate.
    theat : initial coefficients, any array of n + 1 values.
    num : sample count used to scale the gradient; defaults to m,
        which is what callers previously had to pass explicitly.
    minnum : stop once ``abs(update) < minnum`` holds element-wise.

    Returns
    -------
    (n + 1, 1) column vector of coefficients; row 0 is the intercept.

    NOTE(review): if ``rate`` is too large the iteration diverges and
    this loop never terminates -- same contract as the original.
    """
    m, n = x_train.shape
    if num is None:
        num = m  # generalization: infer the sample count from the data
    # Prepend the constant feature x0 = 1 for the intercept term.
    x = np.hstack((np.ones((m, 1)), x_train))
    y = y_train.T
    theat = theat.reshape(n + 1, 1).astype(float)
    while True:
        residual = x.dot(theat) - y
        # Scaled gradient of the squared-error cost.
        step = x.T.dot(residual) * rate / num
        theat = theat - step
        # Converged when the whole update vector is negligibly small.
        if (np.abs(step) < minnum).all():
            break
    return theat
# Initial coefficients for gradient descent: (bias, w1, w2).
theat = np.array([0, 0, 0])
# Single-argument print(...) runs under both Python 2 and Python 3;
# the original bare `print expr` statements are a SyntaxError on 3.x.
print(function1(x_train, y_train))
print(function2(x_train, y_train, rate=0.01, theat=theat, num=10))
复制代码
Python 2.7
|
|