[Machine Learning] Fitting a Linear Function with Gradient Descent in Python
import numpy as np
import random
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # only needed if you plot the descent path (GDX, GDY, GDZ) in 3D
m = 1000  # number of sample points

def f(x):
    # Ground truth is y = 8x + 9; the commented lines are milder noise variants.
    # return 8.0 * x + 9.0 + (-1.5 + random.random() * 3)
    # return 8.0 * x + 9.0 + random.uniform(-1.5, 1.5)
    return 8.0 * x + 9.0 + (-1000 + random.random() * 2000)  # uniform noise on [-1000, 1000)
def E(a, b, P):
    # Mean squared error of the line y = a*x + b over the sample points.
    s = 0
    for x, y in P:
        s += (a * x + b - y) ** 2
    return s / len(P)
def d_f_A(a, b, P):
    # Partial derivative of E with respect to a.
    s = 0
    for x, y in P:
        s += 2 * x * (a * x + b - y)
    return s / len(P)
def d_f_B(a, b, P):
    # Partial derivative of E with respect to b.
    s = 0
    for x, y in P:
        s += 2 * (a * x + b - y)
    return s / len(P)
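For reference, with sample points (x_i, y_i) the three functions above implement

    E(a, b) = (1/m) * Σ (a*x_i + b - y_i)^2
    ∂E/∂a = (2/m) * Σ x_i * (a*x_i + b - y_i)
    ∂E/∂b = (2/m) * Σ (a*x_i + b - y_i)

so each iteration below steps (a, b) against the gradient of the mean squared error.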
P = []
X = []
Y = []
for i in range(m):
    x = -1000 + random.random() * 2000  # sample x uniformly from [-1000, 1000)
    X.append(x)
    y = f(x)
    Y.append(y)
    P.append((x, y))
learning_rate = 0.0000001
max_loop = 1000
tolerance = 0.01
a_init = random.random() * 2
a = a_init
b_init = 8 + random.random() * 2
b = b_init
GDX = []  # trajectory of a
GDY = []  # trajectory of b
GDZ = []  # trajectory of E(a, b)
E_pre = 0
for i in range(max_loop):
    d_f_a = d_f_A(a, b, P)
    d_f_b = d_f_B(a, b, P)
    a = a - learning_rate * d_f_a
    b = b - learning_rate * d_f_b
    GDX.append(a)
    GDY.append(b)
    E_cur = E(a, b, P)
    GDZ.append(E_cur)
    if abs(E_cur - E_pre) < tolerance:  # stop once the loss barely changes
        break
    E_pre = E_cur
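The learning rate has to be this small because x ranges over [-1000, 1000): the x_i^2 factors in ∂E/∂a are on the order of 10^5 to 10^6, so a noticeably larger step size makes the iteration diverge.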
print('initial a =', a_init)
print('initial b =', b_init)
print('fitted a =', a, 'b =', b)
print('E(a, b) =', E(a, b, P))
plt.scatter(X, Y)  # plot the noisy samples and the fitted line
xe = np.arange(-1000, 1000)
plt.plot(xe, a * xe + b)
plt.show()
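As a quick sanity check (a minimal sketch, not part of the original script), the result can be compared against NumPy's closed-form least-squares fit; np.polyfit with degree 1 returns the slope and intercept directly:

    # Closed-form least-squares fit for comparison; X and Y are the lists built above.
    coeffs = np.polyfit(X, Y, 1)  # degree-1 polynomial: [slope, intercept]
    print('np.polyfit: a =', coeffs[0], 'b =', coeffs[1])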
Does anyone have an example of gradient descent for a two-variable function with constraints?
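A common way to handle simple constraints is projected gradient descent: take an ordinary gradient step, then project the point back onto the feasible set. A minimal sketch, assuming a made-up objective (x - 3)^2 + (y - 2)^2 and a box constraint 0 <= x, y <= 1, where the projection is just clipping:

    import numpy as np

    def g(p):
        # objective to minimize: (x - 3)^2 + (y - 2)^2
        return (p[0] - 3) ** 2 + (p[1] - 2) ** 2

    def grad_g(p):
        # gradient of g
        return np.array([2 * (p[0] - 3), 2 * (p[1] - 2)])

    def project(p):
        # projection onto the box [0, 1] x [0, 1] is coordinate-wise clipping
        return np.clip(p, 0.0, 1.0)

    p = np.array([0.5, 0.5])  # feasible starting point
    lr = 0.1
    for _ in range(200):
        p = project(p - lr * grad_g(p))

    print('constrained minimum near', p)  # expect (1, 1), the box corner closest to (3, 2)

For more general constraints the projection step becomes a small optimization problem of its own; penalty methods or Lagrange multipliers are the usual alternatives when no cheap projection exists.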