|
马上注册,结交更多好友,享用更多功能^_^
您需要 登录 才可以下载或查看,没有账号?立即注册
x
下面是我学习时候模仿别人的简单神经网络代码
import numpy as np
def tanh(x):
return np.tanh(x)
def tanh_deriv(x):
return 1 - np.tanh(x)*np.tanh(x)
def logistic(x):
return 1/(1+np.exp(-x))
def logistic_derivative(x):
return logistic(x)*(1-logistic(x))
#构造神经网络
class NeuralNetwork:
def __init__(self, layers, activation='tanh'):
if activation == 'logistic':
self.activation = logistic
self.activation_deriv = logistic_derivation
elif activation == 'tanh':
self.activation = tanh
self.activation_deriv = tanh_deriv
#建立初始权值向量 并向其中添加权值
self.weight = []
for i in range(1, len(layers)-1):
self.weights.append((2*np.random.random((layers[i-1] + 1, layers[i] + 1 ))-1)*0.25)
self.weights.append((2*np.random.random((layers[i] + 1, layers[i+1] + 1 ))-1)*0.25)
def fit(self, X, y, learning_rate=0.2, epochs=10000):
X = np.atleast_2d(X)
temp = np.ones(X.shape[0], X.shape[1]+1)
temp[:, 0:-1] = X #保留最后一行作为偏量bias
X = temp
y = np.array(y)
for k in range(epochs):
i = np.random.randint(X.shape[0])
a = [X[i]]
for l in range(len(self.weights)): #每一层的正向计算
a.append(self.activation(np.dot(a[l], self.weights[1])))
error = y[i] - a[-1] #在最高层计算误差
deltas = [error*self.activation_deriv(a[-1])]
for l in range(len(a) - 2, 0, -1): #开始误差反传,
deltas.append(deltas[-1].dot(self.weights[1].T)*self.activation)
deltas.reverse()
for i in range(len(self.weights)):
layer = np.atleast_2d(a[i])
delta = np.atleast_2d(delats[i])
self.weights[i] += learning_rate*layer.dot(delta)
def predict(self, x):
x = np.array(x)
temp = np.ones(x.shape[o]+1)
temp[0:-1] = x
a = temp
for l in range(0, len(self.weights)):
a = self.activation(np.dot(a, self.weights[1]))
return a
然后在第二个程序中加以应用
from ann import NeuralNetwork
import numpy as np
nn = NeuralNetwork([2,2,1], 'tanh')
X = np.array([0,0], [0,1], [1,0], [1,1])
y = np.array([0, 1, 1, 0])
nn.fit(X, y)
for i in [[0,0], [0,1], [1,0], [1,1]]:
print(i, nn.predict(i))
两段代码完全模仿了教程里面,但是我这里报错了
AttributeError: 'NeuralNetwork' object has no attribute 'weights'
希望请教下神通广大的鱼友们!!! |
|