I copied a piece of code from the internet, but it does not include the steps for saving and loading the model. For this code, should I save the values of W and b, or the values of A and Z, as the data kept after training? What exactly are W, b, A and Z, and what is cache?
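To make the first question concrete, this is roughly what I imagine saving only W and b would look like once training is done (pickle is just the first option that came to mind; the nn variable and the file name are made up by me):

import pickle

# after nn.train(...) has finished, keep only the learned parameters
with open("model.pkl", "wb") as f:
    pickle.dump({"W": nn.W, "b": nn.b}, f)

# later: rebuild the network and restore the parameters before predicting
with open("model.pkl", "rb") as f:
    params = pickle.load(f)
nn2 = NN(layers, data)
nn2.W, nn2.b = params["W"], params["b"]

Is that the right idea, or do A, Z and cache also need to be stored?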
One more question:
# number of neurons in each layer of the network
layers = [feature_num, 20, 15, 1]
L = len(layers)
When I define [feature_num, 20, 15, 1], do these four numbers mean that I have one input layer, two hidden layers, and one output layer?
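To check my own understanding of this second question, I would expect the __init__ loop below to create weights with these shapes (feature_num here is just a placeholder value standing in for my real feature count):

feature_num = 8  # placeholder value, stands for my real feature count
layers = [feature_num, 20, 15, 1]
for l in range(1, len(layers)):
    print("W" + str(l), (layers[l], layers[l - 1]))
# expected: W1 (20, 8), W2 (15, 20), W3 (1, 15)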
class NN():
    def __init__(self, layers, data):
        L = len(layers)  # layers is a list holding the number of neurons in each layer, from the input layer to the output layer
        # initialize the weight and bias parameters of each layer
        self.W = {}
        self.b = {}
        for l in range(1, L):
            # print(layers[l])
            self.W["W" + str(l)] = np.mat(np.random.randn(layers[l], layers[l - 1]) * 0.1)
            self.b["b" + str(l)] = np.mat(np.random.randn(layers[l], 1) * 0.1)
        self.A = {}  # stores the input and the activations of every later layer; the output layer's activation is the last entry
        self.Z = {}  # stores the pre-activation values from the second layer up to the output layer
        self.cache = {}  # stores the per-layer delta values described in https://www.cnblogs.com/pinard/p/6422831.html; the first one computed is the output layer's delta
        # keep the dataset inside the class
        self.data = data
    def forward_activation_02(self, L, flag):  # this L is the same L as above, i.e. the total number of layers
        # initialize the input as the activation of layer 0
        self.A["A0"] = self.inputs
        for l in range(1, L):
            if flag == 0 or l == 1 or l == L - 1:
                self.Z["Z" + str(l)] = self.W["W" + str(l)] * self.A["A" + str(l - 1)] + self.b["b" + str(l)]
                self.A["A" + str(l)] = sigmoid(self.Z["Z" + str(l)])
            else:
                # apply dropout regularization
                self.d = np.random.rand(self.A["A" + str(l - 1)].shape[0], self.A["A" + str(l - 1)].shape[1])
                self.d = self.d < keep_prob
                self.A["A" + str(l - 1)] = np.multiply(self.A["A" + str(l - 1)], self.d)
                self.A["A" + str(l - 1)] /= keep_prob
                self.Z["Z" + str(l)] = self.W["W" + str(l)] * self.A["A" + str(l - 1)] + self.b["b" + str(l)]
                self.A["A" + str(l)] = sigmoid(self.Z["Z" + str(l)])
        # update cache (the back-propagated deltas), starting from the output layer and going backwards
        for l in reversed(range(1, L)):
            if l == L - 1:
                self.cache["C" + str(l)] = np.multiply(self.A["A" + str(l)] - self.output,
                                                       dsigmoid(self.Z["Z" + str(l)]))
            else:
                self.cache["C" + str(l)] = np.multiply(self.W["W" + str(l + 1)].T * self.cache["C" + str(l + 1)],
                                                       dsigmoid(self.Z["Z" + str(l)]))
        err = np.abs(self.A["A" + str(L - 1)] - self.output)
        return err
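From the linked blog post, my understanding of what cache holds is the back-propagated error of each layer, computed output layer first and then backwards (with C_l being δ^l, and layer L-1 being the output layer):

δ^{L-1} = (a^{L-1} − y) ⊙ σ'(z^{L-1})
δ^{l} = ((W^{l+1})^T δ^{l+1}) ⊙ σ'(z^{l}),   for l = L−2, ..., 1

which looks like exactly what the reversed loop above computes. Please correct me if I am reading it wrong.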
    def backPropagate_02(self, learning_rate, L):  # back propagation (parameter update)
        alpha = learning_rate
        m = self.inputs.shape[1]  # number of samples in the current batch
        for i in range(L):
            l = L - i - 1
            if l > 0:
                self.b['b' + str(l)] = self.b['b' + str(l)] - alpha * 1.0 / m * (
                        self.cache["C" + str(l)] * np.ones((m, 1)))
                self.W['W' + str(l)] = self.W['W' + str(l)] - alpha * 1.0 / m * (
                        self.cache["C" + str(l)] * self.A["A" + str(l - 1)].T)
    def init_prameter(self, batch_size):
        # draw batch_size samples (images) for each training step
        train_dataloader = DataLoader(self.data, batch_size=batch_size, drop_last=False, shuffle=True)
        for train_data in train_dataloader:
            imgs = train_data.numpy()
            batch_xs, batch_ys = imgs[:, :feature_num], imgs[:, feature_num]
            self.inputs = np.mat(batch_xs).transpose()
            self.output = np.mat(sigmoid(batch_ys))
    def train(self, iterations, learning_rate, L, batch_size, train_shape):
        # mini-batch training
        for i in range(iterations):
            if (train_shape % batch_size) != 0:
                print("bad batch_size: train_shape must be divisible by it")
            for j in range(int(train_shape / batch_size)):
                self.init_prameter(batch_size)
                err = self.forward_activation_02(L, 1)
                self.backPropagate_02(learning_rate, L)
            ############################# evaluate on the test set after every training epoch
            asdf, aswe = test_data[:, :feature_num], test_data[:, feature_num]
            self.inputs = np.mat(asdf).transpose()
            self.output = np.mat(sigmoid(aswe))
            test_num = self.output.shape[1]
            test_loss = self.forward_activation_02(L, 0)
            print("training set: iteration", i, "error", np.sum(err) / batch_size,
                  "| test set: iteration", i, "error", np.sum(test_loss) / test_num)
            if np.sum(test_loss) / test_num < 0.02 or i == (iterations - 1):
                true_ = self.output.transpose()
                pred_ = self.A["A" + str(L - 1)].transpose()
                print("ground truth of the test samples:", true_)
                print("predictions for the test samples:", pred_)
                plt.plot(pred_, label="pred")  # plot the predicted values
                plt.legend()
                plt.plot(true_, label="true")  # plot the true values
                plt.legend()
                plt.show()
                break
        return true_, pred_
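For completeness: the snippet also relies on a few names that are defined earlier in the full script I copied (np, DataLoader, plt, feature_num, keep_prob, test_data, and the activation helpers). I assume sigmoid and dsigmoid are the usual logistic function and its derivative of the pre-activation, roughly:

import numpy as np

def sigmoid(z):
    # logistic activation, works element-wise on np.mat as well
    return 1.0 / (1.0 + np.exp(-z))

def dsigmoid(z):
    # derivative of sigmoid, applied to the pre-activation Z in the code above
    s = sigmoid(z)
    return np.multiply(s, 1 - s)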