
A strange error from tf.add()

Posted on 2020-8-6 17:09:01

This post was last edited by 猪仔很忙 on 2020-8-7 18:12

When I run the code below, why does perturb get printed twice? And why does it come out as None the second time?
import sys
sys.path.append('../')
from keras import Input
from keras.layers import Dense
import keras.backend as K
from keras.layers import Lambda
from keras.layers import LSTM
import tensorflow as tf

class RNN_SEPARATE_2(object):

    def __init__(self,
                 time_step=64,
                 fearure_dim=2,
                 hidden_size=256, # 128
                 dropout_rate=0.2,
                 name='rnn'):
        self.name = name
        self.time_step = time_step
        self.fearure_dim = fearure_dim
        self.hidden_size = hidden_size
        self.dropout_rate = dropout_rate

    def __call__(self, inputs):
        x = inputs
        x_1 = Lambda(lambda x: x[:,:,0])(x)
        x_1 = Lambda(lambda x: K.expand_dims(x, axis=-1))(x_1)
        x_2 = Lambda(lambda x: x[:,:,1:])(x)

        h_2 = LSTM(self.hidden_size, return_sequences=True)(x_2) # batch, seq2, dim
        h_2_first = Lambda(lambda x: x[:,1:,])(h_2)
        h_2_last = Lambda(lambda x: x[:,-1,:])(h_2)

        h_1 = LSTM(self.hidden_size, return_sequences=True)(x_1) # batch, seq1, dim
        h_1_first = Lambda(lambda x:x[:1:,:])(h_1)
        h_1_last = Lambda(lambda x: x[:,-1,:])(h_1) # batch, dim

        h = Lambda(lambda x: K.concatenate([x[0], x[1]], axis=-1))([h_1, h_2]) # batch, seq, dim
        h = LSTM(self.hidden_size, return_sequences=True)(h) # batch, seq, dim
        h = LSTM(self.hidden_size, return_sequences=True)(h) # batch, seq, dim
        h_first = Lambda(lambda x: x[:,:-1,:])(h) # batch, seq-1, dim
        h_last = Lambda(lambda x: x[:,-1,:])(h) # batch, dim

        y = Lambda(lambda x:x[0]+x[1]+x[2])([h_1_last, h_2_last, h_last])
        return y

def get_adversary(args):
    y_true = args[0]
    y_pred = args[1]
    v_final = args[2]
    pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
    pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
    loss = -K.mean(0.75 * K.pow(1. - pt_1, 0) * K.log(pt_1)) - K.mean((1 - 0.75) * K.pow(pt_0, 0) * K.log(1. - pt_0))
    perturb = tf.gradients(loss, [v_final])[0]
    print(perturb)
    v_final_adv = tf.add(v_final, perturb)
    return v_final_adv

time_step = 10
feature_dim = 7
trace_input = Input(shape=(time_step, feature_dim))
label_input = Input(shape=(1,))
v_final = RNN_SEPARATE_2(time_step, feature_dim)(trace_input)
pred = Dense(1, activation='sigmoid', name='pred')(v_final)
v_final_adv = Lambda(get_adversary)([label_input, pred, v_final])


Output:
Tensor("lambda_12/gradients/pred/MatMul_grad/MatMul:0", shape=(?, 256), dtype=float32)
None
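For reference, in TF 1.x graph mode tf.gradients returns None rather than a zero tensor when the tensor you differentiate with respect to does not lie on the computation path of the loss. A minimal sketch of that behavior, separate from the model above (the placeholder names here are only illustrative):

import tensorflow as tf

# Two independent placeholders; the loss is built from `a` only.
a = tf.placeholder(tf.float32, shape=(None, 3))
b = tf.placeholder(tf.float32, shape=(None, 3))
loss = tf.reduce_mean(tf.square(a))

print(tf.gradients(loss, [a])[0])  # a gradient Tensor
print(tf.gradients(loss, [b])[0])  # None: `b` is not connected to `loss`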


Error message:
Traceback (most recent call last):
  File "D:\Anaconda3\envs\edogawaAi\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 527, in _apply_op_helper
    preferred_dtype=default_dtype)
  File "D:\Anaconda3\envs\edogawaAi\lib\site-packages\tensorflow\python\framework\ops.py", line 1224, in internal_convert_to_tensor
    ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
  File "D:\Anaconda3\envs\edogawaAi\lib\site-packages\tensorflow\python\framework\constant_op.py", line 305, in _constant_tensor_conversion_function
    return constant(v, dtype=dtype, name=name)
  File "D:\Anaconda3\envs\edogawaAi\lib\site-packages\tensorflow\python\framework\constant_op.py", line 246, in constant
    allow_broadcast=True)
  File "D:\Anaconda3\envs\edogawaAi\lib\site-packages\tensorflow\python\framework\constant_op.py", line 284, in _constant_impl
    allow_broadcast=allow_broadcast))
  File "D:\Anaconda3\envs\edogawaAi\lib\site-packages\tensorflow\python\framework\tensor_util.py", line 454, in make_tensor_proto
    raise ValueError("None values not supported.")
ValueError: None values not supported.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "D:\Anaconda3\envs\edogawaAi\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 541, in _apply_op_helper
    values, as_ref=input_arg.is_ref).dtype.name
  File "D:\Anaconda3\envs\edogawaAi\lib\site-packages\tensorflow\python\framework\ops.py", line 1224, in internal_convert_to_tensor
    ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
  File "D:\Anaconda3\envs\edogawaAi\lib\site-packages\tensorflow\python\framework\constant_op.py", line 305, in _constant_tensor_conversion_function
    return constant(v, dtype=dtype, name=name)
  File "D:\Anaconda3\envs\edogawaAi\lib\site-packages\tensorflow\python\framework\constant_op.py", line 246, in constant
    allow_broadcast=True)
  File "D:\Anaconda3\envs\edogawaAi\lib\site-packages\tensorflow\python\framework\constant_op.py", line 284, in _constant_impl
    allow_broadcast=allow_broadcast))
  File "D:\Anaconda3\envs\edogawaAi\lib\site-packages\tensorflow\python\framework\tensor_util.py", line 454, in make_tensor_proto
    raise ValueError("None values not supported.")
ValueError: None values not supported.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "<input>", line 1, in <module>
  File "D:\JetBrains\PyCharm 2020.1.2\plugins\python\helpers\pydev\_pydev_bundle\pydev_umd.py", line 197, in runfile
    pydev_imports.execfile(filename, global_vars, local_vars)  # execute the script
  File "D:\JetBrains\PyCharm 2020.1.2\plugins\python\helpers\pydev\_pydev_imps\_pydev_execfile.py", line 18, in execfile
    exec(compile(contents+"\n", file, 'exec'), glob, loc)
  File "D:/PycharmProjects/turningpoint-master/main.py", line 67, in <module>
    modelName=modelName)
  File "D:\PycharmProjects\turningpoint-master\baseline_main_rnn.py", line 37, in train_rnn_turning_point
    clf = rnn_turning_point.build_train(datas, machineID, modelName)
  File "D:\PycharmProjects\turningpoint-master\model\rnn_turning_point.py", line 56, in build_train
    v_final_adv = Lambda(get_adversary)([label_input, pred, v_final])
  File "D:\Anaconda3\envs\edogawaAi\lib\site-packages\keras\engine\base_layer.py", line 474, in __call__
    output_shape = self.compute_output_shape(input_shape)
  File "D:\Anaconda3\envs\edogawaAi\lib\site-packages\keras\layers\core.py", line 649, in compute_output_shape
    x = self.call(xs)
  File "D:\Anaconda3\envs\edogawaAi\lib\site-packages\keras\layers\core.py", line 687, in call
    return self.function(inputs, **arguments)
  File "D:\PycharmProjects\turningpoint-master\model\rnn_turning_point.py", line 37, in get_adversary
    v_final_adv = tf.add(v_final, perturb)
  File "D:\Anaconda3\envs\edogawaAi\lib\site-packages\tensorflow\python\ops\gen_math_ops.py", line 386, in add
    "Add", x=x, y=y, name=name)
  File "D:\Anaconda3\envs\edogawaAi\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 545, in _apply_op_helper
    (input_name, err))
ValueError: Tried to convert 'y' to a tensor and failed. Error: None values not supported.
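Reading the traceback, the second print does not seem to come from the real forward pass: keras\layers\core.py line 649 shows that Lambda.compute_output_shape re-runs the Lambda function on freshly created placeholders just to infer the output shape. In that throwaway call the placeholder standing in for v_final is presumably not connected to the loss built from the other dummy placeholders, so tf.gradients returns None and tf.add(v_final, None) fails. Below is only a sketch of two possible ways around this, untested against this model, assuming the same imports as the code above (get_adversary_safe is an illustrative name):

# Sketch only -- an assumption, not a verified fix for the posted model.
# Variant of get_adversary that tolerates the extra dummy call Keras makes
# while inferring the Lambda output shape.
def get_adversary_safe(args):
    y_true, y_pred, v_final = args
    pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
    pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
    loss = -K.mean(0.75 * K.pow(1. - pt_1, 0) * K.log(pt_1)) - K.mean((1 - 0.75) * K.pow(pt_0, 0) * K.log(1. - pt_0))
    perturb = tf.gradients(loss, [v_final])[0]
    if perturb is None:  # dummy call during shape inference: v_final is not on the loss path
        perturb = tf.zeros_like(v_final)
    return tf.add(v_final, perturb)

# Alternatively, declare the output shape up front so Keras never needs to
# re-run the function on dummy inputs (v_final_adv has the same shape as v_final):
# v_final_adv = Lambda(get_adversary,
#                      output_shape=lambda shapes: shapes[2])([label_input, pred, v_final])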

Posted on 2020-8-6 17:12:33
There's nothing I can do to help.

Posted on 2020-8-6 17:41:27
The code isn't complete... we can't test it.

Posted by the OP on 2020-8-6 17:55:56
Quoting Twilight6, posted on 2020-8-6 17:41:
The code isn't complete... we can't test it.

Sorry.. here is the code:
import sys
sys.path.append('../')
from keras import Input
from keras.layers import Dense
import keras.backend as K
from keras.layers import Lambda
from keras.layers import LSTM
import tensorflow as tf

class RNN_SEPARATE_2(object):

    def __init__(self,
                 time_step=64,
                 fearure_dim=2,
                 hidden_size=256, # 128
                 dropout_rate=0.2,
                 name='rnn'):
        self.name = name
        self.time_step = time_step
        self.fearure_dim = fearure_dim
        self.hidden_size = hidden_size
        self.dropout_rate = dropout_rate

    def __call__(self, inputs):
        x = inputs
        x_1 = Lambda(lambda x: x[:,:,0])(x)
        x_1 = Lambda(lambda x: K.expand_dims(x, axis=-1))(x_1)
        x_2 = Lambda(lambda x: x[:,:,1:])(x)

        h_2 = LSTM(self.hidden_size, return_sequences=True)(x_2) # batch, seq2, dim
        h_2_first = Lambda(lambda x: x[:,1:,])(h_2)
        h_2_last = Lambda(lambda x: x[:,-1,:])(h_2)

        h_1 = LSTM(self.hidden_size, return_sequences=True)(x_1) # batch, seq1, dim
        h_1_first = Lambda(lambda x:x[:1:,:])(h_1)
        h_1_last = Lambda(lambda x: x[:,-1,:])(h_1) # batch, dim

        h = Lambda(lambda x: K.concatenate([x[0], x[1]], axis=-1))([h_1, h_2]) # batch, seq, dim
        h = LSTM(self.hidden_size, return_sequences=True)(h) # batch, seq, dim
        h = LSTM(self.hidden_size, return_sequences=True)(h) # batch, seq, dim
        h_first = Lambda(lambda x: x[:,:-1,:])(h) # batch, seq-1, dim
        h_last = Lambda(lambda x: x[:,-1,:])(h) # batch, dim

        y = Lambda(lambda x:x[0]+x[1]+x[2])([h_1_last, h_2_last, h_last])
        return y

def gradient_operation(args):
    y_true = args[0]
    y_pred = args[1]
    v_final = args[2]
    pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
    pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
    loss = -K.mean(0.75 * K.pow(1. - pt_1, 0) * K.log(pt_1)) - K.mean((1 - 0.75) * K.pow(pt_0, 0) * K.log(1. - pt_0))
    perturb = tf.gradients(loss, [v_final])[0]
    print(perturb)
    v_final_adv = tf.add(v_final, perturb)
    return v_final_adv

time_step = 10
feature_dim = 7
trace_input = Input(shape=(time_step, feature_dim))
label_input = Input(shape=(1,))
v_final = RNN_SEPARATE_2(time_step, feature_dim)(trace_input)
pred = Dense(1, activation='sigmoid', name='pred')(v_final)
v_final_adv = Lambda(gradient_operation)([label_input, pred, v_final])

Posted by the OP on 2020-8-6 18:09:08
The tensorflow version I'm using is 1.14.0, and keras is 2.2.4.

Posted on 2020-8-7 10:20:43
Quoting 猪仔很忙, posted on 2020-8-6 18:09:
The tensorflow version I'm using is 1.14.0, and keras is 2.2.4.

Even less I can do to help.
