 
Paper: https://www.csie.ntu.edu.tw/~cjlin/papers/ffm.pdf
 
Reference: https://zhuanlan.zhihu.com/p/170607706
 
Note: this implementation has not yet been verified.
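For context, the quantity the code below implements is the FFM score from the linked paper, wrapped in a sigmoid for binary classification (the linear part is realized by a Dense layer):

$$
\hat{y}(\mathbf{x}) = \sigma\left(\mathbf{w}^{\top}\mathbf{x} + b + \sum_{j_1=1}^{n}\sum_{j_2=j_1+1}^{n}\left\langle \mathbf{w}_{j_1,f_{j_2}},\ \mathbf{w}_{j_2,f_{j_1}}\right\rangle x_{j_1} x_{j_2}\right)
$$

where $f_j$ is the field of feature $j$ and each feature keeps one latent vector $\mathbf{w}_{j,f}\in\mathbb{R}^k$ per field. The paper defines only the raw score $\phi_{\text{FFM}}$; the sigmoid and the regularizers on the linear term are this post's additions.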
 
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, optimizers
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split

# Cast features and labels to the dtypes the model expects
def preprocess(x, y):
    x = tf.cast(x, dtype=tf.float32)  # float32 to match the Keras layers' default dtype
    y = tf.cast(y, dtype=tf.int64)
    return x, y

# Split the data into training and test sets
data = load_breast_cancer()
x_train, x_test, y_train, y_test = train_test_split(data.data, data.target, test_size=0.2,
                                                    random_state=11, stratify=data.target)
print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)

train_db = tf.data.Dataset.from_tensor_slices((np.array(x_train), y_train))
train_db = train_db.shuffle(123).map(preprocess).batch(20)

test_db = tf.data.Dataset.from_tensor_slices((np.array(x_test), y_test))
test_db = test_db.map(preprocess).batch(20)

sample = next(iter(train_db))
print('sample:', sample[0].shape, sample[1].shape,
      tf.reduce_min(sample[0]), tf.reduce_max(sample[0]))
 
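The raw breast-cancer features span very different ranges (the sample print above typically shows a max in the thousands), which can make the pairwise interaction terms unstable at this learning rate. A minimal, optional sketch (not part of the original post), inserted between the train/test split and the Dataset construction:

from sklearn.preprocessing import StandardScaler

# Fit on the training split only, then apply to both splits,
# so no test-set statistics leak into training.
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)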
 
class FFM(keras.Model):
    def __init__(self, field_num, feature_field_dict, dim_num, k=2):
        super(FFM, self).__init__()
        self.field_num = field_num                    # number of fields
        self.k = k                                    # latent dimension
        self.feature_field_dict = feature_field_dict  # feature index -> field index
        self.dim_num = dim_num                        # number of features

    def build(self, input_shape):
        # Linear part of the model
        self.fc = tf.keras.layers.Dense(units=1,
                                        bias_regularizer=tf.keras.regularizers.l2(0.01),
                                        kernel_regularizer=tf.keras.regularizers.l1(0.02))
        self.fc.build(input_shape)  # build eagerly so summary() reports its parameters
        # Field-aware latent vectors: one k-dim vector per (feature, field) pair
        self.w = self.add_weight(shape=(input_shape[-1], self.field_num, self.k),
                                 initializer='glorot_uniform',
                                 trainable=True)
        super(FFM, self).build(input_shape)

    def call(self, x, training=None):
        linear = self.fc(x)
        temp = tf.zeros([1], dtype=tf.float32)
        for j1 in range(self.dim_num):
            for j2 in range(j1 + 1, self.dim_num):
                f1 = self.feature_field_dict[j1]  # field of feature j1
                f2 = self.feature_field_dict[j2]  # field of feature j2
                # Each feature uses the latent vector indexed by the OTHER
                # feature's field: <w_{j1,f2}, w_{j2,f1}>
                # [k] * [k] = [k] -> [1, k]
                ww = tf.expand_dims(tf.multiply(self.w[j1, f2, :], self.w[j2, f1, :]), axis=0)
                # [batch] * [batch] = [batch] -> [batch, 1]
                xx = tf.expand_dims(tf.multiply(x[:, j1], x[:, j2]), axis=1)
                # [batch, 1] @ [1, k] = [batch, k]
                store = tf.matmul(xx, ww)
                # Inner product: sum over the latent dimension, [batch, k] -> [batch, 1]
                temp += tf.reduce_sum(store, keepdims=True, axis=1)
        out = layers.Add()([linear, temp])
        return tf.sigmoid(out)
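To sanity-check the field-aware indexing, here is a small hand computation (toy numbers, purely illustrative) of the interaction term for a single sample with three features in two fields, mirroring the double loop in call():

import numpy as np

# Features 0 and 1 belong to field 0, feature 2 to field 1.
field = {0: 0, 1: 0, 2: 1}
w = np.arange(3 * 2 * 2, dtype=float).reshape(3, 2, 2)  # [feature, field, k]
x = np.array([1.0, 2.0, 3.0])

total = 0.0
for j1 in range(3):
    for j2 in range(j1 + 1, 3):
        # each feature contributes the latent vector for its partner's field
        total += w[j1, field[j2]] @ w[j2, field[j1]] * x[j1] * x[j2]
print(total)  # FFM interaction term for this sample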
 
  
# Arbitrary grouping for demonstration: features 0-14 -> field 0, 15-29 -> field 1
store = {}
for i in range(30):
    store[i] = int(i / 15)
model = FFM(field_num=2, feature_field_dict=store, dim_num=30)
model.build((None, 30))
model.summary()
 
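With these hyperparameters, summary() should report 151 trainable parameters: 30 weights plus one bias from the linear Dense layer, and 30 features × 2 fields × 2 latent dimensions = 120 field-aware embedding weights.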
 
def main():
    store = {}
    for i in range(30):
        # In practice the fields should come from the meaning of the data
        # columns; this is just an arbitrary grouping for demonstration.
        store[i] = int(i / 15)
    model = FFM(field_num=2, feature_field_dict=store, dim_num=30)
    optimizer = optimizers.Adam(learning_rate=1e-2)
    for epoch in range(50):
        for step, (x, y) in enumerate(train_db):
            with tf.GradientTape() as tape:
                # The model ends in a sigmoid, so these are probabilities;
                # squeeze [batch, 1] -> [batch] so the loss doesn't broadcast
                # against y into a [batch, batch] matrix.
                probs = tf.squeeze(model(x, training=True), axis=1)
                loss = tf.reduce_mean(tf.losses.binary_crossentropy(y, probs))
                loss_regularization = []
                for v in model.trainable_variables:
                    loss_regularization.append(tf.nn.l2_loss(v))
                loss_regularization = tf.reduce_sum(tf.stack(loss_regularization))
                loss = 0.001 * loss_regularization + loss
            grads = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))
            print(epoch, step, 'loss:', float(loss))

        total_num = 0
        total_correct = 0
        for x, y in test_db:
            pred = model(x, training=False)
            pred = tf.squeeze(pred, axis=1)
            pred = pred > 0.5
            pred = tf.cast(pred, dtype=tf.int64)
            correct = tf.cast(tf.equal(pred, y), tf.int64)
            correct = tf.reduce_sum(correct)
            total_num += x.shape[0]
            total_correct += int(correct)
        acc = total_correct / total_num
        print(epoch, 'acc:', acc)
        print("-" * 25)

if __name__ == '__main__':
    main()
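The double Python loop in call() launches O(n²) small ops per forward pass, which is slow in eager mode. As a design alternative, the same interaction term can be vectorized; this is my own sketch (the name ffm_interaction and its helpers are hypothetical), assuming w has shape [n_features, n_fields, k] and x has shape [batch, n_features]:

import itertools
import numpy as np
import tensorflow as tf

def ffm_interaction(x, w, feature_field_dict, dim_num):
    pairs = list(itertools.combinations(range(dim_num), 2))  # all n*(n-1)/2 pairs
    j1 = np.array([p[0] for p in pairs])
    j2 = np.array([p[1] for p in pairs])
    f1 = np.array([feature_field_dict[j] for j in j1])  # field of each j1
    f2 = np.array([feature_field_dict[j] for j in j2])  # field of each j2
    w1 = tf.gather_nd(w, np.stack([j1, f2], axis=1))    # [P, k]: w_{j1, f(j2)}
    w2 = tf.gather_nd(w, np.stack([j2, f1], axis=1))    # [P, k]: w_{j2, f(j1)}
    dots = tf.reduce_sum(w1 * w2, axis=1)               # [P] inner products
    xx = tf.gather(x, j1, axis=1) * tf.gather(x, j2, axis=1)  # [batch, P]
    return tf.reduce_sum(xx * dots[None, :], axis=1, keepdims=True)  # [batch, 1]

Since the pair and field indices depend only on feature_field_dict, they could be precomputed once in build() and reused on every call.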
 