FiBiNET_tzld_rank.py

import numpy as np
import pandas as pd
import gc
import os
import time
from sklearn.preprocessing import LabelEncoder
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from deepctr.feature_column import SparseFeat, VarLenSparseFeat, get_feature_names
import tensorflow as tf
from deepctr.models import *


def split(x):
    key_ans = x.split('|')
    for key in key_ans:
        if key not in key2index:
            # Notice: input value 0 is a special "padding", so we do not use 0 to encode valid features for sequence input
            key2index[key] = len(key2index) + 1
    return list(map(lambda x: key2index[x], key_ans))
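
# Hedged sketch (added for illustration, not part of the original script): `split` and
# `key2index` are meant for a multi-valued column such as a hypothetical "genres" field
# holding values like "comedy|drama"; the encoded lists would then be padded so they line
# up with the commented-out 'genres' VarLenSparseFeat further below.
#   key2index = {}
#   genres_list = list(map(split, data["genres"].values))
#   genres_maxlen = max(len(g) for g in genres_list)
#   genres_pad = pad_sequences(genres_list, maxlen=genres_maxlen, padding='post', value=0)
#   model_input["genres"] = genres_pad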

if __name__ == "__main__":
    begin_time = time.time()
    data = pd.read_csv("/work/xielixun/dwa_sum_graphembedding_user_action_feature_app_20210225.csv")

    sparse_features = ["videoid", "mid",
                       "videoGenre1", "videoGenre2", "userRatedVideo1", "userRatedVideo2", "userRatedVideo3",
                       "userGenre1", "userGenre2", "userCity",
                       "authorid", "userRealplayCount", "videoRealPlayCount", "videoDuration"]
    target = ['label']
    feature_max_idx = {}

    # Drop rows whose mid is unknown
    data = data[data["mid"] != "unknown"].copy()
    data["mid"].replace("unknown", "N000111111D", inplace=True)
    data = data[data["mid"] != "N000111111D"].copy()

    # Same functionality as the function above; see the deepMatch DSSM script
    def add_index_column(param_df, column_name):
        values = list(param_df[column_name].unique())
        value_index_dict = {value: idx for idx, value in enumerate(values)}
        if column_name == "mid":
            param_df["uidx"] = param_df[column_name].copy()
            param_df["mid"] = param_df[column_name].map(value_index_dict)
            feature_max_idx["mid"] = param_df["mid"].max() + 1

    add_index_column(data, "mid")

    # Label-encode every sparse feature; mid was already re-indexed above, so it is skipped
    for column_name in sparse_features:
        lbe = LabelEncoder()
        print("\n\n-------------- " + column_name)
        print(data[column_name])
        if column_name in ("videoGenre1", "videoGenre2", "videoGenre3",
                           "userGenre1", "userGenre2", "userGenre3"):
            data[column_name].fillna("社会", inplace=True)  # default genre
        if column_name == "userCity":
            data[column_name].fillna("北京", inplace=True)  # default city
        if column_name == "mid":
            continue
        data[column_name] = lbe.fit_transform(data[column_name])
        feature_max_idx[column_name] = data[column_name].max() + 1
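
    # Hedged note (added): LabelEncoder maps each remaining column to 0..n-1, so
    # data[feat].nunique() equals feature_max_idx[feat]; either value is a valid
    # vocabulary_size for the SparseFeat columns built below.
    #   for feat in sparse_features:
    #       assert data[feat].nunique() == feature_max_idx[feat]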

    key2index = {}
    print("\n\n ************ data process finish")

    # Build each user's positive watch history (label > 0) as a list of video ids
    user_video_list_df = data[data["label"] > 0].copy().groupby("mid")['videoid'].apply(list).reset_index()
    user_video_list_df.rename(columns={'videoid': 'hist_video_id'}, inplace=True)

    max_len = 50
    # print(list(user_video_length.keys()))
    mid_list = list(user_video_list_df["mid"])
    print(user_video_list_df["mid"])
    # print(mid_list)
    user_video_list_df["hist_len"] = user_video_list_df["hist_video_id"].apply(lambda x: len(x))
    print(user_video_list_df)
    print(len(user_video_list_df))

    emb_dim = 10
    fixlen_feature_columns = [SparseFeat(feat, data[feat].nunique(), embedding_dim=emb_dim)
                              for feat in sparse_features]
    print(fixlen_feature_columns)

    use_weighted_sequence = False
    if use_weighted_sequence:
        varlen_feature_columns = [VarLenSparseFeat(SparseFeat('genres', vocabulary_size=len(key2index) + 1, embedding_dim=4),
                                                   maxlen=max_len, combiner='mean',
                                                   weight_name='genres_weight')]  # Notice: value 0 is for padding in sequence input features
    else:
        # varlen_feature_columns = [VarLenSparseFeat(SparseFeat('genres', vocabulary_size=len(key2index) + 1, embedding_dim=4),
        #                                            maxlen=max_len, combiner='mean',
        #                                            weight_name=None)]  # Notice: value 0 is for padding in sequence input features
        varlen_feature_columns = [VarLenSparseFeat(SparseFeat('hist_video_id', vocabulary_size=feature_max_idx["videoid"], embedding_dim=emb_dim,
                                                              embedding_name="videoid"),
                                                   maxlen=max_len, combiner='mean',
                                                   length_name="hist_len")]  # Notice: value 0 is for padding in sequence input features

    linear_feature_columns = fixlen_feature_columns + varlen_feature_columns
    dnn_feature_columns = fixlen_feature_columns + varlen_feature_columns
    print(dnn_feature_columns)
    feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)

    # Attach each user's history list to every sample of that user
    df_merge = pd.merge(left=data,
                        right=user_video_list_df,
                        left_on="mid",
                        right_on="mid",
                        how="right")
    df_merge.head()
    print("df_merge len is: ", len(df_merge))
    df_merge = df_merge.sample(frac=1.0)  # shuffle the samples

    del data, user_video_list_df
    gc.collect()
    print("after sample df_merge len is: ", len(df_merge))

    # Model input: one entry per sparse feature, plus the padded history sequence and its length
    model_input = {name: df_merge[name] for name in sparse_features}
    video_hist_seq_pad = pad_sequences(df_merge["hist_video_id"], maxlen=max_len, padding='post', truncating='post', value=0)
    model_input["hist_video_id"] = video_hist_seq_pad
    print("\n\n\n")
    print(video_hist_seq_pad)
    print("\n\nuser_vids_input len is: ", len(df_merge["hist_video_id"]))
    # model_input["genres_weight"] = np.random.randn(data.shape[0], max_len, 1)
    # model_input["hist_len"] = np.array(user_vids_len_input)
    model_input["hist_len"] = df_merge["hist_len"]
    print("\n\nuser_vids_len_input len is: ", len(df_merge["hist_len"]))
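
    # Hedged sketch (added): get_feature_names lists every input the model will expect;
    # a quick sanity check that model_input covers them all could look like this.
    #   missing = [name for name in feature_names if name not in model_input]
    #   assert not missing, "missing model inputs: {}".format(missing)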

    model = FiBiNET(linear_feature_columns, dnn_feature_columns, task='binary')

    logdir = os.path.join("log_callbacks")  # TensorBoard needs a directory
    if not os.path.exists(logdir):
        os.mkdir(logdir)
    output_model_file = os.path.join(logdir,
                                     'xdeepfm_model.h5')

    callbacks = [
        tf.keras.callbacks.TensorBoard(logdir),
        tf.keras.callbacks.ModelCheckpoint(output_model_file,
                                           save_best_only=True),
        tf.keras.callbacks.EarlyStopping(patience=5, min_delta=1e-3),
    ]
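
    # Hedged alternative (assumption, not in the original): EarlyStopping monitors
    # 'val_loss' by default; restore_best_weights=True would roll the model back to
    # its best epoch when training stops early.
    #   tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5,
    #                                    min_delta=1e-3, restore_best_weights=True)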

    METRICS = [
        tf.keras.metrics.TruePositives(name='tp'),
        tf.keras.metrics.FalsePositives(name='fp'),
        tf.keras.metrics.TrueNegatives(name='tn'),
        tf.keras.metrics.FalseNegatives(name='fn'),
        tf.keras.metrics.BinaryAccuracy(name='accuracy'),
        tf.keras.metrics.Precision(name='precision'),
        tf.keras.metrics.Recall(name='recall'),
        # tf.keras.metrics.AUC(name='auc'),
        tf.keras.metrics.AUC(name='auc-ROC', curve='ROC'),
        tf.keras.metrics.AUC(name='auc-PRC', curve='PR')
    ]

    model.compile(
        loss='binary_crossentropy',
        optimizer='adam',
        metrics=METRICS
    )
    model.fit(model_input, df_merge[target].values,
              batch_size=2048, epochs=5,
              verbose=2, validation_split=0.2, callbacks=callbacks)

    model.save("./tensorflow_xdeepfm-0325-tzld-2.h5")
    tf.keras.models.save_model(model,
                               "file:///work/xielixun/xDeepFM0325/tensorflow_xdeepfm-0325-tzld/001",
                               overwrite=True,
                               include_optimizer=True,
                               save_format=None,
                               signatures=None,
                               options=None)
    print("******* train FiBiNET cost time is: " + str(time.time() - begin_time))
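
    # Hedged sketch (added, not part of the original training flow): reloading the saved
    # .h5 file requires DeepCTR's custom layers to be registered via custom_objects.
    #   from deepctr.layers import custom_objects
    #   loaded = tf.keras.models.load_model("./tensorflow_xdeepfm-0325-tzld-2.h5", custom_objects)
    #   print(loaded.predict(model_input, batch_size=2048)[:5])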