#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2025 StrayWarrior <i@straywarrior.com>
#
# Distributed under terms of the MIT license.
  8. """
  9. 1.删除容易导致偏差的viewall特征
  10. 2.删除分桶不均匀的cpa特征
  11. 3.减少dense特征
  12. 4.增加U-I交叉统计
  13. 5.增加线性部分dense
  14. """

# data_fields_v3.config lists the input columns as "<field_name> <odps_type>",
# one pair per line.
raw_input = open("data_fields_v3.config").readlines()
input_fields = dict(
    map(lambda x: (x[0], x[1]),
        map(lambda x: x.strip().split(' '), raw_input)))


def read_features(filename, excludes=None):
    # Load a feature-name list (one name per line), lowercased, minus any
    # excluded names.
    features = open(filename).readlines()
    features = [name.strip().lower() for name in features]
    if excludes:
        for x in excludes:
            if x in features:
                features.remove(x)
    return features


exclude_features = ['viewall', 'cpa']
dense_features = read_features("features_top300.config", exclude_features)
top_dense_features = read_features('features_top50.config', exclude_features)

# The user_adverid_* / user_skuid_* counters are the U-I cross statistics
# added in this version (see the module docstring).
sparse_features = [
    "cid", "adid", "adverid",
    "region", "city", "brand",
    "vid", "cate1", "cate2",
    "apptype", "hour", "hour_quarter", "root_source_scene",
    "root_source_channel", "is_first_layer", "title_split",
    "user_has_conver_1y",
    "user_adverid_view_3d", "user_adverid_view_7d", "user_adverid_view_30d",
    "user_adverid_click_3d", "user_adverid_click_7d", "user_adverid_click_30d",
    "user_adverid_conver_3d", "user_adverid_conver_7d", "user_adverid_conver_30d",
    "user_skuid_view_3d", "user_skuid_view_7d", "user_skuid_view_30d",
    "user_skuid_click_3d", "user_skuid_click_7d", "user_skuid_click_30d",
    "user_skuid_conver_3d", "user_skuid_conver_7d", "user_skuid_conver_30d"
]
tag_features = [
    "user_vid_return_tags_2h", "user_vid_return_tags_1d", "user_vid_return_tags_3d",
    "user_vid_return_tags_7d", "user_vid_return_tags_14d"
]
seq_features = [
    "user_cid_click_list", "user_cid_conver_list"
]
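
# Map ODPS column types (as declared in data_fields_v3.config) to EasyRec
# input_type values.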
input_type_map = {
    'BIGINT': 'INT64',
    'DOUBLE': 'DOUBLE',
    'STRING': 'STRING'
}

print("""train_config {
  optimizer_config {
    adam_optimizer {
      learning_rate {
        constant_learning_rate {
          learning_rate: 0.0010
        }
      }
    }
    use_moving_average: false
  }
  optimizer_config {
    adam_optimizer {
      learning_rate {
        constant_learning_rate {
          learning_rate: 0.0006
        }
      }
    }
    use_moving_average: false
  }
  optimizer_config {
    adam_optimizer {
      learning_rate {
        constant_learning_rate {
          learning_rate: 0.002
        }
      }
    }
    use_moving_average: false
  }
  num_steps: 200000
  sync_replicas: true
  save_checkpoints_steps: 1100
  log_step_count_steps: 100
  save_summary_steps: 100
}
eval_config {
  metrics_set {
    auc {
    }
  }
  eval_online: true
  eval_interval_secs: 120
}
data_config {
  batch_size: 512
  num_epochs: 1
""")

for name in input_fields:
    input_type = input_type_map[input_fields[name]]
    default_spec = ''
    if name in dense_features:
        default_spec = '\n    default_val: "0"'
    print(f"""  input_fields {{
    input_name: "{name}"
    input_type: {input_type}{default_spec}
  }}""")
    # default_val: "0"
print("""  label_fields: "has_conversion"
  prefetch_size: 32
  input_type: OdpsInputV2
}
""")
for name in dense_features:
    print(f"""feature_configs {{
  input_names: "{name}"
  feature_type: RawFeature
  boundaries: [0.0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.11, 0.12, 0.13, 0.14, 0.15, 0.16, 0.17, 0.18, 0.19, 0.2, 0.21, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.28, 0.29, 0.3, 0.31, 0.32, 0.33, 0.34, 0.35, 0.36, 0.37, 0.38, 0.39, 0.4, 0.41, 0.42, 0.43, 0.44, 0.45, 0.46, 0.47, 0.48, 0.49, 0.5, 0.51, 0.52, 0.53, 0.54, 0.55, 0.56, 0.57, 0.58, 0.59, 0.6, 0.61, 0.62, 0.63, 0.64, 0.65, 0.66, 0.67, 0.68, 0.69, 0.7, 0.71, 0.72, 0.73, 0.74, 0.75, 0.76, 0.77, 0.78, 0.79, 0.8, 0.81, 0.82, 0.83, 0.84, 0.85, 0.86, 0.87, 0.88, 0.89, 0.9, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99, 1.0]
  embedding_dim: 6
}}""")

for name in sparse_features:
    print(f"""feature_configs {{
  input_names: "{name}"
  feature_type: IdFeature
  hash_bucket_size: 1000000
  embedding_dim: 6
}}""")

for name in tag_features + seq_features:
    print(f"""feature_configs {{
  input_names: "{name}"
  feature_type: TagFeature
  hash_bucket_size: 1000000
  embedding_dim: 6
  separator: ','
}}""")

def wide_and_deep():
    print("""
model_config {
  model_class: "WideAndDeep"
  feature_groups: {
    group_name: 'wide'""")
    for name in dense_features + sparse_features:
        print(f"""    feature_names: '{name}'""")
    print("""    wide_deep: WIDE
  }
  feature_groups: {
    group_name: 'deep'""")
    for name in dense_features + sparse_features + tag_features + seq_features:
        print(f"""    feature_names: '{name}'""")
    print("""    wide_deep: DEEP
  }
  wide_and_deep {
    wide_output_dim: 8
    dnn {
      hidden_units: [256, 128, 64]
    }
    final_dnn {
      hidden_units: [64, 32]
    }
    l2_regularization: 1e-5
  }
  embedding_regularization: 1e-6
}""")

def deep_fm():
    print("""
model_config {
  model_class: "DeepFM"
  feature_groups: {
    group_name: 'wide'""")
    for name in dense_features + sparse_features:
        print(f"""    feature_names: '{name}'""")
    print("""    wide_deep: WIDE
  }
  feature_groups: {
    group_name: 'deep'""")
    for name in top_dense_features + sparse_features + tag_features + seq_features:
        print(f"""    feature_names: '{name}'""")
    print("""    wide_deep: DEEP
  }
  deepfm {
    wide_output_dim: 8
    dnn {
      hidden_units: [256, 128, 64]
    }
    final_dnn {
      hidden_units: [64, 32]
    }
    l2_regularization: 1e-5
  }
  embedding_regularization: 1e-6
}""")

def fm():
    print("""
model_config {
  model_class: "FM"
  feature_groups: {
    group_name: 'wide'""")
    for name in dense_features:
        print(f"""    feature_names: '{name}'""")
    print("""    wide_deep: WIDE
  }
  feature_groups: {
    group_name: 'deep'""")
    for name in dense_features:
        print(f"""    feature_names: '{name}'""")
    print("""    wide_deep: DEEP
  }
  fm {
  }
  embedding_regularization: 1e-5
}""")

def config_export():
    print("""
export_config {
  exporter_type: "final"
}
""")


deep_fm()
config_export()
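
# Note: wide_and_deep() and fm() are defined above but not invoked here; only
# the DeepFM model config (plus the shared train/data/feature/export sections)
# is emitted.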