# dygraph_model.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

import net
  19. class DygraphModel():
  20. # define model
  21. def create_model(self, config):
  22. sparse_feature_number = config.get(
  23. "hyper_parameters.sparse_feature_number")
  24. sparse_feature_dim = config.get("hyper_parameters.sparse_feature_dim")
  25. fc_sizes = config.get("hyper_parameters.fc_sizes")
  26. sparse_fea_num = config.get('hyper_parameters.sparse_fea_num')
  27. dense_feature_dim = config.get('hyper_parameters.dense_input_dim')
  28. sparse_input_slot = config.get('hyper_parameters.sparse_inputs_slots')
  29. dnn_model = net.DNNLayer(sparse_feature_number, sparse_feature_dim,
  30. dense_feature_dim, sparse_input_slot - 1,
  31. fc_sizes)
  32. return dnn_model
  33. # define feeds which convert numpy of batch data to paddle.tensor
  34. def create_feeds(self, batch_data, config):
  35. dense_feature_dim = config.get('hyper_parameters.dense_input_dim')
  36. sparse_tensor = []
  37. for b in batch_data[:-1]:
  38. sparse_tensor.append(
  39. paddle.to_tensor(b.numpy().astype('int64').reshape(-1, 1)))
  40. dense_tensor = paddle.to_tensor(batch_data[-1].numpy().astype(
  41. 'float32').reshape(-1, dense_feature_dim))
  42. label = sparse_tensor[0]
  43. return label, sparse_tensor[1:], dense_tensor
  44. # define loss function by predicts and label
  45. def create_loss(self, raw_predict_2d, label):
  46. cost = paddle.nn.functional.cross_entropy(
  47. input=raw_predict_2d, label=label)
  48. avg_cost = paddle.mean(x=cost)
  49. return avg_cost
  50. # define optimizer
  51. def create_optimizer(self, dy_model, config):
  52. lr = config.get("hyper_parameters.optimizer.learning_rate", 0.001)
  53. optimizer = paddle.optimizer.Adam(
  54. learning_rate=lr, parameters=dy_model.parameters())
  55. return optimizer
  56. # define metrics such as auc/acc
  57. # multi-task need to define multi metric
  58. def create_metrics(self):
  59. metrics_list_name = ["auc"]
  60. auc_metric = paddle.metric.Auc("ROC")
  61. metrics_list = [auc_metric]
  62. return metrics_list, metrics_list_name
  63. # construct train forward phase
  64. def train_forward(self, dy_model, metrics_list, batch_data, config):
  65. label, sparse_tensor, dense_tensor = self.create_feeds(batch_data,
  66. config)
  67. raw_pred_2d = dy_model.forward(sparse_tensor, dense_tensor)
  68. loss = self.create_loss(raw_pred_2d, label)
  69. # update metrics
  70. predict_2d = paddle.nn.functional.softmax(raw_pred_2d)
  71. metrics_list[0].update(preds=predict_2d.numpy(), labels=label.numpy())
  72. print_dict = {'loss': loss}
  73. # print_dict = None
  74. return loss, metrics_list, print_dict
  75. def infer_forward(self, dy_model, metrics_list, batch_data, config):
  76. label, sparse_tensor, dense_tensor = self.create_feeds(batch_data,
  77. config)
  78. raw_pred_2d = dy_model.forward(sparse_tensor, dense_tensor)
  79. # update metrics
  80. predict_2d = paddle.nn.functional.softmax(raw_pred_2d)
  81. metrics_list[0].update(preds=predict_2d.numpy(), labels=label.numpy())
  82. return metrics_list, None