@@ -4,6 +4,7 @@ import paddle.nn.functional as F
import math


+
class WideDeepLayer(nn.Layer):
    def __init__(self, sparse_feature_number, sparse_feature_dim,
                 dense_feature_dim, num_field, layer_sizes):
@@ -14,60 +15,71 @@ class WideDeepLayer(nn.Layer):
        self.num_field = num_field
        self.layer_sizes = layer_sizes

-        self.wide_part = paddle.nn.Linear(
-            in_features=self.dense_feature_dim,
-            out_features=1,
-            weight_attr=paddle.ParamAttr(
-                initializer=paddle.nn.initializer.TruncatedNormal(
-                    mean=0.0, std=1.0 / math.sqrt(self.dense_feature_dim))))
-
-        self.embedding = paddle.nn.Embedding(
-            self.sparse_feature_number,
-            self.sparse_feature_dim,
-            sparse=True,
-            weight_attr=paddle.ParamAttr(
-                name="SparseFeatFactors",
-                initializer=paddle.nn.initializer.Uniform()))
-
-        sizes = [sparse_feature_dim * num_field + dense_feature_dim
-                 ] + self.layer_sizes + [1]
-        acts = ["relu" for _ in range(len(self.layer_sizes))] + [None]
-        self._mlp_layers = []
-        for i in range(len(layer_sizes) + 1):
-            linear = paddle.nn.Linear(
-                in_features=sizes[i],
-                out_features=sizes[i + 1],
-                weight_attr=paddle.ParamAttr(
-                    initializer=paddle.nn.initializer.Normal(
-                        std=1.0 / math.sqrt(sizes[i]))))
-            self.add_sublayer('linear_%d' % i, linear)
-            self._mlp_layers.append(linear)
-            if acts[i] == 'relu':
-                act = paddle.nn.ReLU()
-                self.add_sublayer('act_%d' % i, act)
-                self._mlp_layers.append(act)
-
    def forward(self, sparse_inputs, dense_inputs):
        # wide part
-        wide_output = self.wide_part(dense_inputs)
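+        # Wide part rebuilt for static graph: a single fully-connected layer
+        # over the dense features that produces one logit.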
+        # paddle.static.nn.fc takes `x`, `weight_attr`, and `bias_attr`
+        # (rather than the old fluid-style `input` / `param_attr` arguments).
+        wide_output = paddle.static.nn.fc(
+            x=dense_inputs,
+            size=1,
+            weight_attr=paddle.ParamAttr(
+                initializer=paddle.nn.initializer.TruncatedNormal(
+                    mean=0.0, std=1.0 / math.sqrt(self.dense_feature_dim))),
+            bias_attr=paddle.ParamAttr(
+                initializer=paddle.nn.initializer.Constant(0.0)))

        # deep part
        sparse_embs = []
-        for s_input in sparse_inputs:
-            #emb = self.embedding(s_input)
-            emb = paddle.static.nn.sparse_embedding(s_input, size = [1024, self.sparse_feature_dim], param_attr=paddle.ParamAttr(name="embedding"))
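+        # One embedding table per sparse slot, sized
+        # [sparse_feature_number, sparse_feature_dim].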
+        for i, s_input in enumerate(sparse_inputs):
+            emb = paddle.static.nn.embedding(
+                input=s_input,
+                size=[self.sparse_feature_number, self.sparse_feature_dim],
+                param_attr=paddle.ParamAttr(
+                    name=f"embedding_{i}",
+                    initializer=paddle.nn.initializer.Uniform()))
            emb = paddle.reshape(emb, shape=[-1, self.sparse_feature_dim])
            sparse_embs.append(emb)

        deep_output = paddle.concat(x=sparse_embs + [dense_inputs], axis=1)
-        for n_layer in self._mlp_layers:
-            deep_output = n_layer(deep_output)
+
+        # Build the deep MLP with static-graph fc layers.
+        sizes = [self.sparse_feature_dim * self.num_field +
+                 self.dense_feature_dim] + self.layer_sizes + [1]
+        acts = ["relu" for _ in range(len(self.layer_sizes))] + [None]
+
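+        # Stack the deep MLP: ReLU on each hidden layer, no activation on the
+        # final single-unit output layer.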
+        for i in range(len(sizes) - 1):
+            deep_output = paddle.static.nn.fc(
+                x=deep_output,  # `x` instead of the old fluid `input`
+                size=sizes[i + 1],
+                activation=acts[i],  # `activation` instead of the old `act`
+                weight_attr=paddle.ParamAttr(
+                    name=f'fc_{i}_w',
+                    initializer=paddle.nn.initializer.Normal(
+                        std=1.0 / math.sqrt(sizes[i]))),
+                bias_attr=paddle.ParamAttr(
+                    name=f'fc_{i}_b',
+                    initializer=paddle.nn.initializer.Constant(0.0)))

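+        # Sum the wide and deep logits and apply sigmoid to get the final
+        # prediction.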
        prediction = paddle.add(x=wide_output, y=deep_output)
        pred = F.sigmoid(prediction)
        return pred


+
+
+
class WideDeepModel:
    def __init__(self, sparse_feature_number=1000001, sparse_inputs_slots=27, sparse_feature_dim=10, dense_input_dim=13, fc_sizes=[400, 400, 400]):
        self.sparse_feature_number = sparse_feature_number