@@ -5,6 +5,7 @@ import com.aliyun.odps.TableSchema
 import com.aliyun.odps.data.Record
 import com.aliyun.odps.spark.examples.myUtils.{DataTimeUtil, MyDateUtils, MyHdfsUtils, ParamUtils, env}
 import examples.extractor.RankExtractorFeature_20240530
+import examples.utils.DateTimeUtil
 import org.apache.hadoop.io.compress.GzipCodec
 import org.apache.spark.sql.SparkSession
 import org.xm.Similarity
@@ -92,10 +93,10 @@ object makedata_ad_31_originData_20240718 {
         featureMap.put("targeting_conversion_" + b1.getString("targeting_conversion"), idDefaultValue)
       }
 
-      val hour = DataTimeUtil.getHourByTimestamp(ts)
+      val hour = DateTimeUtil.getHourByTimestamp(ts)
       featureMap.put("hour_" + hour, 0.1)
 
-      val dayOfWeek = DataTimeUtil.getDayOrWeekByTimestamp(ts)
+      val dayOfWeek = DateTimeUtil.getDayOrWeekByTimestamp(ts)
       featureMap.put("dayOfWeek_" + dayOfWeek, 0.1);
 
      if (b1.containsKey("cpa")) {
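For context, this patch switches the hour and day-of-week feature extraction from DataTimeUtil to examples.utils.DateTimeUtil. The implementation of that utility is not part of this diff, so the following is only a minimal Scala sketch of what the two helpers presumably do, inferred from their names and the resulting feature keys (hour_*, dayOfWeek_*); the time zone and the second-resolution timestamp are assumptions, not confirmed by the patch.

package examples.utils

import java.time.{Instant, ZoneId}

object DateTimeUtil {
  // Assumed time zone; not confirmed by the diff.
  private val zone = ZoneId.of("Asia/Shanghai")

  // Hour of day (0-23) for a second-resolution Unix timestamp.
  def getHourByTimestamp(ts: Long): Int =
    Instant.ofEpochSecond(ts).atZone(zone).getHour

  // Day of week (1 = Monday .. 7 = Sunday) for a second-resolution Unix timestamp.
  def getDayOrWeekByTimestamp(ts: Long): Int =
    Instant.ofEpochSecond(ts).atZone(zone).getDayOfWeek.getValue
}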