Bladeren bron

feat:添加评估结果分析脚本

zhaohaipeng 6 maanden geleden
bovenliggende
commit
437ee6af8d
1 gewijzigde bestanden met toevoegingen van 67 en 3 verwijderingen
  1. 67 3
      ad/01_ad_model_update.sh

+ 67 - 3
ad/01_ad_model_update.sh

@@ -12,8 +12,8 @@ export JAVA_HOME=/usr/lib/jvm/java-1.8.0
 # 全局常量
 HADOOP=/opt/apps/HADOOP-COMMON/hadoop-common-current/bin/hadoop
 
-TRAIN_PATH=/dw/recommend/model/31_ad_sample_data_v4
-BUCKET_FEATURE_PATH=/dw/recommend/model/33_ad_train_data_v4
+TRAIN_PATH=/dw/recommend/model/31_ad_sample_data_v4_test
+BUCKET_FEATURE_PATH=/dw/recommend/model/33_ad_train_data_v4_test
 MODEL_PATH=/dw/recommend/model/35_ad_model_test
 PREDICT_RESULT_SAVE_PATH=/dw/recommend/model/34_ad_predict_data_test
 TABLE=alg_recsys_ad_sample_all
@@ -139,6 +139,66 @@ check_ad_hive() {
 
 }
 
make_origin_data() {
  # Produce the raw ad training samples for yesterday (${today_early_1}) by
  # launching three Spark jobs in parallel, each covering a different hour
  # range of the day (00-12, 13-18, 19-23). All jobs append under TRAIN_PATH.
  #
  # Globals (read): TRAIN_PATH, TABLE, today_early_1
  # Calls: check_run_status (defined elsewhere in this script) with the
  #        aggregated exit status, the step start time, and the step name.

  local step_start_time
  step_start_time=$(date +%s)

  # Submit one origin-data Spark job for a single hour range.
  #   $1 - begin hour, e.g. 00
  #   $2 - end hour,   e.g. 12
  submit_origin_job() {
    local begin_hour=$1
    local end_hour=$2
    /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
    --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_31_originData_20240718 \
    --master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
    ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
    tablePart:64 repartition:32 \
    beginStr:${today_early_1}${begin_hour} endStr:${today_early_1}${end_hour} \
    savePath:${TRAIN_PATH} \
    table:${TABLE} \
    filterHours:00,01,02,03,04,05,06,07 \
    idDefaultValue:0.1
  }

  # Fan out the three hour-range jobs and remember their PIDs.
  local pids=()
  submit_origin_job 00 12 & pids+=($!)
  submit_origin_job 13 18 & pids+=($!)
  submit_origin_job 19 23 & pids+=($!)

  # BUG FIX: the original used a bare `wait` followed by `return_code=$?`.
  # In bash, `wait` with no arguments always returns 0, so a failed Spark
  # job could never be detected and the step was always reported as
  # successful. Wait on each PID individually and keep the last non-zero
  # exit status so check_run_status sees real failures.
  local return_code=0
  local pid
  for pid in "${pids[@]}"; do
    wait "$pid" || return_code=$?
  done

  check_run_status $return_code $step_start_time "spark原始样本生产任务"
}
+
make_bucket_feature() {
  # Bucketize yesterday's raw samples into training features: read the
  # origin data from TRAIN_PATH and write the bucketed feature set to
  # BUCKET_FEATURE_PATH, then report the step outcome via check_run_status.
  #
  # Globals (read): TRAIN_PATH, BUCKET_FEATURE_PATH, today_early_1

  local bucket_step_started
  bucket_step_started=$(date +%s)

  # Launcher binary for the bundled Spark 2.4.8 distribution.
  local spark_launcher=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2

  "$spark_launcher" org.apache.spark.deploy.SparkSubmit \
    --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_33_bucketData_20240718 \
    --master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
    ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
    beginStr:${today_early_1} endStr:${today_early_1} repartition:100 \
    filterNames:_4h_,_5h_,adid_,targeting_conversion_ \
    readPath:${TRAIN_PATH} \
    savePath:${BUCKET_FEATURE_PATH}
  local bucket_rc=$?

  check_run_status $bucket_rc $bucket_step_started "spark特征分桶任务"
}
+
 xgb_train() {
   local step_start_time=$(date +%s)
 
@@ -223,7 +283,11 @@ model_upload_oss() {
 main() {
   init
 
-  # check_ad_hive
+  check_ad_hive
+
+  make_origin_data
+
+  make_bucket_feature
 
   xgb_train