Prechádzať zdrojové kódy

feat:添加补数据脚本

zhaohaipeng 8 mesiacov pred
rodič
commit
21de8a0e7c

+ 7 - 7
ad/02_ad_model_update_twice_daily.sh

@@ -166,7 +166,7 @@ make_train_bucket_feature() {
     --class com.aliyun.odps.spark.zhp.makedata_ad.makedata_ad_33_bucketData_20240717 \
     --master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
     ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
-    beginStr:${train_begin_str} endStr:${train_end_str} repartition:100 \
+    beginStr:${train_begin_str:0:8} endStr:${train_end_str:0:8} repartition:100 \
     filterNames:adid_,targeting_conversion_ \
     readPath:${originDataSavePath} \
     savePath:${trainBucketFeaturePath}
@@ -178,7 +178,7 @@ make_predict_bucket_feature() {
     --class com.aliyun.odps.spark.zhp.makedata_ad.makedata_ad_33_bucketData_20240717 \
     --master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
     ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
-    beginStr:${predict_begin_str} endStr:${predict_end_str} repartition:100 \
+    beginStr:${predict_begin_str:0:8} endStr:${predict_end_str:0:8} repartition:100 \
     filterNames:adid_,targeting_conversion_ \
     readPath:${originDataSavePath} \
     savePath:${predictBucketFeaturePath}
@@ -356,17 +356,17 @@ main() {
 
     make_bucket_feature
 
-    model_train
+    # model_train
 
     # auc_compare
 
-    model_to_online_format
+    # model_to_online_format
 
-    model_upload_oss
+    # model_upload_oss
 
-    model_local_back
+    # model_local_back
 
-    success_inform
+    # success_inform
 }
 
 

+ 0 - 0
ad/20_new_ad__model_train_predict_auc.sh → ad/20_new_ad_model_train_predict_auc.sh


+ 55 - 0
ad/24_ad_data_make.sh

@@ -0,0 +1,55 @@
+#!/bin/sh
+
+# 广告数据生产
+
+set -x 
+
+source /root/anaconda3/bin/activate py37
+
+export SPARK_HOME=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8
+export PATH=$SPARK_HOME/bin:$PATH
+export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
+export JAVA_HOME=/usr/lib/jvm/java-1.8.0
+
+# 全局常量
+HADOOP=/opt/apps/HADOOP-COMMON/hadoop-common-current/bin/hadoop
+FM_HOME=/root/sunmingze/alphaFM
+
+TRAIN_PATH=/dw/recommend/model/31_ad_sample_data_v4
+BUCKET_FEATURE_PATH=/dw/recommend/model/33_ad_train_data_v4
+TABLE=alg_recsys_ad_sample_all
+
+today="$(date +%Y%m%d)"
+today_early_1="$(date -d '1 days ago' +%Y%m%d)"
+
+# Generate yesterday's raw ad samples (hours 00-23) from the ODPS table,
+# skipping early-morning hours via filterHours. Arguments use the job's
+# key:value convention — no space after the colon, or the value is parsed
+# as a separate argument.
+make_origin_data() {
+    /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+    --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_31_originData_20240718 \
+    --master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
+    ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+    tablePart:64 repartition:32 \
+    beginStr:${today_early_1}00 endStr:${today_early_1}23 \
+    savePath:${TRAIN_PATH} \
+    table:${TABLE} \
+    filterHours:00,01,02,03,04,05,06,07 \
+    idDefaultValue:0.1
+}
+
+# Bucketize yesterday's raw samples into training features, dropping the
+# feature families listed in filterNames. The final argument must NOT end
+# with a line continuation: a trailing backslash would splice the closing
+# brace into the command and break the function definition.
+make_bucket_feature() {
+    /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+    --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_33_bucketData_20240718 \
+    --master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
+    ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+    beginStr:${today_early_1} endStr:${today_early_1} repartition:100 \
+    filterNames:_4h_,_5h_,_ecpm,ecpm_,adid_,targeting_conversion_ \
+    readPath:${TRAIN_PATH} \
+    savePath:${BUCKET_FEATURE_PATH}
+}
+
+# Entry point: produce raw samples first, then the bucketized features.
+main() {
+    make_origin_data
+
+    make_bucket_feature
+}
+
+main