
feat: update the data backfill scripts

zhaohaipeng committed 1 month ago
parent
commit
b1f2ce7b8f
2 changed files with 4 additions and 33 deletions
  1. +4 −4 recommend/00_train_data_make.sh
  2. +0 −29 recommend/23_str_train_data_make.sh

+ 4 - 4
recommend/00_train_data_make.sh

@@ -1,13 +1,13 @@
 #!/bin/sh
 set -x
 
-dts=('20250225' '20250226' '20250227' '20250228' '20250301' '20250302' '20230303')
+dts=('20250303' '20250304' '20250305' '20250306' '20250307')
 for dt in "${dts[@]}"; do
     echo "开始处理: ${dt}"
 
     /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
     --class com.aliyun.odps.spark.examples.makedata_recsys.v20250218.makedata_recsys_41_originData_20250218 \
-    --master yarn --driver-memory 2G --executor-memory 5G --executor-cores 1 --num-executors 12 \
+    --master yarn --driver-memory 2G --executor-memory 5G --executor-cores 1 --num-executors 10 \
     --conf spark.yarn.executor.memoryOverhead=2G \
     /root/zhaohp/recommend-emr-dataprocess/target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
     tablePart:64 beginStr:${dt}00 endStr:${dt}11 repartition:32 \
@@ -16,7 +16,7 @@ for dt in "${dts[@]}"; do
 
     /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
     --class com.aliyun.odps.spark.examples.makedata_recsys.v20250218.makedata_recsys_41_originData_20250218 \
-    --master yarn --driver-memory 2G --executor-memory 5G --executor-cores 1 --num-executors 12 \
+    --master yarn --driver-memory 2G --executor-memory 5G --executor-cores 1 --num-executors 10 \
     --conf spark.yarn.executor.memoryOverhead=2G \
     /root/zhaohp/recommend-emr-dataprocess/target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
     tablePart:64 beginStr:${dt}12 endStr:${dt}17 repartition:32 \
@@ -25,7 +25,7 @@ for dt in "${dts[@]}"; do
 
     /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
     --class com.aliyun.odps.spark.examples.makedata_recsys.v20250218.makedata_recsys_41_originData_20250218 \
-    --master yarn --driver-memory 2G --executor-memory 5G --executor-cores 1 --num-executors 12 \
+    --master yarn --driver-memory 2G --executor-memory 5G --executor-cores 1 --num-executors 10 \
     --conf spark.yarn.executor.memoryOverhead=2G \
     /root/zhaohp/recommend-emr-dataprocess/target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
     tablePart:64 beginStr:${dt}18 endStr:${dt}23 repartition:32 \
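
The change above updates the backfill date list and lowers --num-executors from 12 to 10 in each of the three per-day Spark submissions (hour windows 00–11, 12–17, 18–23). For reference, a minimal sketch of an equivalent loop that also iterates over the hour windows so the spark-submit block appears only once; it reuses the class, jar and resource settings from the script, while the windows array and inner loop are hypothetical, and the trailing job arguments that the diff hunks truncate are deliberately left out.

#!/bin/sh
# Sketch only (not part of this commit): collapse the three near-identical
# spark-submit blocks by looping over the hour windows as well.
set -x

SPARK=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2
JAR=/root/zhaohp/recommend-emr-dataprocess/target/spark-examples-1.0.0-SNAPSHOT-shaded.jar
CLASS=com.aliyun.odps.spark.examples.makedata_recsys.v20250218.makedata_recsys_41_originData_20250218

dts=('20250303' '20250304' '20250305' '20250306' '20250307')
windows=('00 11' '12 17' '18 23')   # beginStr/endStr hours of each intra-day range

for dt in "${dts[@]}"; do
    echo "Processing: ${dt}"
    for w in "${windows[@]}"; do
        set -- $w   # split "begin end" into $1 and $2
        # remaining job arguments of the real script are omitted here because
        # the diff hunks above truncate them
        ${SPARK} org.apache.spark.deploy.SparkSubmit \
        --class ${CLASS} \
        --master yarn --driver-memory 2G --executor-memory 5G --executor-cores 1 --num-executors 10 \
        --conf spark.yarn.executor.memoryOverhead=2G \
        ${JAR} \
        tablePart:64 beginStr:${dt}${1} endStr:${dt}${2} repartition:32
    done
done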

+ 0 - 29
recommend/23_str_train_data_make.sh

@@ -1,29 +0,0 @@
-#!/bin/sh
-set -x
-
-dts=('20250224' '20250225' '20250226' '20250227' '20250228' '20250301' '20250302')
-for dt in "${dts[@]}"; do
-    echo "开始处理: ${dt}"
-
-    # STR负样本采样
-    /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
-    --class com.aliyun.odps.spark.examples.makedata_recsys.v20250218.makedata_recsys_41_str_train_data_20250218 \
-    --master yarn --driver-memory 3G --executor-memory 6G --executor-cores 1 --num-executors 25 \
-    --conf spark.driver.maxResultSize=2g \
-    ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
-    readPath:/dw/recommend/model/41_recsys_origin_date/${dt}*/* \
-    savePath:/dw/recommend/model/41_recsys_str_train_data/${dt} \
-    fuSampleRate:0.05 whatLabel:is_share repartition:64
-
-    echo "${dt} 负样本采样完成"
-
-done
-
-
-/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
---class com.aliyun.odps.spark.examples.makedata_recsys.v20250218.makedata_recsys_43_str_data_bucket_20250218 \
---master yarn --driver-memory 4G --executor-memory 8G --executor-cores 1 --num-executors 16 \
-./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
-readPath:/dw/recommend/model/41_recsys_str_train_data/ \
-savePath:/dw/recommend/model/43_recsys_str_data_bucket/ \
-beginStr:20250224 endStr:20250302 whatLabel:is_share fileName:20250218_bucket_322.txt
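
In the removed 23_str_train_data_make.sh, the final bucketing job's beginStr/endStr had to cover the same dates as the dts array driving the per-day negative-sampling runs. A minimal sketch, assuming the same paths, class name and resources as the deleted file, of deriving that window from the array instead of hard-coding 20250224/20250302; the beginStr/endStr variables are illustrative only.

#!/bin/sh
# Sketch only (not part of this commit): derive the bucketing window of the
# deleted script from its dts array so the two stages cannot drift apart.
set -x

dts=('20250224' '20250225' '20250226' '20250227' '20250228' '20250301' '20250302')
beginStr=${dts[0]}
endStr=${dts[${#dts[@]}-1]}

/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
--class com.aliyun.odps.spark.examples.makedata_recsys.v20250218.makedata_recsys_43_str_data_bucket_20250218 \
--master yarn --driver-memory 4G --executor-memory 8G --executor-cores 1 --num-executors 16 \
./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
readPath:/dw/recommend/model/41_recsys_str_train_data/ \
savePath:/dw/recommend/model/43_recsys_str_data_bucket/ \
beginStr:${beginStr} endStr:${endStr} whatLabel:is_share fileName:20250218_bucket_322.txt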