
feat: add data backfill script

zhaohaipeng · 5 months ago
commit 1cfdda9557

+ 19 - 1
recommend/20_vid_avg_score.sh

@@ -16,9 +16,27 @@ TXT_PATH=/mnt/disk1/20240729
 HADOOP=/opt/apps/HADOOP-COMMON/hadoop-common-current/bin/hadoop
 FM_PREDICT=/root/sunmingze/alphaFM/bin/fm_predict
 
+vids=(22895200 22751457 14146727 22847440 22927926 22858609 22974689 22563167 22959023 22970515 22946931 22994781 20720060 22979110)
 
 
-vids=(22895200 22751457 14146727 22847440 22927926 22858609 22974689 22563167 22959023 22970515 22946931 22994781 20720060 22979110)
+restore_score() {
+    for (( i = 0; i < ${#vids[@]}; i++ )); do
+        vid=${vids[i]}
+        score_avg=$(awk '{
+            score = $2
+            new_score = ( 0.1 * score ) / ( 1 - 0.9 * score )
+            sum += new_score
+            count++
+        } END {
+            if ( count > 0 ) {
+                print sum / count
+            } else {
+                print "NaN"
+            }
+        }' ${PREDICT_PATH}/${model_name}_${predict_date}_${vid}.txt)
+        echo -e "VID: ${vid} average score: ${score_avg}\n\tdata path: ${PREDICT_PATH}/${model_name}_${predict_date}_${vid}.txt"
+    done
+}
 
 main() {
     for(( i = 0; i < ${#vids[@]}; i++)) do
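A note on the restoration formula in restore_score() above: assuming the training negatives were downsampled at a keep rate of r = 0.1 (an assumption read off the 0.1/0.9 constants in the awk body; the rate is not stated elsewhere in the commit), a score s predicted under the downsampled distribution is calibrated back to the original distribution by inverting the sampling bias:

    p = r*s / (1 - (1 - r)*s) = 0.1*s / (1 - 0.9*s)

This is exactly the per-line new_score computed before averaging.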

+ 7 - 4
recommend/data_new_table.sh → recommend/21_make_data_new_table.sh

@@ -1,14 +1,13 @@
 #!/bin/sh
 set -x
 
-source /root/anaconda3/bin/activate py37
 
 export SPARK_HOME=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8
 export PATH=$SPARK_HOME/bin:$PATH
 export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
 export JAVA_HOME=/usr/lib/jvm/java-1.8.0
 
-#  nohup sh handle_rov.sh > "$(date +%Y%m%d_%H%M%S)_handle_rov.log" 2>&1 &
+source /root/anaconda3/bin/activate py37
 
 # source data table name
 table='alg_recsys_sample_all_v2'
@@ -23,7 +22,7 @@ max_minute=00
 # source data files
 originDataPath=/dw/recommend/model/41_recsys_sample_data_new_table/
 # feature bucketing
-bucketDataPath=/dw/recommend/model/43_recsys_train_data_new_table
+bucketDataPath=/dw/recommend/model/43_recsys_train_data_new_table/
 # hadoop
 HADOOP=/opt/apps/HADOOP-COMMON/hadoop-common-current/bin/hadoop
 
@@ -83,4 +82,8 @@ if [ $? -ne 0 ]; then
    exit 1
 else
    echo "spark特征分桶任务执行成功"
-fi
+fi
+
+
+# cron job configuration
+# 0 11 * * * cd /root/zhaohp/recommend-emr-dataprocess && /bin/sh ./recommend/21_make_data_new_table.sh > logs/recommend/21_make_data_new_table/$(date +\%Y\%m\%d\%H\%M).log 2>&1
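Two notes on the cron entry above: crontab treats an unescaped % as a newline, which is why the date format escapes it as \%, and the redirect assumes the log directory already exists under the project root. A hypothetical one-time setup step (path inferred from the cron line itself, not part of this commit):

    mkdir -p /root/zhaohp/recommend-emr-dataprocess/logs/recommend/21_make_data_new_table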

+ 0 - 37
recommend/21_vid_score_restore.sh

@@ -1,37 +0,0 @@
-#!/bin/sh
-
-# Restore scores biased by negative downsampling, then compute the average
-
-
-set -x
-
-predict_date=$1
-model_name=$2
-
-PROJECT_HOME=/root/zhaohp/recommend-emr-dataprocess/
-PREDICT_PATH=${PROJECT_HOME}/predict/recommend/
-
-
-
-vids=(22895200 22751457 14146727 22847440 22927926 22858609 22974689 22563167 22959023 22970515 22946931 22994781 20720060 22979110)
-
-main() {
-    for(( i = 0; i < ${#vids[@]}; i++)) do
-        vid=${vids[i]}
-        score_avg=$(awk '{
-            score = $2
-            new_score = ( 0.1 * score ) / ( 1 - 0.9 * score)
-            sum += new_score
-            count++
-        } END {
-            if ( count > 0 ){
-                print sum / count
-            } else {
-                print "NaN"
-            }
-        }' ${PREDICT_PATH}/${model_name}_${predict_date}_${vid}.txt)
-        echo -e "VID: ${vid} 平均分计算结果: ${score_avg} \n\t数据路径: ${PREDICT_PATH}/${model_name}_${predict_date}_${vid}.txt"
-    done
-}
-
-main

+ 73 - 0
recommend/22_supplementary_data_new_table.sh

@@ -0,0 +1,73 @@
+#!/bin/sh
+set -x
+
+
+export SPARK_HOME=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8
+export PATH=$SPARK_HOME/bin:$PATH
+export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
+export JAVA_HOME=/usr/lib/jvm/java-1.8.0
+
+source /root/anaconda3/bin/activate py37
+
+# source data table name
+table='alg_recsys_sample_all_v2'
+# absolute HDFS output paths for each stage
+# source data files
+originDataPath=/dw/recommend/model/41_recsys_sample_data_new_table/
+# feature bucketing
+bucketDataPath=/dw/recommend/model/43_recsys_train_data_new_table/
+# hadoop
+HADOOP=/opt/apps/HADOOP-COMMON/hadoop-common-current/bin/hadoop
+
+# 1. generate raw data
+echo "$(date +%Y-%m-%d_%H-%M-%S)----------step1------------start generating raw data from table ${table}"
+
+/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_41_originData_20240709 \
+--master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
+./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+tablePart:64 repartition:32 \
+beginStr:2024103114 endStr:2024110119 \
+savePath:${originDataPath} \
+table:${table} &
+
+/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_41_originData_20240709 \
+--master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
+./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+tablePart:64 repartition:32 \
+beginStr:2024110120 endStr:2024110223 \
+savePath:${originDataPath} \
+table:${table} &
+
+
+wait
+if [ $? -ne 0 ]; then
+   echo "Spark原始样本生产任务执行失败"
+   exit 1
+else
+   echo "spark原始样本生产执行成功"
+fi
+
+# feature sampling and bucketing
+/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_43_bucketData_fu_sample_20240709 \
+--master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
+./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+readPath:/dw/recommend/model/41_recsys_sample_data_new_table \
+savePath:/dw/recommend/model/43_recsys_train_data_new_table_274_sample_01 \
+beginStr:20241031 endStr:20241102 repartition:500 \
+filterNames:ROS fuSampleRate:0.1 \
+fileName:20240609_bucket_314.txt \
+whatLabel:is_return whatApps:0,3,4,21,17
+
+if [ $? -ne 0 ]; then
+   echo "Spark特征分桶任务执行失败"
+   exit 1
+else
+   echo "spark特征分桶任务执行成功"
+fi
+
+
+# cron job configuration
+# 0 11 * * * cd /root/zhaohp/recommend-emr-dataprocess && /bin/sh ./recommend/data_new_table.sh > logs/recommend/data_new_table/$(date +\%Y\%m\%d\%H\%M).log 2>&1
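One caveat on the two backgrounded Spark jobs in this script: a bare `wait` returns 0 once all children have exited, regardless of their individual statuses, so the `$? -ne 0` branch after it can never observe a failed job. A minimal sketch of a stricter pattern, assuming bash and abbreviating the two spark-class2 invocations to hypothetical placeholder functions:

    # Launch both backfill windows in the background, remembering each PID.
    pids=()
    run_spark_window_1 & pids+=($!)   # placeholder for the first spark-class2 call
    run_spark_window_2 & pids+=($!)   # placeholder for the second spark-class2 call

    # Wait on each PID individually so a non-zero exit status is actually seen.
    fail=0
    for pid in "${pids[@]}"; do
        wait "$pid" || fail=1
    done

    if [ "$fail" -ne 0 ]; then
        echo "Spark raw sample generation job failed"
        exit 1
    fi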