#!/bin/bash
set -ex
# nohup bash handle_rov.sh > "$(date +%Y%m%d_%H%M%S)_handle_rov.log" 2>&1 &

# Source data table name
#table='alg_recsys_sample_all'
table='alg_recsys_sample_all_test'

# Partition configuration. Recommendation data is produced with a one-day lag,
# so a run at 00:00 on the 5th builds the new model data from the 3rd's
# hour-00 through hour-23 partitions.
begin_early_2_Str="$(date -d '2 days ago' +%Y%m%d)"
end_early_2_Str="$(date -d '2 days ago' +%Y%m%d)"
beginHhStr=00
endHhStr=23

# Latest time (12:00) to keep waiting for the upstream table in step 0.
max_hour=12
max_minute=0

# Absolute HDFS output paths for each stage
originDataPath=/dw/recommend/model/13_sample_data/
valueDataPath=/dw/recommend/model/14_feature_data/
bucketDataPath=/dw/recommend/model/16_train_data/

# Step 0: wait until the upstream table has finished producing data, polling
# every five minutes and giving up at 12:00 at the latest.
source /root/anaconda3/bin/activate py37
echo "----------step1------------Start checking whether upstream data is ready. Partition info: begin_early_2_Str:${begin_early_2_Str}${beginHhStr}, end_early_2_Str:${end_early_2_Str}${endHhStr}"
while true; do
  # checkHiveDataUtil.py prints 0 on stdout once the requested partitions exist.
  python_return_code=$(python /root/joe/recommend-emr-dataprocess/qiaojialiang/checkHiveDataUtil.py \
    --table "${table}" \
    --begin_early_2_Str "${begin_early_2_Str}${beginHhStr}" \
    --end_early_2_Str "${end_early_2_Str}${endHhStr}")
  echo "python returned: ${python_return_code}"
  if [ "${python_return_code}" -eq 0 ]; then
    echo "Python check returned 0: data exists, leaving the wait loop."
    break
  fi
  echo "Python check returned non-zero: data is not ready, retrying in five minutes."
  sleep 300
  current_hour=$(date +%H)
  current_minute=$(date +%M)
  # Force base 10 so zero-padded hours such as 08/09 are not parsed as octal.
  if (( 10#$current_hour > max_hour || (10#$current_hour == max_hour && 10#$current_minute >= max_minute) )); then
    echo "Maximum wait time reached, aborting: ${current_hour}:${current_minute}"
    exit 1
  fi
done

# Step 1: produce the raw sample data.
echo "----------step2------------Start producing raw data from ${table}"
# With `set -e` a trailing `$?` check is never reached on failure,
# so test the command directly.
if ! /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
  --class com.aliyun.odps.spark.examples.makedata_qiao.makedata_13_originData_20240705 \
  --master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
  ../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
  tablePart:64 repartition:32 \
  beginStr:${begin_early_2_Str}${beginHhStr} endStr:${end_early_2_Str}${endHhStr} \
  savePath:${originDataPath} \
  table:${table}; then
  echo "Spark raw-sample production job failed"
  exit 1
fi
echo "Spark raw-sample production job succeeded"

# Step 2: join feature values onto the raw samples.
echo "----------step3------------Start feature-value join"
if ! /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
  --class com.aliyun.odps.spark.examples.makedata_qiao.makedata_14_valueData_20240705 \
  --master yarn --driver-memory 1G --executor-memory 3G --executor-cores 1 --num-executors 32 \
  ../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
  readPath:${originDataPath} \
  savePath:${valueDataPath} \
  beginStr:${begin_early_2_Str} endStr:${end_early_2_Str} repartition:1000; then
  echo "Spark feature-value join job failed"
  exit 1
fi
echo "Spark feature-value join job succeeded"

# Step 3: bucket the features to produce the re-scoring training data.
echo "----------step4------------Produce re-scoring feature data via feature bucketing"
if ! /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
  --class com.aliyun.odps.spark.examples.makedata_qiao.makedata_16_bucketData_20240705 \
  --master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
  ../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
  readPath:${valueDataPath} \
  savePath:${bucketDataPath} \
  beginStr:${begin_early_2_Str} endStr:${end_early_2_Str} repartition:1000; then
  echo "Spark feature-bucketing job failed"
  exit 1
fi
echo "Spark feature-bucketing job succeeded"
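
# A minimal cron sketch for unattended daily runs (an assumption, not part of
# the original pipeline config): the 01:00 start time is illustrative, chosen
# so the step-0 wait loop has until its 12:00 deadline to see upstream data,
# and the repo path is taken from the checkHiveDataUtil.py call above. Note
# that % must be escaped as \% inside a crontab entry.
#
# 0 1 * * * cd /root/joe/recommend-emr-dataprocess/qiaojialiang && bash handle_rov.sh > "$(date +\%Y\%m\%d_\%H\%M\%S)_handle_rov.log" 2>&1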