#!/bin/sh
set -x

source /root/anaconda3/bin/activate py37

export SPARK_HOME=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8
export PATH=$SPARK_HOME/bin:$PATH
export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
export JAVA_HOME=/usr/lib/jvm/java-1.8.0

# nohup sh handle_rov.sh > "$(date +%Y%m%d_%H%M%S)_handle_rov.log" 2>&1 &

# Source table name for the raw data
table='alg_recsys_sample_all_v2'

# Partition configuration. The recommendation data is produced with a one-day lag,
# so at 00:00 on day 5 the hours 00-23 of day 3 are used to build the new-model data.
# (An optional sketch for deriving these dates automatically follows this block.)
begin_early_2_Str=20240728
end_early_2_Str=20240728
beginHhStr=00
endHhStr=23
max_hour=05
max_minute=00
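
# The dates above could also be derived from the run date instead of being hard-coded.
# This is only a sketch, assuming GNU date is available on the node; the commands are
# left commented out and are not used by the rest of the script.
# begin_early_2_Str=$(date -d "2 days ago" +%Y%m%d)
# end_early_2_Str=${begin_early_2_Str}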

# Absolute HDFS paths for the output of each step
# Raw (origin) sample data
originDataPath=/dw/recommend/model/41_recsys_sample_data_new_table/
# Bucketed feature data
bucketDataPath=/dw/recommend/model/43_recsys_train_data_new_table/
# hadoop binary
HADOOP=/opt/apps/HADOOP-COMMON/hadoop-common-current/bin/hadoop

# Step 0: check whether the upstream table has finished producing data,
# waiting at most until max_hour:max_minute.
# (See the note after this block about the hour/minute comparison.)
# shellcheck disable=SC2154
# echo "$(date +%Y-%m-%d_%H-%M-%S)----------step0------------start checking whether the data has been produced, partitions: beginStr:${begin_early_2_Str}${beginHhStr}, endStr:${end_early_2_Str}${endHhStr}"
# while true; do
#   python_return_code=$(python /root/joe/recommend-emr-dataprocess/qiaojialiang/checkHiveDataUtil.py --table ${table} --beginStr ${begin_early_2_Str}${beginHhStr} --endStr ${end_early_2_Str}${endHhStr})
#   echo "python exit code: ${python_return_code}"
#   if [ $python_return_code -eq 0 ]; then
#     echo "The Python check returned 0: data exists, leaving the loop."
#     break
#   fi
#   echo "The Python check returned non-zero: data not ready, retrying in five minutes."
#   sleep 300
#   current_hour=$(date +%H)
#   current_minute=$(date +%M)
#   # shellcheck disable=SC2039
#   if (( current_hour > max_hour || (current_hour == max_hour && current_minute >= max_minute) )); then
#     echo "Maximum wait time reached, giving up: ${current_hour}-${current_minute}"
#     exit 1
#   fi
# done
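
# Note on the disabled check above: with leading-zero values such as "08" or "09"
# from `date +%H`, bash arithmetic inside (( )) treats them as octal and errors out.
# A sketch of a safer comparison, assuming the same variables, would force base 10:
# if (( 10#$current_hour > 10#$max_hour || (10#$current_hour == 10#$max_hour && 10#$current_minute >= 10#$max_minute) )); then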

# Step 1: produce the raw sample data. The day is split into three hour ranges
# (00-09, 10-15, 16-23) that are processed by three Spark jobs in parallel.
echo "$(date +%Y-%m-%d_%H-%M-%S)----------step1------------start producing raw data from ${table}"

/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_41_originData_20240709 \
--master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
tablePart:64 repartition:32 \
beginStr:${begin_early_2_Str}00 endStr:${end_early_2_Str}09 \
savePath:${originDataPath} \
table:${table} &
step1_pid_1=$!

/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_41_originData_20240709 \
--master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
tablePart:64 repartition:32 \
beginStr:${begin_early_2_Str}10 endStr:${end_early_2_Str}15 \
savePath:${originDataPath} \
table:${table} &
step1_pid_2=$!

/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_41_originData_20240709 \
--master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
tablePart:64 repartition:32 \
beginStr:${begin_early_2_Str}16 endStr:${end_early_2_Str}23 \
savePath:${originDataPath} \
table:${table} &
step1_pid_3=$!

# `wait` without arguments always returns 0, so each background job's exit status
# is checked individually.
step1_failed=0
wait ${step1_pid_1} || step1_failed=1
wait ${step1_pid_2} || step1_failed=1
wait ${step1_pid_3} || step1_failed=1
if [ ${step1_failed} -ne 0 ]; then
    echo "Spark raw-sample production job failed"
    exit 1
else
    echo "Spark raw-sample production finished successfully"
fi
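
# Optional sanity check before bucketing: confirm that step 1 wrote output for the
# target day. This is only a sketch; it assumes the origin-data job creates a
# per-day entry containing ${begin_early_2_Str} under ${originDataPath}, which should
# be verified against the actual directory layout before enabling it.
# $HADOOP fs -ls "${originDataPath}" | grep "${begin_early_2_Str}" > /dev/null || {
#     echo "No origin data found for ${begin_early_2_Str} under ${originDataPath}"
#     exit 1
# }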

# Step 2: feature bucketing
echo "$(date +%Y-%m-%d_%H-%M-%S)----------step2------------produce re-scored feature data according to the feature buckets"
/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_43_bucketData_20240709 \
--master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
readPath:${originDataPath} \
savePath:${bucketDataPath} \
beginStr:${begin_early_2_Str} endStr:${end_early_2_Str} repartition:500 \
filterNames:XXXXXXXXX \
fileName:20240609_bucket_314.txt \
whatLabel:is_return whatApps:0,4,21,17
if [ $? -ne 0 ]; then
    echo "Spark feature-bucketing job failed"
    exit 1
else
    echo "Spark feature bucketing finished successfully"
fi
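
# Optional: report the size of the bucketed output as a quick sanity check. A sketch
# only, assuming the bucketing job writes a per-day directory named after the date
# string under ${bucketDataPath}; verify the layout before enabling it.
# $HADOOP fs -du -s -h "${bucketDataPath}${begin_early_2_Str}" || true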

echo "$(date +%Y-%m-%d_%H-%M-%S)----------step2------------Spark feature bucketing finished successfully for partition: ${begin_early_2_Str}"