#!/bin/sh
# Ad sample data production pipeline:
#   1) fan out three parallel "originData" Spark extraction jobs, each
#      covering a different hour range of yesterday's table partitions;
#   2) barrier on all three (failing fast if any job failed);
#   3) run the "bucketData" feature-bucketization job over the combined output.
set -x

export SPARK_HOME=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8
export PATH=$SPARK_HOME/bin:$PATH
export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
export JAVA_HOME=/usr/lib/jvm/java-1.8.0

# 'source' is a bashism and breaks under strict /bin/sh; '.' is the POSIX form.
. /root/anaconda3/bin/activate py37

# Global constants.
# NOTE(review): HADOOP and FM_HOME are not referenced below — presumably kept
# for consistency with sibling scripts; confirm before removing.
HADOOP=/opt/apps/HADOOP-COMMON/hadoop-common-current/bin/hadoop
FM_HOME=/root/sunmingze/alphaFM
TRAIN_PATH=/dw/recommend/model/31_ad_sample_data_v4
BUCKET_FEATURE_PATH=/dw/recommend/model/33_ad_train_data_v4
TABLE=alg_recsys_ad_sample_all

today="$(date +%Y%m%d)"
today_early_1="$(date -d '1 days ago' +%Y%m%d)"

# Launch one originData extraction job covering yesterday's hours [$1, $2].
# The three call sites below differ only in the hour range, so the long
# spark-submit invocation lives in one place instead of three copies.
run_origin_data() {
  begin_hour=$1
  end_hour=$2
  "$SPARK_HOME/bin/spark-class2" org.apache.spark.deploy.SparkSubmit \
    --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_31_originData_20240718 \
    --master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
    ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
    tablePart:64 repartition:32 \
    beginStr:"${today_early_1}${begin_hour}" endStr:"${today_early_1}${end_hour}" \
    savePath:"${TRAIN_PATH}" \
    table:"${TABLE}" \
    filterHours:00,01,02,03,04,05,06,07 \
    idDefaultValue:0.1
}

# Fan out the three hour ranges in parallel and remember each PID so the
# exit status can be checked — a bare 'wait' would silently discard failures
# and let the bucketization step run over incomplete data.
run_origin_data 00 12 & pid_a=$!
run_origin_data 13 18 & pid_b=$!
run_origin_data 19 23 & pid_c=$!

rc=0
wait "$pid_a" || rc=1
wait "$pid_b" || rc=1
wait "$pid_c" || rc=1
if [ "$rc" -ne 0 ]; then
  echo "originData extraction failed; skipping bucketData step" >&2
  exit 1
fi

# Bucketization: read the raw samples written above and emit bucketed
# training features, dropping the listed feature-name patterns.
"$SPARK_HOME/bin/spark-class2" org.apache.spark.deploy.SparkSubmit \
  --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_33_bucketData_20240718 \
  --master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
  ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
  beginStr:"${today_early_1}" endStr:"${today_early_1}" repartition:100 \
  filterNames:_4h_,_5h_,adid_,targeting_conversion_ \
  readPath:"${TRAIN_PATH}" \
  savePath:"${BUCKET_FEATURE_PATH}"