#!/bin/bash
set -x

# Environment. SPARK_HOME must be assigned before it is prepended to PATH,
# otherwise PATH picks up an empty expansion.
export SPARK_HOME=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8
export PATH=${SPARK_HOME}/bin:${PATH}
export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
export JAVA_HOME=/usr/lib/jvm/java-1.8.0

# Shared helpers (check_run_status, etc.) and the Python 3.7 conda env.
sh_path=$(dirname "$0")
source "${sh_path}/00_common.sh"
source /root/anaconda3/bin/activate py37

today_early_1="$(date -d '1 days ago' +%Y%m%d)"

# Date range of the training samples to bucket.
start_date="20250607"
end_date="20250629"

# HDFS input (raw ad samples) and output (bucketed training data).
TRAIN_PATH=/dw/recommend/model/31_ad_sample_data_dev_20250623
BUCKET_FEATURE_PATH=/dw/recommend/model/dev_20250623/33_ad_train_data

# Submit the Spark job that discretizes the raw ad samples into bucketed
# features, dropping the feature groups listed in filterNames.
make_bucket_feature() {
    local step_start_time=$(date +%s)

    "${SPARK_HOME}/bin/spark-class2" org.apache.spark.deploy.SparkSubmit \
        --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_33_bucketData_20250110 \
        --master yarn --driver-memory 2G --executor-memory 3G --executor-cores 1 --num-executors 16 \
        ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
        beginStr:${start_date} endStr:${end_date} repartition:64 \
        filterNames:_4h_,_5h_,adid_,targeting_conversion_ \
        bucketFileName:20250217_ad_bucket_707.txt \
        readPath:${TRAIN_PATH} \
        savePath:${BUCKET_FEATURE_PATH}

    local return_code=$?
    check_run_status ${return_code} ${step_start_time} "Spark feature bucketing task"
}

make_bucket_feature
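
# ---------------------------------------------------------------------------
# Reference only: check_run_status is provided by 00_common.sh, which is not
# part of this excerpt. The commented sketch below is an assumption about its
# contract (exit code, step start epoch, task name); it logs elapsed time and
# aborts the pipeline on a non-zero code. The real helper may differ, e.g. by
# also sending alerts.
#
# check_run_status() {
#     local return_code=$1
#     local step_start_time=$2
#     local task_name=$3
#     local elapsed=$(( $(date +%s) - step_start_time ))
#     if [ "${return_code}" -ne 0 ]; then
#         echo "[FAIL] ${task_name}: exit ${return_code} after ${elapsed}s" >&2
#         exit 1
#     fi
#     echo "[OK] ${task_name}: finished in ${elapsed}s"
# }
# ---------------------------------------------------------------------------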