test_33.sh

#!/bin/bash
set -x

# SPARK_HOME and JAVA_HOME must be exported before PATH references them.
export JAVA_HOME=/usr/lib/jvm/java-1.8.0
export SPARK_HOME=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8
export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
export PATH=${SPARK_HOME}/bin:${PATH}

# Shared helpers (check_run_status) and the py37 conda environment.
sh_path=$(dirname "$0")
source "${sh_path}/00_common.sh"
source /root/anaconda3/bin/activate py37
# Date range of the samples to bucket. today_early_1 is computed here,
# but the run below uses the fixed start/end dates.
today_early_1="$(date -d '1 days ago' +%Y%m%d)"
start_date="20250607"
end_date="20250629"

# Input path (raw ad sample data) and output path (bucketed training features).
TRAIN_PATH=/dw/recommend/model/31_ad_sample_data_dev_20250623
BUCKET_FEATURE_PATH=/dw/recommend/model/dev_20250623/33_ad_train_data
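# To sanity-check the two paths (assuming they live on HDFS, which the
# YARN master and HADOOP_CONF_DIR setup suggest), something like:
#   hdfs dfs -ls ${TRAIN_PATH}            # input partitions should exist
#   hdfs dfs -ls ${BUCKET_FEATURE_PATH}   # populated after a successful run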
# Submit the Spark job that buckets the raw ad samples into discretized
# training features, then report the step's status.
make_bucket_feature() {
    local step_start_time=$(date +%s)

    ${SPARK_HOME}/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
        --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_33_bucketData_20250110 \
        --master yarn --driver-memory 2G --executor-memory 3G --executor-cores 1 --num-executors 16 \
        ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
        beginStr:${start_date} endStr:${end_date} repartition:64 \
        filterNames:_4h_,_5h_,adid_,targeting_conversion_ \
        bucketFileName:20250217_ad_bucket_707.txt \
        readPath:${TRAIN_PATH} \
        savePath:${BUCKET_FEATURE_PATH}

    local return_code=$?
    check_run_status ${return_code} ${step_start_time} "Spark feature bucketing task"
}
make_bucket_feature
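
# check_run_status is defined in 00_common.sh, which is not shown here.
# Purely as a hedged sketch of what such a helper might look like (the
# argument order matches the call above: exit code, step start epoch,
# step name; the real implementation in 00_common.sh may differ):
#
#   check_run_status() {
#       local return_code=$1
#       local step_start_time=$2
#       local step_name=$3
#       local elapsed=$(( $(date +%s) - step_start_time ))
#       if [ "${return_code}" -ne 0 ]; then
#           echo "[ERROR] ${step_name} failed after ${elapsed}s (exit ${return_code})"
#           exit "${return_code}"
#       fi
#       echo "[OK] ${step_name} finished in ${elapsed}s"
#   }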