test_32.sh

#!/bin/bash
set -x
export JAVA_HOME=/usr/lib/jvm/java-1.8.0
# SPARK_HOME must be exported before PATH references it
export SPARK_HOME=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8
export PATH=$SPARK_HOME/bin:$PATH
export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
sh_path=$(dirname "$0")
source "${sh_path}/00_common.sh"
source /root/anaconda3/bin/activate py37
today_early_1="$(date -d '1 days ago' +%Y%m%d)"
TRAIN_PATH=/dw/recommend/model/31_ad_sample_data_dev_20250623
TABLE=alg_recsys_ad_sample_all
make_32() {
    local step_start_time=$(date +%s)
    # Run the Spark job in the background so $! captures its PID;
    # wait then propagates the job's exit status.
    "$SPARK_HOME"/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
        --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_31_originData_20250623 \
        --master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
        ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
        readPath:/dw/recommend/model/31_ad_sample_data_dev_20250623 \
        savePath:/dw/recommend/model/32_bucket_data_dev_20250623 \
        fileName:ad_bucket_688.txt &
    local task1=$!
    wait ${task1}
    local task1_return_code=$?
    return ${task1_return_code}
}
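
# A minimal fail-fast sketch (an assumption, not part of the original script):
# run_or_die is a hypothetical helper that aborts the script when a step
# returns non-zero; it relies on make_32 returning the Spark job's exit code.
run_or_die() {
    "$@"
    local rc=$?
    if [ ${rc} -ne 0 ]; then
        echo "step '$*' failed with exit code ${rc}" >&2
        exit ${rc}
    fi
}
# Usage: replace the bare make_32 call in the loop below with: run_or_die make_32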
end_date="20250628"
date_offset=21 # number of days to walk back from end_date; adjust as needed
# Loop: compute and print each date, then run the job
for ((i=0; i<=date_offset; i++)); do
    # Compute the offset date (end_date minus i days)
    today_early_1=$(date -d "$end_date - $i days" +%Y%m%d)
    echo "$today_early_1"
    make_32
done
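
# A possible date-parameterized variant (a sketch, not in the original script):
# the loop above recomputes today_early_1 each iteration, yet the job always
# reads and writes the hardcoded 20250623 paths. If the partitions follow the
# same naming scheme (an assumption), the date could be passed through:
make_32_for_date() {
    local dt=$1
    "$SPARK_HOME"/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
        --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_31_originData_20250623 \
        --master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
        ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
        readPath:/dw/recommend/model/31_ad_sample_data_dev_"${dt}" \
        savePath:/dw/recommend/model/32_bucket_data_dev_"${dt}" \
        fileName:ad_bucket_688.txt
}
# e.g. inside the loop: make_32_for_date "$today_early_1"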