25_xgb_make_data_origin_bucket.sh

#!/bin/sh
set -x

# NOTE: `source` is a bash extension, so /bin/sh is expected to resolve to bash here.
# Export SPARK_HOME before using it to extend PATH.
export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
export JAVA_HOME=/usr/lib/jvm/java-1.8.0
export SPARK_HOME=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8
export PATH=$SPARK_HOME/bin:$PATH

sh_path=$(dirname "$0")
source "${sh_path}/00_common.sh"
source /root/anaconda3/bin/activate py37
make_origin_data() {
  local step_start_time=$(date +%s)

  # Generate raw samples for three hour ranges of the previous day
  # (00-12, 13-18, 19-23) as parallel spark-submit jobs.
  /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
    --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_31_originData_20240718 \
    --master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
    ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
    tablePart:64 repartition:32 \
    beginStr:${today_early_1}00 endStr:${today_early_1}12 \
    savePath:${TRAIN_PATH} \
    table:${TABLE} \
    filterHours:00,01,02,03,04,05,06,07 \
    idDefaultValue:0.1 &

  /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
    --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_31_originData_20240718 \
    --master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
    ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
    tablePart:64 repartition:32 \
    beginStr:${today_early_1}13 endStr:${today_early_1}18 \
    savePath:${TRAIN_PATH} \
    table:${TABLE} \
    filterHours:00,01,02,03,04,05,06,07 \
    idDefaultValue:0.1 &

  /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
    --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_31_originData_20240718 \
    --master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
    ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
    tablePart:64 repartition:32 \
    beginStr:${today_early_1}19 endStr:${today_early_1}23 \
    savePath:${TRAIN_PATH} \
    table:${TABLE} \
    filterHours:00,01,02,03,04,05,06,07 \
    idDefaultValue:0.1 &

  # Note: `wait` with no arguments returns 0 even if a background job failed,
  # so return_code reflects the wait itself rather than the Spark jobs.
  wait
  local return_code=$?
  check_run_status $return_code $step_start_time "spark raw-sample generation task"
}
make_bucket_feature() {
  local step_start_time=$(date +%s)

  # Bucket the raw features produced above into the training feature format,
  # dropping feature groups matched by filterNames.
  /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
    --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_33_bucketData_20240718 \
    --master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
    ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
    beginStr:${today_early_1} endStr:${today_early_1} repartition:100 \
    filterNames:_4h_,_5h_,adid_,targeting_conversion_ \
    readPath:${TRAIN_PATH} \
    savePath:${BUCKET_FEATURE_PATH}

  local return_code=$?
  check_run_status $return_code $step_start_time "spark feature bucketing task"
}
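
The visible listing ends with the two function definitions. A minimal sketch of a driver section, assuming today_early_1, TABLE, TRAIN_PATH, and BUCKET_FEATURE_PATH are supplied by the sourced 00_common.sh (this tail is not shown above and is only an illustration), would simply run the steps in order:

# Hypothetical driver section, not part of the listing above.
# Assumes today_early_1, TABLE, TRAIN_PATH and BUCKET_FEATURE_PATH
# are provided by 00_common.sh or the environment.
make_origin_data
make_bucket_feature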