25_xgb_make_data_origin_bucket.sh 6.2 KB

#!/bin/bash
set -x

# SPARK_HOME must be exported before it is referenced in PATH.
export JAVA_HOME=/usr/lib/jvm/java-1.8.0
export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
export SPARK_HOME=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8
export PATH=$SPARK_HOME/bin:$PATH

sh_path=$(dirname "$0")
source "${sh_path}/00_common.sh"
source /root/anaconda3/bin/activate py37
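
# Build the previous day's raw training samples with three parallel Spark jobs,
# one per hour range: 00-12, 13-18 and 19-23. (today_early_1, TRAIN_PATH and
# TABLE are presumably defined in 00_common.sh.)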
make_origin_data() {
    local step_start_time=$(date +%s)

    # Hours 00-12, launched in the background.
    ${SPARK_HOME}/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
        --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_31_originData_20240718 \
        --master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
        ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
        tablePart:64 repartition:32 \
        beginStr:${today_early_1}00 endStr:${today_early_1}12 \
        savePath:${TRAIN_PATH} \
        table:${TABLE} \
        filterHours:00,01,02,03,04,05,06,07 \
        idDefaultValue:0.1 &
    local task1=$!

    # Hours 13-18.
    ${SPARK_HOME}/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
        --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_31_originData_20240718 \
        --master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
        ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
        tablePart:64 repartition:32 \
        beginStr:${today_early_1}13 endStr:${today_early_1}18 \
        savePath:${TRAIN_PATH} \
        table:${TABLE} \
        filterHours:00,01,02,03,04,05,06,07 \
        idDefaultValue:0.1 &
    local task2=$!

    # Hours 19-23.
    ${SPARK_HOME}/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
        --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_31_originData_20240718 \
        --master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
        ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
        tablePart:64 repartition:32 \
        beginStr:${today_early_1}19 endStr:${today_early_1}23 \
        savePath:${TRAIN_PATH} \
        table:${TABLE} \
        filterHours:00,01,02,03,04,05,06,07 \
        idDefaultValue:0.1 &
    local task3=$!

    # Wait for each background job and capture its exit code.
    wait ${task1}
    local task1_return_code=$?
    wait ${task2}
    local task2_return_code=$?
    wait ${task3}
    local task3_return_code=$?

    check_run_status ${task1_return_code} ${step_start_time} "Spark raw-sample job: hours 00~12 data generation failed"
    check_run_status ${task2_return_code} ${step_start_time} "Spark raw-sample job: hours 13~18 data generation failed"
    check_run_status ${task3_return_code} ${step_start_time} "Spark raw-sample job: hours 19~23 data generation failed"
}
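
# Bucketize the day's features in a single Spark job, dropping the feature
# groups matched by filterNames (_4h_, _5h_, adid_, targeting_conversion_).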
make_bucket_feature() {
    local step_start_time=$(date +%s)
    ${SPARK_HOME}/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
        --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_33_bucketData_20240718 \
        --master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
        ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
        beginStr:${today_early_1} endStr:${today_early_1} repartition:100 \
        filterNames:_4h_,_5h_,adid_,targeting_conversion_ \
        readPath:${TRAIN_PATH} \
        savePath:${BUCKET_FEATURE_PATH}
    local return_code=$?
    check_run_status ${return_code} ${step_start_time} "Spark feature-bucketing job"
}
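
# v2 variant: the same three Spark jobs as make_origin_data, but status is
# reported through check_run_status_v2, which takes an extra task-name argument.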
make_origin_data_v2() {
    local step_start_time=$(date +%s)

    # Hours 00-12, launched in the background.
    ${SPARK_HOME}/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
        --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_31_originData_20240718 \
        --master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
        ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
        tablePart:64 repartition:32 \
        beginStr:${today_early_1}00 endStr:${today_early_1}12 \
        savePath:${TRAIN_PATH} \
        table:${TABLE} \
        filterHours:00,01,02,03,04,05,06,07 \
        idDefaultValue:0.1 &
    local task1=$!

    # Hours 13-18.
    ${SPARK_HOME}/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
        --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_31_originData_20240718 \
        --master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
        ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
        tablePart:64 repartition:32 \
        beginStr:${today_early_1}13 endStr:${today_early_1}18 \
        savePath:${TRAIN_PATH} \
        table:${TABLE} \
        filterHours:00,01,02,03,04,05,06,07 \
        idDefaultValue:0.1 &
    local task2=$!

    # Hours 19-23.
    ${SPARK_HOME}/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
        --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_31_originData_20240718 \
        --master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
        ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
        tablePart:64 repartition:32 \
        beginStr:${today_early_1}19 endStr:${today_early_1}23 \
        savePath:${TRAIN_PATH} \
        table:${TABLE} \
        filterHours:00,01,02,03,04,05,06,07 \
        idDefaultValue:0.1 &
    local task3=$!

    # Wait for each background job and capture its exit code.
    wait ${task1}
    local task1_return_code=$?
    wait ${task2}
    local task2_return_code=$?
    wait ${task3}
    local task3_return_code=$?

    check_run_status_v2 ${task1_return_code} "Spark raw-sample job" ${step_start_time} "Spark raw-sample job: hours 00~12 data generation failed"
    check_run_status_v2 ${task2_return_code} "Spark raw-sample job" ${step_start_time} "Spark raw-sample job: hours 13~18 data generation failed"
    check_run_status_v2 ${task3_return_code} "Spark raw-sample job" ${step_start_time} "Spark raw-sample job: hours 19~23 data generation failed"
}
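
# v2 variant of the bucketing step, reporting through check_run_status_v2.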
make_bucket_feature_v2() {
    local step_start_time=$(date +%s)
    ${SPARK_HOME}/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
        --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_33_bucketData_20240718 \
        --master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
        ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
        beginStr:${today_early_1} endStr:${today_early_1} repartition:100 \
        filterNames:_4h_,_5h_,adid_,targeting_conversion_ \
        readPath:${TRAIN_PATH} \
        savePath:${BUCKET_FEATURE_PATH}
    local return_code=$?
    # The task name must be quoted now that it contains spaces.
    check_run_status_v2 ${return_code} "Spark feature-bucketing job" ${step_start_time} "Spark feature-bucketing job"
}
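
# Note: this file only defines the functions; presumably a caller sources it
# and invokes the steps in order, e.g. (hypothetical):
#   make_origin_data
#   make_bucket_feature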