#!/bin/bash
set -x

export JAVA_HOME=/usr/lib/jvm/java-1.8.0
export SPARK_HOME=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8
export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
# SPARK_HOME must be defined before it is referenced in PATH.
export PATH=${SPARK_HOME}/bin:${PATH}


sh_path=$(dirname "$0")
source "${sh_path}/00_common.sh"

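# Activate the py37 conda environment before launching any jobs.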
source /root/anaconda3/bin/activate py37

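# Launch three Spark jobs in parallel, each producing raw training samples
# for one slice of yesterday's data (hours 00-12, 13-18, 19-23), then wait
# for all of them and report any per-slice failure.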
make_origin_data() {
  
  local step_start_time=$(date +%s)

  ${SPARK_HOME}/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
  --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_31_originData_20250110 \
  --master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
  ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
  tablePart:64 repartition:32 \
  beginStr:${today_early_1}00 endStr:${today_early_1}12 \
  savePath:${TRAIN_PATH} \
  table:${TABLE} \
  filterHours:00,01,02,03,04,05,06,07 \
  idDefaultValue:0.1 &
  local task1=$!

  ${SPARK_HOME}/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
  --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_31_originData_20250110 \
  --master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
  ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
  tablePart:64 repartition:32 \
  beginStr:${today_early_1}13 endStr:${today_early_1}18 \
  savePath:${TRAIN_PATH} \
  table:${TABLE} \
  filterHours:00,01,02,03,04,05,06,07 \
  idDefaultValue:0.1 &
  local task2=$!

  ${SPARK_HOME}/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
  --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_31_originData_20250110 \
  --master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
  ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
  tablePart:64 repartition:32 \
  beginStr:${today_early_1}19 endStr:${today_early_1}23 \
  savePath:${TRAIN_PATH} \
  table:${TABLE} \
  filterHours:00,01,02,03,04,05,06,07 \
  idDefaultValue:0.1 &
  local task3=$!

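  # Wait on each background submit separately so that every hour range's
  # exit code can be checked and reported on its own.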
  wait ${task1}
  local task1_return_code=$?

  wait ${task2}
  local task2_return_code=$?

  wait ${task3}
  local task3_return_code=$?


  check_run_status ${task1_return_code} ${step_start_time} "Spark raw sample generation job: producing hours 00~12 data failed"
  check_run_status ${task2_return_code} ${step_start_time} "Spark raw sample generation job: producing hours 13~18 data failed"
  check_run_status ${task3_return_code} ${step_start_time} "Spark raw sample generation job: producing hours 19~23 data failed"
}
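
# The three submits above differ only in their begin/end hours, so a shared
# helper could replace them. A minimal sketch (not wired into
# make_origin_data; the name submit_origin_data is hypothetical):
submit_origin_data() {
  local begin_hour=$1
  local end_hour=$2
  ${SPARK_HOME}/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
  --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_31_originData_20250110 \
  --master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
  ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
  tablePart:64 repartition:32 \
  beginStr:${today_early_1}${begin_hour} endStr:${today_early_1}${end_hour} \
  savePath:${TRAIN_PATH} \
  table:${TABLE} \
  filterHours:00,01,02,03,04,05,06,07 \
  idDefaultValue:0.1
}
# Usage: submit_origin_data 00 12 &   then capture $! as above.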



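# Bucketize yesterday's raw samples using the bucket definition file and
# write the bucketed features back to HDFS.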
make_bucket_feature() {

  local step_start_time=$(date +%s)
  
  ${SPARK_HOME}/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
  --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_33_bucketData_20250110 \
  --master yarn --driver-memory 2G --executor-memory 3G --executor-cores 1 --num-executors 16 \
  ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
  beginStr:${today_early_1} endStr:${today_early_1} repartition:64 \
  filterNames:_4h_,_5h_,adid_,targeting_conversion_ \
  bucketFileName:20250217_ad_bucket_688.txt \
  readPath:${TRAIN_PATH} \
  savePath:${BUCKET_FEATURE_PATH}

  local return_code=$?
  check_run_status ${return_code} ${step_start_time} "Spark feature bucketing job"
}

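# Bucketize yesterday's raw samples and write them into the Hive table
# ad_easyrec_eval_data_v2_sampled (partitioned by dt), with
# negSampleRate:0.04 applied to negative samples.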
make_bucket_feature_to_hive() {

  local step_start_time=$(date +%s)
  
  ${SPARK_HOME}/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
  --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_33_bucketDataToHive_20250110 \
  --master yarn --driver-memory 2G --executor-memory 3G --executor-cores 1 --num-executors 16 \
  ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
  beginStr:${today_early_1} endStr:${today_early_1} repartition:64 \
  filterNames:_4h_,_5h_,adid_,targeting_conversion_ \
  table:ad_easyrec_eval_data_v2_sampled \
  partition:"dt=${today_early_1}" \
  readPath:${TRAIN_PATH} \
  negSampleRate:0.04

  local return_code=$?
  check_run_status ${return_code} ${step_start_time} "Spark feature bucketing to Hive job"
}

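# Build bucketed features directly from the origin table
# alg_recsys_ad_sample_all and write them to ${outputTable} in Hive,
# presumably bypassing the intermediate HDFS read/write used by the two
# functions above.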
make_bucket_feature_from_origin_to_hive() {
  local step_start_time=$(date +%s)
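  # Negative sampling rate, overridable through the NEG_SAMPLE_RATE env var.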
  local neg_sample_rate=${NEG_SAMPLE_RATE:-0.04}
  
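  # Dynamic allocation with the external shuffle service lets YARN grow
  # the executor pool from the initial 30 up to maxExecutors=100.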
  ${SPARK_HOME}/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
  --class com.aliyun.odps.spark.examples.makedata_ad.v20240718.makedata_ad_33_bucketDataFromOriginToHive_20250228 \
  --master yarn --driver-memory 2G --executor-memory 3G --executor-cores 1 --num-executors 30 \
  --conf spark.dynamicAllocation.enabled=true \
  --conf spark.shuffle.service.enabled=true \
  --conf spark.dynamicAllocation.maxExecutors=100 \
  ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
  beginStr:${today_early_1} endStr:${today_early_1} \
  filterHours:${FILTER_HOURS:-00,01,02,03,04,05,06,07} \
  filterAdverIds:${FILTER_ADVER_IDS} \
  filterNames:_4h_,_5h_,adid_,targeting_conversion_ \
  outputTable:${outputTable} \
  inputTable:alg_recsys_ad_sample_all \
  negSampleRate:${neg_sample_rate}

  local return_code=$?
  check_run_status ${return_code} ${step_start_time} "Spark feature bucketing from origin to Hive job"
}
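
# No function is invoked in this file; it is presumably sourced by a driver
# that calls the steps in order, e.g. (hypothetical):
#   make_origin_data
#   make_bucket_feature_to_hive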