make_data.sh

#!/bin/bash
# make_data.sh -- generate raw ad-display training data for one date range.
# Usage: make_data.sh <start_date> <end_date> <table>  (dates in yyyyMMdd)

start_date=""
end_date=""
table=""
if (( $# == 3 )); then
    start_date=$1
    end_date=$2
    table=$3
else
    # No (or the wrong number of) arguments: fall back to hard-coded defaults.
    start_date=20251216
    end_date=20251216
    table=alg_ad_display_fission_rate_20251218
fi

# Environment for the EMR Spark/Hadoop installation.
export SPARK_HOME=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8
export PATH=$SPARK_HOME/bin:$PATH
export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
export JAVA_HOME=/usr/lib/jvm/java-1.8.0

# params
savePath=/dw/recommend/model/ad_display/data

# Step 1: generate raw data from the source table.
echo "$(date +%Y-%m-%d_%H-%M-%S)----------step1------------start generating raw data from ${table}"
set -x
# Submit the Spark job on YARN; the trailing key:value pairs after the jar path
# are arguments consumed by the job itself, not by spark-submit.
# Note: the memoryOverhead key is case-sensitive, so the original lowercase
# spelling (memoryoverhead) would have been silently ignored by Spark.
$SPARK_HOME/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
    --class com.aliyun.odps.spark.examples.makedata_recsys_r_rate.makedata_display_ad_20251218 \
    --master yarn --driver-memory 4G --executor-memory 6G --executor-cores 1 --num-executors 8 \
    --conf spark.yarn.executor.memoryOverhead=2048 \
    /mnt/disk1/jch/recommend-emr-dataprocess/target/spark-examples-1.0.0-SNAPSHOT-jar-with-dependencies.jar \
    table:${table} \
    tablePart:64 \
    beginStr:${start_date} \
    endStr:${end_date} \
    repartition:32 \
    savePath:${savePath}
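
Usage note: the script accepts either three positional arguments (start date, end date, source table) or none, in which case it falls back to the hard-coded defaults shown above. A hypothetical invocation for a two-day range (the end date 20251217 here is illustrative, not from the original):

    sh make_data.sh 20251216 20251217 alg_ad_display_fission_rate_20251218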