make_data.sh

#!/bin/bash
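# Usage (derived from the argument handling below):
#   sh make_data.sh <year> <table>   # e.g. sh make_data.sh 2025 alg_recsys_feature_behavior_profile
#   sh make_data.sh                  # no arguments: fall back to the built-in defaults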
  2. year=""
  3. table=""
  4. if(($#==2))
  5. then
  6. year=$1
  7. table=$2
  8. else
  9. year=2025
  10. table=alg_recsys_feature_behavior_profile
  11. fi
set -x
export SPARK_HOME=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8
export PATH=$SPARK_HOME/bin:$PATH
export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
export JAVA_HOME=/usr/lib/jvm/java-1.8.0
# params
suffix="y,8,4,0,e,a,c,k,o,w,g,s,u,q,i,m"
savePath=/dw/recommend/model/user_profile/data/
# Step 1: generate the raw data
echo "$(date +%Y-%m-%d_%H-%M-%S)----------step1------------start generating raw data from ${table}"
# Submit the Spark job; job parameters are passed to the driver class as key:value arguments.
$SPARK_HOME/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
    --class com.aliyun.odps.spark.examples.makedata_recsys_r_rate.makedata_profile_20251114 \
    --master yarn --driver-memory 4G --executor-memory 6G --executor-cores 1 --num-executors 16 \
    --conf spark.yarn.executor.memoryOverhead=2048 \
    /mnt/disk1/jch/recommend-emr-dataprocess/target/spark-examples-1.0.0-SNAPSHOT-jar-with-dependencies.jar \
    table:${table} tablePart:48 \
    year:${year} \
    suffix:${suffix} \
    repartition:32 \
    savePath:${savePath} \