#!/bin/bash
# make_train_sample.sh
#
# Submit a Spark job that turns raw ad-display data into training samples.
#
# Usage:
#   make_train_sample.sh <feature_file> <bucket_file> <beginStr> <endStr>
#
#   feature_file  feature definition file, shipped to executors via --files
#   bucket_file   feature bucket file, shipped to executors via --files
#   beginStr      first date/partition of the input range (inclusive)
#   endStr        last date/partition of the input range (inclusive)
#
# Exits non-zero if the argument count is wrong or the submit fails.

set -u  # fail fast on any unset variable

feature_file=""
bucket_file=""
beginStr=""
endStr=""
if (( $# == 4 )); then
  feature_file=$1
  bucket_file=$2
  beginStr=$3
  endStr=$4
else
  # Was 'exit -1': exit statuses must be in 0-255, so use 1 and explain why.
  echo "Usage: $0 <feature_file> <bucket_file> <beginStr> <endStr>" >&2
  exit 1
fi

# Cluster environment (EMR paths are fixed for this deployment).
export SPARK_HOME=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8
export PATH="$SPARK_HOME/bin:$PATH"
export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
export JAVA_HOME=/usr/lib/jvm/java-1.8.0

# Job parameters (passed through to the Spark main class as key:value args).
readPath=/dw/recommend/model/ad_display/data
whatLabel=r1_uv
fuSampleRate=0.12        # negative-sample keep rate
#fuSampleRate=0.6
notUseBucket=1           # 1 = skip bucketing of features
repartition=10
savePath=/dw/recommend/model/ad_display/sample

# Step 1: produce raw sample data.
# NOTE: the original message interpolated ${table}, which was never defined
# anywhere in this script and always expanded to empty; the dead reference
# has been removed.
echo "$(date +%Y-%m-%d_%H-%M-%S)----------step1------------开始生产原始数据"
set -x
"${SPARK_HOME}/bin/spark-class2" org.apache.spark.deploy.SparkSubmit \
  --class com.aliyun.odps.spark.examples.makedata_recsys_r_rate.makedata_display_ad_sample_20251218 \
  --master yarn --driver-memory 4G --executor-memory 6G --executor-cores 1 --num-executors 8 \
  --conf spark.yarn.executor.memoryoverhead=2048 \
  --files "${feature_file},${bucket_file}" \
  /mnt/disk1/jch/recommend-emr-dataprocess/target/spark-examples-1.0.0-SNAPSHOT-jar-with-dependencies.jar \
  readPath:"${readPath}" \
  beginStr:"${beginStr}" \
  endStr:"${endStr}" \
  whatLabel:"${whatLabel}" \
  fuSampleRate:"${fuSampleRate}" \
  notUseBucket:"${notUseBucket}" \
  featureFile:"${feature_file}" \
  featureBucket:"${bucket_file}" \
  repartition:"${repartition}" \
  savePath:"${savePath}"