stat_freq.sh

#!/bin/bash

# Expects exactly three arguments: a start date, an end date (both YYYYMMDD),
# and the output sub-path under the analysis directory.
start_date=""
end_date=""
sub_path="feat_freq"
if (( $# == 3 )); then
    start_date=$1
    end_date=$2
    sub_path=$3
else
    echo "Usage: $0 <start_date> <end_date> <sub_path>"
    exit 1
fi
export SPARK_HOME=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8
export PATH=$SPARK_HOME/bin:$PATH
export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
export JAVA_HOME=/usr/lib/jvm/java-1.8.0
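# These locations are specific to the EMR node this script runs on;
# adjust SPARK_HOME, HADOOP_CONF_DIR, and JAVA_HOME for your environment.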
# params
BASE_DATA_PATH=/dw/recommend/model/ad_display/data

# Build a comma-separated list of daily input directories, covering at most
# 22 days (i = 0..21) from start_date and never going past end_date.
data_path=""
for (( i = 0; i <= 21; i++ )); do
    data_date=$(date -d "$start_date $i day" +"%Y%m%d")
    if [ "$data_date" -le "$end_date" ]; then
        one_day_data_path="${BASE_DATA_PATH}/${data_date}"
        if [[ -z $data_path ]]; then
            data_path=$one_day_data_path
        else
            data_path="$data_path,$one_day_data_path"
        fi
    fi
done
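# Example (illustrative dates): start_date=20240101 and end_date=20240103 yield
#   data_path=/dw/recommend/model/ad_display/data/20240101,\
#             /dw/recommend/model/ad_display/data/20240102,\
#             /dw/recommend/model/ad_display/data/20240103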
# Job parameters, forwarded to the Spark job below (featureIndex presumably
# selects which feature column to count; repartition sets output partitions).
featureIndex=2
repartition=1
savePath=/dw/recommend/model/832_recsys_analysis_data/${sub_path}
# Step 1: produce the raw statistics.
echo "$(date +%Y-%m-%d_%H-%M-%S)----------step1------------producing raw data from ${data_path}"
# Trace the spark-submit command below.
set -x
${SPARK_HOME}/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
    --class com.aliyun.odps.spark.examples.makedata_recsys_r_rate.stat_feature \
    --master yarn --driver-memory 4G --executor-memory 6G --executor-cores 1 --num-executors 16 \
    --conf spark.yarn.executor.memoryOverhead=2048 \
    /mnt/disk1/jch/recommend-emr-dataprocess/target/spark-examples-1.0.0-SNAPSHOT-jar-with-dependencies.jar \
    dataPath:${data_path} \
    featureIndex:${featureIndex} \
    repartition:${repartition} \
    savePath:${savePath}
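
A typical invocation (the dates here are illustrative) might look like:

    bash stat_freq.sh 20240101 20240107 feat_freq

This reads the daily directories from 20240101 through 20240107 under /dw/recommend/model/ad_display/data and writes the resulting statistics (presumably feature-frequency counts, given the stat_freq/feat_freq naming) to /dw/recommend/model/832_recsys_analysis_data/feat_freq.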