#!/bin/bash
# Build ad-display training samples.
# Positional arguments: feature config file, bucket config file, begin date, end date.
feature_file=""
bucket_file=""
beginStr=""
endStr=""
if (( $# == 4 )); then
    feature_file=$1
    bucket_file=$2
    beginStr=$3
    endStr=$4
else
    echo "usage: $0 <feature_file> <bucket_file> <beginStr> <endStr>" >&2
    exit 1
fi
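# Example invocation (script name, config files, and date range below are placeholders,
# not values taken from the original job):
#   sh make_display_ad_sample.sh feature.config bucket.config 20251201 20251207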
export SPARK_HOME=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8
export PATH=$SPARK_HOME/bin:$PATH
export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
export JAVA_HOME=/usr/lib/jvm/java-1.8.0
# Parameters passed through to the Spark job
readPath=/dw/recommend/model/ad_display/data      # input HDFS path (raw display data)
whatLabel=r1_uv                                    # which label field to use
fuSampleRate=0.12                                  # sampling rate
#fuSampleRate=0.6
notUseBucket=1                                     # 1 = do not apply the bucket file
repartition=10                                     # number of output partitions
savePath=/dw/recommend/model/ad_display/sample     # output HDFS path for generated samples
# Step 1: generate the raw sample data
echo "$(date +%Y-%m-%d_%H-%M-%S)----------step1------------start generating raw sample data from ${readPath}"
set -x
/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
--class com.aliyun.odps.spark.examples.makedata_recsys_r_rate.makedata_display_ad_sample_20251218 \
--master yarn --driver-memory 4G --executor-memory 6G --executor-cores 1 --num-executors 8 \
--conf spark.yarn.executor.memoryOverhead=2048 \
--files ${feature_file},${bucket_file} \
/mnt/disk1/jch/recommend-emr-dataprocess/target/spark-examples-1.0.0-SNAPSHOT-jar-with-dependencies.jar \
readPath:${readPath} \
beginStr:${beginStr} \
endStr:${endStr} \
whatLabel:${whatLabel} \
fuSampleRate:${fuSampleRate} \
notUseBucket:${notUseBucket} \
featureFile:${feature_file} \
featureBucket:${bucket_file} \
repartition:${repartition} \
savePath:${savePath}
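# (Optional hardening, not part of the original job: since this is labeled "step1",
#  a sketch of failing fast if spark-submit exited non-zero, so any later steps
#  do not run on missing output.)
if [ $? -ne 0 ]; then
    echo "$(date +%Y-%m-%d_%H-%M-%S)----------step1------------spark job failed, exiting" >&2
    exit 1
fi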