#!/bin/bash
# Submit a Spark job that runs prediction with a pre-trained XGBoost
# normalization model over a date range of daily HDFS data partitions.
#
# Uses bash features ([[ ]], C-style for loop), so the shebang must be
# bash, not sh.
set -x

export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
export JAVA_HOME=/usr/lib/jvm/java-1.8.0

# ---- params ----
FEATURE_FILE=20250303_recsys_nor_name.txt
BASE_TRAIN_DATA_PATH=/dw/recommend/model/82_recsys_nor_train_data
PREDICT_RESULT_PATH=/dw/recommend/model/82_recsys_nor_predict_data
MODEL_SAVE_PATH=/dw/recommend/model/82_recsys_nor_model/model_xgb

start_date=20250301
end_date=20250301

# Build a comma-separated list of daily partition paths for every date in
# [start_date, end_date], scanning at most 22 days from start_date.
test_data_path=""
for ((i = 0; i <= 21; i++)); do
  data_date=$(date -d "$start_date $i day" +"%Y%m%d")
  # YYYYMMDD strings compare correctly as integers, so -gt/-le are safe.
  if [ "$data_date" -gt "$end_date" ]; then
    break # past the end of the range; later iterations can only be larger
  fi
  one_day_data_path="${BASE_TRAIN_DATA_PATH}/${data_date}"
  if [[ -z "$test_data_path" ]]; then
    test_data_path=$one_day_data_path
  else
    test_data_path="$test_data_path,$one_day_data_path"
  fi
done

# Refuse to submit with an empty input list (e.g. end_date < start_date);
# the job would only fail later inside Spark with a less obvious error.
if [[ -z "$test_data_path" ]]; then
  echo "ERROR: no data dates in range ${start_date}..${end_date}" >&2
  exit 1
fi

/opt/apps/SPARK3/spark-3.3.1-hadoop3.2-1.0.5/bin/spark-class org.apache.spark.deploy.SparkSubmit \
  --class com.tzld.piaoquan.recommend.model.pred_recsys_61_xgb_nor_hdfsfile_20241209 \
  --master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
  --conf spark.yarn.executor.memoryoverhead=1024 \
  --conf spark.shuffle.service.enabled=true \
  --conf spark.shuffle.service.port=7337 \
  --conf spark.shuffle.consolidateFiles=true \
  --conf spark.shuffle.manager=sort \
  --conf spark.storage.memoryFraction=0.4 \
  --conf spark.shuffle.memoryFraction=0.5 \
  --conf spark.default.parallelism=200 \
  --conf spark.debug.maxToStringFields=100 \
  /mnt/disk1/jch/recommend-model/recommend-model-produce/target/recommend-model-produce-jar-with-dependencies.jar \
  labelLogType:0 \
  labelLogBase:1.5 \
  "featureFile:${FEATURE_FILE}" \
  "testPath:${test_data_path}" \
  "savePath:${PREDICT_RESULT_PATH}" \
  "modelPath:${MODEL_SAVE_PATH}"