#!/bin/bash
# Predict user gender from profile samples with a pre-trained XGBoost model on Spark/YARN.
#
# Usage:   <script> <feature_file> <year>
#   feature_file - local feature-definition file, shipped to executors via --files
#   year         - date/partition prefix of the sample directories to score
#
# Requires: Hadoop conf at /etc/taihao-apps/hadoop-conf, Java 8, Spark 3.3.1.
set -x

feature_file=""
year=""
if (( $# == 2 )); then
  feature_file=$1
  year=$2
else
  # exit -1 is invalid (status must be 0-255); use a conventional non-zero code.
  printf 'Usage: %s <feature_file> <year>\n' "${0##*/}" >&2
  exit 1
fi

# env
export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
export JAVA_HOME=/usr/lib/jvm/java-1.8.0

# params
base_data_path=/dw/recommend/model/user_profile/gender/sample/train
model_path=/dw/recommend/model/user_profile/gender/model/model_xgb
minCnt=10
save_path=/dw/recommend/model/user_profile/gender/eval
repartition=4

# Build the comma-separated list of input partitions:
# ${base_data_path}/${year}_i,${base_data_path}/${year}_m
test_data_path=""
suffix_array=(i m)
for suffix in "${suffix_array[@]}"; do
  one_day_data_path="${base_data_path}/${year}_${suffix}"
  if [[ -z "$test_data_path" ]]; then
    test_data_path=$one_day_data_path
  else
    test_data_path="$test_data_path,$one_day_data_path"
  fi
done

echo "$(date)" "predict gender sample"
/opt/apps/SPARK3/spark-3.3.1-hadoop3.2-1.0.5/bin/spark-class org.apache.spark.deploy.SparkSubmit \
  --class com.tzld.piaoquan.recommend.model.pred_profile_gender_xgb_20251114 \
  --master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 8 \
  --conf spark.yarn.executor.memoryoverhead=1024 \
  --conf spark.shuffle.service.enabled=true \
  --conf spark.shuffle.service.port=7337 \
  --conf spark.shuffle.consolidateFiles=true \
  --conf spark.shuffle.manager=sort \
  --conf spark.storage.memoryFraction=0.4 \
  --conf spark.shuffle.memoryFraction=0.5 \
  --conf spark.default.parallelism=200 \
  --conf spark.debug.maxToStringFields=100 \
  --files "${feature_file}" \
  /mnt/disk1/jch/recommend-model/recommend-model-produce/target/recommend-model-produce-jar-with-dependencies.jar \
  modelPath:"${model_path}" \
  testPath:"${test_data_path}" \
  featureFile:"${feature_file}" \
  minCnt:"${minCnt}" \
  savePath:"${save_path}" \
  repartition:"${repartition}"