  1. #!/bin/sh
  2. set -x
  3. export PATH=$SPARK_HOME/bin:$PATH
  4. export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
  5. export JAVA_HOME=/usr/lib/jvm/java-1.8.0
  6. export PREDICT_CACHE_PATH=/root/zhaohp/XGB/predict_cache/
  7. export SEGMENT_BASE_PATH=/dw/recommend/model/36_model_attachment/score_calibration_file
  8. sh_path=$(cd $(dirname $0); pwd)
  9. source ${sh_path}/00_common.sh
  10. source /root/anaconda3/bin/activate py37
  11. # 全局常量
  12. LOG_PREFIX=广告模型训练任务
  13. HADOOP=/opt/apps/HADOOP-COMMON/hadoop-common-current/bin/hadoop
  14. TRAIN_PATH=/dw/recommend/model/31_ad_sample_data_v4
  15. BUCKET_FEATURE_PATH=/dw/recommend/model/33_ad_train_data_v4
  16. TABLE=alg_recsys_ad_sample_all
  17. # 特征文件名
  18. feature_file=20240703_ad_feature_name.txt
  19. # 模型本地临时保存路径
  20. model_local_home=/root/zhaohp/XGB/
  21. # 模型HDFS保存路径,测试时修改为其他路径,避免影响线上
  22. MODEL_PATH=/dw/recommend/model/35_ad_model
  23. # 预测结果保存路径,测试时修改为其他路径,避免影响线上
  24. PREDICT_RESULT_SAVE_PATH=/dw/recommend/model/34_ad_predict_data
  25. # 模型OSS保存路径,测试时修改为其他路径,避免影响线上
  26. MODEL_OSS_PATH=oss://art-recommend.oss-cn-hangzhou.aliyuncs.com/zhangbo/
  27. # 线上模型名,测试时修改为其他模型名,避免影响线上
  28. model_name=model_xgb_351_1000_v2
  29. # 线上校准文件名
  30. OSS_CALIBRATION_FILE_NAME=model_xgb_351_1000_v2_calibration
  31. # 用于存放一些临时的文件
  32. PREDICT_CACHE_PATH=/root/zhaohp/XGB/predict_cache
  33. # 本地保存HDFS模型路径文件,测试时修改为其他模型名,避免影响线上
  34. model_path_file=${model_local_home}/online_model_path.txt
  35. # 获取当前是星期几,1表示星期一
  36. current_day_of_week="$(date +"%u")"
  37. # 任务开始时间
  38. start_time=$(date +%s)
  39. # 前一天
  40. today_early_1="$(date -d '1 days ago' +%Y%m%d)"
  41. # 线上模型在HDFS中的路径
  42. online_model_path=`cat ${model_path_file}`
  43. # 训练用的数据路径
  44. train_data_path=""
  45. # 评估用的数据路径
  46. predict_date_path=""
  47. #评估结果保存路径
  48. new_model_predict_result_path=""
  49. # 模型保存路径
  50. model_save_path=""
  51. # 评测结果保存路径,后续需要根据此文件评估是否要更新模型
  52. predict_analyse_file_path=""
  53. # 校准文件保存路径
  54. calibration_file_path=""
  55. # 保存模型评估的分析结果
  56. old_incr_rate_avg=0
  57. new_incr_rate_avg=0
  58. # Top10的详情
  59. top10_msg=""
  60. # AUC值
  61. old_auc=0
  62. new_auc=0
  63. declare -A real_score_map
  64. declare -A old_score_map
  65. declare -A new_score_map
  66. init() {
  67. declare -a date_keys=()
  68. local count=1
  69. local current_data="$(date -d '2 days ago' +%Y%m%d)"
  70. # 循环获取前 n 天的非节日日期
  71. while [[ ${count} -le 7 ]]; do
  72. date_key=$(date -d "${current_data}" +%Y%m%d)
  73. # 判断是否是节日,并拼接训练数据路径
  74. if [ $(is_not_holidays ${date_key}) -eq 1 ]; then
  75. # 将 date_key 放入数组
  76. date_keys+=("${date_key}")
  77. if [[ -z ${train_data_path} ]]; then
  78. train_data_path="${BUCKET_FEATURE_PATH}/${date_key}"
  79. else
  80. train_data_path="${BUCKET_FEATURE_PATH}/${date_key},${train_data_path}"
  81. fi
  82. count=$((count + 1))
  83. else
  84. echo "日期: ${date_key}是节日,跳过"
  85. fi
  86. current_data=$(date -d "${current_data} -1 day" +%Y%m%d)
  87. done
  88. last_index=$((${#date_keys[@]} - 1))
  89. train_first_day=${date_keys[$last_index]}
  90. train_last_day=${date_keys[0]}
  91. model_save_path=${MODEL_PATH}/${model_name}_${train_first_day: -4}_${train_last_day: -4}
  92. predict_date_path=${BUCKET_FEATURE_PATH}/${today_early_1}
  93. new_model_predict_result_path=${PREDICT_RESULT_SAVE_PATH}/${today_early_1}_351_1000_${train_first_day: -4}_${train_last_day: -4}
  94. online_model_predict_result_path=${PREDICT_RESULT_SAVE_PATH}/${today_early_1}_351_1000_${online_model_path: -9}
  95. predict_analyse_file_path=${model_local_home}/predict_analyse_file/${today_early_1}_351_1000_analyse.txt
  96. calibration_file_path=${model_local_home}/${OSS_CALIBRATION_FILE_NAME}.txt
  97. echo "init param train_data_path: ${train_data_path}"
  98. echo "init param predict_date_path: ${predict_date_path}"
  99. echo "init param new_model_predict_result_path: ${new_model_predict_result_path}"
  100. echo "init param online_model_predict_result_path: ${online_model_predict_result_path}"
  101. echo "init param model_save_path: ${model_save_path}"
  102. echo "init param online_model_path: ${online_model_path}"
  103. echo "init param feature_file: ${feature_file}"
  104. echo "init param model_name: ${model_name}"
  105. echo "init param model_local_home: ${model_local_home}"
  106. echo "init param model_oss_path: ${MODEL_OSS_PATH}"
  107. echo "init param predict_analyse_file_path: ${predict_analyse_file_path}"
  108. echo "init param calibration_file_path: ${calibration_file_path}"
  109. echo "init param current_day_of_week: ${current_day_of_week}"
  110. echo "当前Python环境安装的Python版本: $(python --version)"
  111. echo "当前Python环境安装的三方包: $(python -m pip list)"
  112. }
  113. xgb_train() {
  114. local step_start_time=$(date +%s)
  115. /opt/apps/SPARK3/spark-3.3.1-hadoop3.2-1.0.5/bin/spark-class org.apache.spark.deploy.SparkSubmit \
  116. --class com.tzld.piaoquan.recommend.model.train_01_xgb_ad_20240808 \
  117. --master yarn --driver-memory 6G --executor-memory 10G --executor-cores 1 --num-executors 31 \
  118. --conf spark.yarn.executor.memoryoverhead=2048 \
  119. --conf spark.shuffle.service.enabled=true \
  120. --conf spark.shuffle.service.port=7337 \
  121. --conf spark.shuffle.consolidateFiles=true \
  122. --conf spark.shuffle.manager=sort \
  123. --conf spark.storage.memoryFraction=0.4 \
  124. --conf spark.shuffle.memoryFraction=0.5 \
  125. --conf spark.default.parallelism=200 \
  126. /root/zhangbo/recommend-model/recommend-model-produce/target/recommend-model-produce-jar-with-dependencies.jar \
  127. featureFile:20240703_ad_feature_name.txt \
  128. trainPath:${train_data_path} \
  129. testPath:${predict_date_path} \
  130. savePath:${new_model_predict_result_path} \
  131. modelPath:${model_save_path} \
  132. eta:0.01 gamma:0.0 max_depth:5 num_round:1000 num_worker:30 repartition:20
  133. local return_code=$?
  134. check_run_status ${return_code} ${step_start_time} "XGB模型训练任务" "XGB模型训练失败"
  135. }
  136. calc_model_predict() {
  137. local count=0
  138. local max_line=10
  139. local old_total_diff=0
  140. local new_total_diff=0
  141. top10_msg="| CID | 老模型相对真实CTCVR的变化 | 新模型相对真实CTCVR的变化 |"
  142. top10_msg+=" \n| ---- | --------- | -------- |"
  143. while read -r line && [ ${count} -lt ${max_line} ]; do
  144. # 使用 ! 取反判断,只有当行中不包含 "cid" 时才执行继续的逻辑
  145. if [[ "${line}" == *"cid"* ]]; then
  146. continue
  147. fi
  148. read -a numbers <<< "${line}"
  149. # 分数分别保存
  150. real_score_map[${numbers[0]}]=${numbers[3]}
  151. old_score_map[${numbers[0]}]=${numbers[6]}
  152. new_score_map[${numbers[0]}]=${numbers[7]}
  153. # 拼接Top10详情的飞书消息
  154. top10_msg="${top10_msg} \n| ${numbers[0]} | ${numbers[6]} | ${numbers[7]} | "
  155. # 计算top10相对误差绝对值的均值
  156. old_abs_score=$( echo "${numbers[6]} * ((${numbers[6]} >= 0) * 2 - 1)" | bc -l )
  157. new_abs_score=$( echo "${numbers[7]} * ((${numbers[7]} >= 0) * 2 - 1)" | bc -l )
  158. old_total_diff=$( echo "${old_total_diff} + ${old_abs_score}" | bc -l )
  159. new_total_diff=$( echo "${new_total_diff} + ${new_abs_score}" | bc -l )
  160. count=$((${count} + 1))
  161. done < "${predict_analyse_file_path}"
  162. local return_code=$?
  163. check_run_status ${return_code} ${step_start_time} "计算Top10差异" "计算Top10差异异常"
  164. old_incr_rate_avg=$( echo "scale=6; ${old_total_diff} / ${count}" | bc -l )
  165. check_run_status $? ${step_start_time} "计算老模型Top10差异" "计算老模型Top10差异异常"
  166. new_incr_rate_avg=$( echo "scale=6; ${new_total_diff} / ${count}" | bc -l )
  167. check_run_status $? ${step_start_time} "计算新模型Top10差异" "计算新模型Top10差异异常"
  168. echo "老模型Top10差异平均值: ${old_incr_rate_avg}"
  169. echo "新模型Top10差异平均值: ${new_incr_rate_avg}"
  170. echo "新老模型分数对比: "
  171. for cid in "${!new_score_map[@]}"; do
  172. echo "\t CID: $cid, 老模型分数: ${old_score_map[$cid]}, 新模型分数: ${new_score_map[$cid]}"
  173. done
  174. }
  175. calc_auc() {
  176. old_auc=`cat ${PREDICT_CACHE_PATH}/old_1.txt | /root/sunmingze/AUC/AUC`
  177. new_auc=`cat ${PREDICT_CACHE_PATH}/new_1.txt | /root/sunmingze/AUC/AUC`
  178. }
  179. model_predict() {
  180. # 线上模型评估最新的数据
  181. local step_start_time=$(date +%s)
  182. /opt/apps/SPARK3/spark-3.3.1-hadoop3.2-1.0.5/bin/spark-class org.apache.spark.deploy.SparkSubmit \
  183. --class com.tzld.piaoquan.recommend.model.pred_01_xgb_ad_hdfsfile_20240813 \
  184. --master yarn --driver-memory 1G --executor-memory 3G --executor-cores 1 --num-executors 30 \
  185. --conf spark.yarn.executor.memoryoverhead=1024 \
  186. --conf spark.shuffle.service.enabled=true \
  187. --conf spark.shuffle.service.port=7337 \
  188. --conf spark.shuffle.consolidateFiles=true \
  189. --conf spark.shuffle.manager=sort \
  190. --conf spark.storage.memoryFraction=0.4 \
  191. --conf spark.shuffle.memoryFraction=0.5 \
  192. --conf spark.default.parallelism=200 \
  193. /root/zhangbo/recommend-model/recommend-model-produce/target/recommend-model-produce-jar-with-dependencies.jar \
  194. featureFile:20240703_ad_feature_name.txt \
  195. testPath:${predict_date_path} \
  196. savePath:${online_model_predict_result_path} \
  197. modelPath:${online_model_path}
  198. local return_code=$?
  199. check_run_status ${return_code} ${step_start_time} "线上模型评估${predict_date_path: -8}的数据" "线上模型评估${predict_date_path: -8}的数据失败"
  200. # 结果分析
  201. local python_return_code=$(python ${sh_path}/model_predict_analyse.py -op ${online_model_predict_result_path} -np ${new_model_predict_result_path} -af ${predict_analyse_file_path} -cf ${calibration_file_path})
  202. check_run_status ${python_return_code} ${step_start_time} "分析线上模型评估${predict_date_path: -8}的数据" "分析线上模型评估${predict_date_path: -8}的数据失败"
  203. calc_model_predict
  204. calc_auc
  205. if (( $(echo "${new_incr_rate_avg} > 0.100000" | bc -l ) ));then
  206. echo "线上模型评估${predict_date_path: -8}的数据,绝对误差大于0.1,请检查"
  207. check_run_status 1 ${step_start_time} "${predict_date_path: -8}的数据,绝对误差大于0.1" "线上模型评估${predict_date_path: -8}的数据,绝对误差大于0.1,请检查"
  208. exit 1
  209. fi
  210. # 对比两个模型的差异
  211. score_diff=$( echo "${new_incr_rate_avg} - ${old_incr_rate_avg}" | bc -l )
  212. if (( $(echo "${score_diff} > 0.050000" | bc -l ) ));then
  213. echo "两个模型评估${predict_date_path: -8}的数据,两个模型分数差异为: ${score_diff}, 大于0.05, 请检查"
  214. check_run_status 1 ${step_start_time} "两个模型评估${predict_date_path: -8}的数据" "两个模型评估${predict_date_path: -8}的数据,两个模型分数差异为: ${score_diff}, 大于0.05"
  215. exit 1
  216. fi
  217. }
  218. # 主方法
  219. main() {
  220. init
  221. xgb_train
  222. model_predict
  223. }
  224. main