02_ad_model_dnn_v11_update.sh

#!/bin/bash
# bash (not sh) is required: the script uses associative arrays (declare -A),
# [[ ]] tests and ${var: -n} substring expansion
set -x

export PATH=$SPARK_HOME/bin:$PATH
export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
export JAVA_HOME=/usr/lib/jvm/java-1.8.0
export PREDICT_CACHE_PATH=/root/fengzhoutian/xgboost-dev/predict_cache/
export SEGMENT_BASE_PATH=/dw/recommend/model/36_model_attachment/score_calibration_file

sh_path=$(cd "$(dirname "$0")"; pwd)
source ${sh_path}/00_common.sh
source /root/anaconda3/bin/activate py37
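
# 00_common.sh is assumed to provide the is_not_holidays helper used by
# init() and make_train_node_conf() below.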
# Global constants
LOG_PREFIX="Ad model training job"
HADOOP=/opt/apps/HADOOP-COMMON/hadoop-common-current/bin/hadoop
TRAIN_PATH=/dw/recommend/model/31_ad_sample_data_v5
BUCKET_FEATURE_PATH=/dw/recommend/model/33_ad_train_data_v5
TABLE=alg_recsys_ad_sample_all
# Feature list file name
feature_file=20240703_ad_feature_name.txt
# Local scratch directory for model files
model_local_home=/root/fengzhoutian/xgboost-dev/
# HDFS path for saved models; point this at another path when testing so production is not affected
MODEL_PATH=/dw/recommend/model/35_ad_model
# Path for prediction results; point this at another path when testing so production is not affected
PREDICT_RESULT_SAVE_PATH=/dw/recommend/model/34_ad_predict_data
# OSS path for uploaded models; point this at another path when testing so production is not affected
MODEL_OSS_PATH=oss://art-recommend.oss-cn-hangzhou-internal.aliyuncs.com/fengzhoutian/
# Online model name; use another name when testing so production is not affected
model_name=model_xgb
model_ver=351_1000_14d_v1
model_name=${model_name}_${model_ver}
model_local_home=${model_local_home}/${model_name}
# Online calibration file name
OSS_CALIBRATION_FILE_NAME=${model_name}_calibration
# Directory for temporary files (same value as the export above)
PREDICT_CACHE_PATH=/root/fengzhoutian/xgboost-dev/predict_cache/
# Local file recording the online model's HDFS path; use another file when testing so production is not affected
model_path_file=${model_local_home}/online_model_path.txt
# Day of week, 1 = Monday
current_day_of_week="$(date +"%u")"
  40. # 任务开始时间
  41. start_time=$(date +%s)
  42. # 前一天
  43. today_early_1="$(date -d '1 days ago' +%Y%m%d)"
  44. # 线上模型在HDFS中的路径
  45. # online_model_path=`cat ${model_path_file}`
  46. # 训练用的数据路径
  47. train_data_path=""
  48. train_data_days=14
  49. # 评估用的数据路径
  50. predict_date_path=""
  51. #评估结果保存路径
  52. new_model_predict_result_path=""
  53. # 模型保存路径
  54. model_save_path=""
  55. # 评测结果保存路径,后续需要根据此文件评估是否要更新模型
  56. predict_analyse_file_path=""
  57. # 校准文件保存路径
  58. calibration_file_path=""
  59. # 保存模型评估的分析结果
  60. old_incr_rate_avg=0
  61. new_incr_rate_avg=0
  62. # Top10的详情
  63. top10_msg=""
  64. # AUC值
  65. old_auc=0
  66. new_auc=0
  67. declare -A real_score_map
  68. declare -A old_score_map
  69. declare -A new_score_map
# Check a step's exit code; on failure, send an alert and abort the job
check_run_status() {
    local status=$1
    local step_start_time=$2
    local step_name=$3
    local msg=$4

    local step_end_time=$(date +%s)
    local step_elapsed=$(($step_end_time - $step_start_time))

    if [[ -n "${old_auc}" && "${old_auc}" != "0" ]]; then
        msg+="\n\t - old model AUC: ${old_auc}"
    fi
    if [[ -n "${new_auc}" && "${new_auc}" != "0" ]]; then
        msg+="\n\t - new model AUC: ${new_auc}"
    fi

    if [ ${status} -ne 0 ]; then
        echo "${LOG_PREFIX} -- ${step_name} failed: took ${step_elapsed}s"
        local elapsed=$(($step_end_time - $start_time))
        /root/anaconda3/bin/python ${sh_path}/ad_monitor_util.py --level error --msg "${msg}" --start "${start_time}" --elapsed "${elapsed}" --top10 "${top10_msg}"
        exit 1
    else
        echo "${LOG_PREFIX} -- ${step_name} succeeded: took ${step_elapsed}s"
    fi
}
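
# Typical call pattern (as used throughout this script):
#   some_command
#   check_run_status $? ${step_start_time} "step name" "failure message"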
send_success_upload_msg() {
    # Send the "model updated" notification
    local msg="Ad model files updated successfully"
    msg+="\n\t - old model AUC: ${old_auc}"
    msg+="\n\t - new model AUC: ${new_auc}"
    msg+="\n\t - old model top-10 mean abs diff: ${old_incr_rate_avg}"
    msg+="\n\t - new model top-10 mean abs diff: ${new_incr_rate_avg}"
    msg+="\n\t - model path in HDFS: ${model_save_path}"
    msg+="\n\t - model path in OSS: ${MODEL_OSS_PATH}/${model_name}.tar.gz"

    local step_end_time=$(date +%s)
    local elapsed=$((${step_end_time} - ${start_time}))

    /root/anaconda3/bin/python ${sh_path}/ad_monitor_util.py --level info --msg "${msg}" --start "${start_time}" --elapsed "${elapsed}" --top10 "${top10_msg}"
}
init() {
    set +x
    declare -a date_keys=()
    local count=1
    local current_data="$(date -d "${today_early_1} -1 day" +%Y%m%d)"
    # Collect the most recent n non-holiday dates
    while [[ ${count} -le ${train_data_days} ]]; do
        date_key=$(date -d "${current_data}" +%Y%m%d)
        # Skip holidays; otherwise prepend the date's partition to the training path
        if [ $(is_not_holidays ${date_key}) -eq 1 ]; then
            date_keys+=("${date_key}")
            if [[ -z ${train_data_path} ]]; then
                train_data_path="${BUCKET_FEATURE_PATH}/${date_key}"
            else
                train_data_path="${BUCKET_FEATURE_PATH}/${date_key},${train_data_path}"
            fi
            count=$((count + 1))
        else
            echo "Date ${date_key} is a holiday, skipping"
        fi
        current_data=$(date -d "${current_data} -1 day" +%Y%m%d)
    done

    last_index=$((${#date_keys[@]} - 1))
    train_first_day=${date_keys[$last_index]}
    train_last_day=${date_keys[0]}

    model_save_path=${MODEL_PATH}/${model_name}_${train_first_day: -4}_${train_last_day: -4}
    predict_date_path=${BUCKET_FEATURE_PATH}/${today_early_1}
    new_model_predict_result_path=${PREDICT_RESULT_SAVE_PATH}/${today_early_1}_${model_ver}_${train_first_day: -4}_${train_last_day: -4}
    online_model_predict_result_path=${PREDICT_RESULT_SAVE_PATH}/${today_early_1}_${model_ver}_${online_model_path: -9}
    predict_analyse_file_path=${model_local_home}/predict_analyse_file/${today_early_1}_${model_ver}_analyse.txt
    calibration_file_path=${model_local_home}/${OSS_CALIBRATION_FILE_NAME}.txt

    echo "init param train_data_path: ${train_data_path}"
    echo "init param predict_date_path: ${predict_date_path}"
    echo "init param new_model_predict_result_path: ${new_model_predict_result_path}"
    echo "init param online_model_predict_result_path: ${online_model_predict_result_path}"
    echo "init param model_save_path: ${model_save_path}"
    echo "init param online_model_path: ${online_model_path}"
    echo "init param feature_file: ${feature_file}"
    echo "init param model_name: ${model_name}"
    echo "init param model_local_home: ${model_local_home}"
    echo "init param model_oss_path: ${MODEL_OSS_PATH}"
    echo "init param predict_analyse_file_path: ${predict_analyse_file_path}"
    echo "init param calibration_file_path: ${calibration_file_path}"
    echo "init param current_day_of_week: ${current_day_of_week}"
    echo "Python version in the current environment: $(python --version)"
    echo "Third-party packages in the current environment: $(python -m pip list)"
    set -x

    mkdir -p ${model_local_home}
}
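
# Note: check_ad_hive below captures ad_utils.py's stdout via command
# substitution, so the loop relies on the script *printing* 0 once the Hive
# partition is ready (an assumption based on how the value is tested), not on
# its process exit code.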
# Verify that the upstream big-data job has produced yesterday's partition
check_ad_hive() {
    local step_start_time=$(date +%s)
    local max_hour=05
    local max_minute=30
    local elapsed=0
    while true; do
        local python_return_code=$(python ${sh_path}/ad_utils.py --excute_program check_ad_origin_hive --partition ${today_early_1} --hh 23)
        elapsed=$(($(date +%s) - ${step_start_time}))
        if [ "${python_return_code}" -eq 0 ]; then
            break
        fi
        echo "Readiness check returned a non-zero value; retrying in five minutes."
        sleep 300
        local current_hour=$(date +%H)
        local current_minute=$(date +%M)
        # Force base 10 with 10#: date +%H/%M yield zero-padded values such as
        # 08 or 09, which (( )) would otherwise reject as invalid octal
        if (( 10#${current_hour} > 10#${max_hour} || ( 10#${current_hour} == 10#${max_hour} && 10#${current_minute} >= 10#${max_minute} ) )); then
            local msg="Upstream data readiness check failed, partition: ${today_early_1}"
            echo -e "${LOG_PREFIX} -- upstream data readiness check -- ${msg}: took ${elapsed}s"
            /root/anaconda3/bin/python ${sh_path}/ad_monitor_util.py --level error --msg "${msg}" --start "${start_time}" --elapsed "${elapsed}"
            exit 1
        fi
    done
    echo "${LOG_PREFIX} -- upstream data readiness check passed: took ${elapsed}s"
}
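
# Runs in a subshell so that outputTable and anything defined by
# 25_xgb_make_data_origin_bucket.sh stay scoped to this step.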
bucket_feature_from_origin_to_hive() {
    (
        export outputTable=ad_easyrec_train_data_v3_sampled
        source ${sh_path}/25_xgb_make_data_origin_bucket.sh
        make_bucket_feature_from_origin_to_hive
    )
}
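
# Train the XGBoost model on YARN. Everything after the jar path is passed as
# key:value arguments; by the usual XGBoost meanings, eta is the learning rate,
# max_depth the tree depth and num_round the number of boosting rounds, with
# negSampleRate matching the 0.04 negative-sampling rate of the training data.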
xgb_train() {
    local step_start_time=$(date +%s)

    /opt/apps/SPARK3/spark-3.3.1-hadoop3.2-1.0.5/bin/spark-class org.apache.spark.deploy.SparkSubmit \
        --class com.tzld.piaoquan.recommend.model.train_01_xgb_ad_20250104 \
        --master yarn --driver-memory 6G --executor-memory 10G --executor-cores 2 --num-executors 11 \
        --conf spark.yarn.executor.memoryoverhead=2048 \
        --conf spark.shuffle.service.enabled=true \
        --conf spark.shuffle.service.port=7337 \
        --conf spark.shuffle.consolidateFiles=true \
        --conf spark.shuffle.manager=sort \
        --conf spark.storage.memoryFraction=0.4 \
        --conf spark.shuffle.memoryFraction=0.5 \
        --conf spark.default.parallelism=200 \
        /root/fengzhoutian/recommend-model/recommend-model-produce/target/recommend-model-produce-jar-with-dependencies.jar \
        featureFile:${feature_file} \
        trainPath:${train_data_path} \
        testPath:${predict_date_path} \
        savePath:${new_model_predict_result_path} \
        modelPath:${model_save_path} \
        eta:0.01 gamma:0.0 max_depth:5 num_round:1000 num_worker:10 repartition:20 \
        negSampleRate:0.04

    local return_code=$?
    check_run_status ${return_code} ${step_start_time} "XGB model training" "XGB model training failed"
}
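
# calc_model_predict parses ${predict_analyse_file_path}. Judging by the field
# indices used below, each data row is whitespace-separated with column 0 = CID,
# 3 = real CTCVR, 6 = old model's relative diff and 7 = new model's relative
# diff; header rows contain the literal "cid" and are skipped.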
calc_model_predict() {
    # step_start_time is inherited from the caller (compare_predictions) via
    # bash dynamic scoping
    local count=0
    local max_line=10
    local old_total_diff=0
    local new_total_diff=0
    top10_msg="| CID | old model diff vs. real CTCVR | new model diff vs. real CTCVR |"
    top10_msg+=" \n| ---- | --------- | -------- |"
    while read -r line && [ ${count} -lt ${max_line} ]; do
        # Skip header rows (they contain "cid"); only data rows are processed
        if [[ "${line}" == *"cid"* ]]; then
            continue
        fi

        read -a numbers <<< "${line}"

        # Record the scores per CID
        real_score_map[${numbers[0]}]=${numbers[3]}
        old_score_map[${numbers[0]}]=${numbers[6]}
        new_score_map[${numbers[0]}]=${numbers[7]}

        # Append a row to the top-10 Feishu message
        top10_msg="${top10_msg} \n| ${numbers[0]} | ${numbers[6]} | ${numbers[7]} | "

        # Accumulate |diff| for the top-10 mean absolute error; bc has no abs(),
        # so multiply by the sign: (x >= 0) * 2 - 1 evaluates to 1 or -1
        old_abs_score=$( echo "${numbers[6]} * ((${numbers[6]} >= 0) * 2 - 1)" | bc -l )
        new_abs_score=$( echo "${numbers[7]} * ((${numbers[7]} >= 0) * 2 - 1)" | bc -l )
        old_total_diff=$( echo "${old_total_diff} + ${old_abs_score}" | bc -l )
        new_total_diff=$( echo "${new_total_diff} + ${new_abs_score}" | bc -l )

        count=$((${count} + 1))
    done < "${predict_analyse_file_path}"
    local return_code=$?
    check_run_status ${return_code} ${step_start_time} "computing top-10 diffs" "computing top-10 diffs failed"

    old_incr_rate_avg=$( echo "scale=6; ${old_total_diff} / ${count}" | bc -l )
    check_run_status $? ${step_start_time} "computing the old model's top-10 diff" "computing the old model's top-10 diff failed"

    new_incr_rate_avg=$( echo "scale=6; ${new_total_diff} / ${count}" | bc -l )
    check_run_status $? ${step_start_time} "computing the new model's top-10 diff" "computing the new model's top-10 diff failed"

    echo "old model top-10 mean abs diff: ${old_incr_rate_avg}"
    echo "new model top-10 mean abs diff: ${new_incr_rate_avg}"
    echo "old vs. new model scores:"
    for cid in "${!new_score_map[@]}"; do
        # -e so that \t renders as a tab
        echo -e "\t CID: $cid, old model: ${old_score_map[$cid]}, new model: ${new_score_map[$cid]}"
    done
}
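
# AUC is computed by piping the cached prediction files through an external
# AUC binary; old_1.txt/new_1.txt are presumably written into
# ${PREDICT_CACHE_PATH} by model_predict_analyse.py in an earlier step.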
calc_auc() {
    old_auc=$(cat ${PREDICT_CACHE_PATH}/old_1.txt | /root/sunmingze/AUC/AUC)
    new_auc=$(cat ${PREDICT_CACHE_PATH}/new_1.txt | /root/sunmingze/AUC/AUC)
}
model_predict() {
    # Evaluate the online model on the latest data
    local step_start_time=$(date +%s)

    /opt/apps/SPARK3/spark-3.3.1-hadoop3.2-1.0.5/bin/spark-class org.apache.spark.deploy.SparkSubmit \
        --class com.tzld.piaoquan.recommend.model.pred_01_xgb_ad_hdfsfile_20240813 \
        --master yarn --driver-memory 1G --executor-memory 3G --executor-cores 1 --num-executors 30 \
        --conf spark.yarn.executor.memoryoverhead=1024 \
        --conf spark.shuffle.service.enabled=true \
        --conf spark.shuffle.service.port=7337 \
        --conf spark.shuffle.consolidateFiles=true \
        --conf spark.shuffle.manager=sort \
        --conf spark.storage.memoryFraction=0.4 \
        --conf spark.shuffle.memoryFraction=0.5 \
        --conf spark.default.parallelism=200 \
        /root/fengzhoutian/recommend-model/recommend-model-produce/target/recommend-model-produce-jar-with-dependencies.jar \
        featureFile:${feature_file} \
        testPath:${predict_date_path} \
        savePath:${online_model_predict_result_path} \
        negSampleRate:0.04 \
        modelPath:${online_model_path}

    local return_code=$?
    check_run_status ${return_code} ${step_start_time} "online model evaluation on ${predict_date_path: -8}" "online model evaluation on ${predict_date_path: -8} failed"
}
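
# compare_predictions gates the rollout: abort when the new model's top-10 mean
# absolute diff exceeds 0.1, or when it is worse than the old model's by more
# than 0.05.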
compare_predictions() {
    local step_start_time=$(date +%s)
    mkdir -p ${model_local_home}/predict_analyse_file

    # Analyse the two prediction sets and write the calibration file
    python ${sh_path}/model_predict_analyse.py -op ${online_model_predict_result_path} -np ${new_model_predict_result_path} -af ${predict_analyse_file_path} -cf ${calibration_file_path}
    local python_return_code=$?
    check_run_status ${python_return_code} ${step_start_time} "analysing the online model's evaluation on ${predict_date_path: -8}" "analysing the online model's evaluation on ${predict_date_path: -8} failed"

    calc_model_predict
    calc_auc

    if (( $(echo "${new_incr_rate_avg} > 0.100000" | bc -l) )); then
        echo "online model evaluation on ${predict_date_path: -8}: mean absolute diff exceeds 0.1, please investigate"
        check_run_status 1 ${step_start_time} "evaluation on ${predict_date_path: -8}: mean absolute diff exceeds 0.1" "online model evaluation on ${predict_date_path: -8}: mean absolute diff exceeds 0.1, please investigate"
        exit 1
    fi

    # Compare the two models
    score_diff=$( echo "${new_incr_rate_avg} - ${old_incr_rate_avg}" | bc -l )
    if (( $(echo "${score_diff} > 0.050000" | bc -l) )); then
        echo "evaluation on ${predict_date_path: -8}: score diff between the two models is ${score_diff}, above 0.05, please investigate"
        check_run_status 1 ${step_start_time} "evaluation on ${predict_date_path: -8} for both models" "evaluation on ${predict_date_path: -8}: score diff between the two models is ${score_diff}, above 0.05"
        exit 1
    fi
}
draw_q_distribution() {
    local step_start_time=$(date +%s)
    python ${sh_path}/draw_predict_distribution.py -op ${online_model_predict_result_path} -np ${new_model_predict_result_path} --output ${today_early_1}_${model_ver}_${train_first_day: -4}_${train_last_day: -4}.png
    # Plotting is best-effort: the exit code is captured but never checked, so
    # a failed plot does not abort the run
    python_return_code=$?
}
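
# Upload flow: push ${model_name}_tmp.tar.gz first, then server-side copy it to
# the final ${model_name}.tar.gz, so consumers of the final name never see a
# partially written archive (inferred intent of the two-step upload).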
model_upload_oss() {
    local step_start_time=$(date +%s)
    (
        cd ${model_local_home}

        ${HADOOP} fs -get ${model_save_path} ${model_name}
        if [ ! -d ${model_name} ]; then
            echo "Failed to download the model from HDFS"
            check_run_status 1 ${step_start_time} "HDFS model download" "failed to download the model from HDFS"
            exit 1
        fi

        tar -czvf ${model_name}_tmp.tar.gz -C ${model_name} .

        # Push the model archive (and calibration file) to OSS
        ${HADOOP} fs -put -f ${model_name}_tmp.tar.gz ${MODEL_OSS_PATH}
        local return_code=$?
        check_run_status ${return_code} ${step_start_time} "model upload to OSS" "model upload to OSS failed"

        ${HADOOP} fs -cp -f ${MODEL_OSS_PATH}/${model_name}_tmp.tar.gz ${MODEL_OSS_PATH}/${model_name}.tar.gz
        local return_code=$?
        check_run_status ${return_code} ${step_start_time} "model upload to OSS" "renaming the model on OSS failed"

        echo ${model_save_path} > ${model_path_file}
    )
    local return_code=$?
    check_run_status ${return_code} ${step_start_time} "model upload to OSS" "model upload to OSS failed"

    local step_end_time=$(date +%s)
    local elapsed=$((${step_end_time} - ${start_time}))
    echo -e "${LOG_PREFIX} -- model update finished -- success: took ${elapsed}s"
    send_success_upload_msg

    # Clean up local scratch files created under ${model_local_home} by the subshell above
    rm -f ${model_local_home}/${model_name}_tmp.tar.gz
    rm -rf ${model_local_home}/${model_name}
    rm -f ${model_local_home}/${OSS_CALIBRATION_FILE_NAME}.txt
}
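
# make_train_node_conf mirrors the date loop in init() but rebuilds
# train_data_path as ODPS table-partition URIs (odps://...=YYYYMMDD) for the
# EasyRec/DNN flow, overwriting the HDFS paths that init() assembled.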
make_train_node_conf() {
    train_data_path=''
    TABLE_PART_PREFIX="odps://loghubods/tables/ad_easyrec_train_data_v3_sampled/dt"

    declare -a date_keys=()
    local count=1
    local current_data="$(date -d "${today_early_1} -1 day" +%Y%m%d)"
    # Collect the most recent n non-holiday dates
    while [[ ${count} -le ${train_data_days} ]]; do
        date_key=$(date -d "${current_data}" +%Y%m%d)
        # Skip holidays; otherwise prepend the date's partition to the training path
        if [ $(is_not_holidays ${date_key}) -eq 1 ]; then
            date_keys+=("${date_key}")
            if [[ -z ${train_data_path} ]]; then
                train_data_path="${TABLE_PART_PREFIX}=${date_key}"
            else
                train_data_path="${TABLE_PART_PREFIX}=${date_key},${train_data_path}"
            fi
            count=$((count + 1))
        else
            echo "Date ${date_key} is a holiday, skipping"
        fi
        current_data=$(date -d "${current_data} -1 day" +%Y%m%d)
    done

    last_index=$((${#date_keys[@]} - 1))
    train_first_day=${date_keys[$last_index]}
    train_last_day=${date_keys[0]}

    predict_data_path=${TABLE_PART_PREFIX}=${today_early_1}
}
# Entry point
main() {
    init
    check_ad_hive
    bucket_feature_from_origin_to_hive
    make_train_node_conf
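    # NOTE: the script currently stops at the exit below, so the XGB
    # train/evaluate/upload flow that follows never runs; presumably left in
    # place while the pipeline migrates to the DNN/EasyRec data prepared above
    # (an inference from the file name and this early exit).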
    exit
    if [ "${current_day_of_week}" -eq 1 ] || [ "${current_day_of_week}" -eq 3 ] || [ "${current_day_of_week}" -eq 5 ]; then
        echo "Today is Monday, Wednesday, or Friday: training and updating the model"
        xgb_train
        model_predict
        compare_predictions
        draw_q_distribution
        model_upload_oss
    else
        echo "Today is not Monday, Wednesday, or Friday: skipping the model update"
    fi
}

main