
Merge branch 'feature/qiao_makedata_v2' into feature_zhaohaipeng

zhaohaipeng · 9 months ago
Parent
Current commit: fe2c1ad3b4
41 files changed, with 2981 insertions and 5 deletions
  1. .idea/.gitignore (+8 -0)
  2. .idea/codeStyles/Project.xml (+7 -0)
  3. .idea/codeStyles/codeStyleConfig.xml (+5 -0)
  4. .idea/compiler.xml (+18 -0)
  5. .idea/encodings.xml (+6 -0)
  6. .idea/inspectionProfiles/profiles_settings.xml (+6 -0)
  7. .idea/jarRepositories.xml (+40 -0)
  8. .idea/misc.xml (+16 -0)
  9. .idea/modules.xml (+8 -0)
  10. .idea/recommend-emr-dataprocess.iml (+8 -0)
  11. .idea/scala_compiler.xml (+6 -0)
  12. .idea/vcs.xml (+6 -0)
  13. qiaojialiang/02_train_go.sh (+31 -0)
  14. qiaojialiang/FeishuBot.py (+30 -0)
  15. qiaojialiang/change_oss.sh (+86 -0)
  16. qiaojialiang/checkHiveDataUtil.py (+131 -0)
  17. qiaojialiang/check_auc.sh (+119 -0)
  18. qiaojialiang/data_0729.sh (+133 -0)
  19. qiaojialiang/delPredictFile.sh (+45 -0)
  20. qiaojialiang/handle_0724.sh (+65 -0)
  21. qiaojialiang/handle_rov.sh (+233 -0)
  22. qiaojialiang/monitor_util.py (+104 -0)
  23. qiaojialiang/test/demo01.py (+41 -0)
  24. qiaojialiang/test/handle_rov_bak.sh (+202 -0)
  25. qiaojialiang/test/multiTest.sh (+28 -0)
  26. qiaojialiang/test/script1.sh (+32 -0)
  27. qiaojialiang/test/script2.sh (+4 -0)
  28. qiaojialiang/xunlian.sh (+53 -0)
  29. qiaojialiang/xunlian_0724.sh (+130 -0)
  30. spark-examples.iml (+8 -0)
  31. src/main/resources/20240609_bucket_274_old.txt (+0 -2)
  32. src/main/resources/20240609_bucket_314.txt (+0 -0)
  33. src/main/resources/20240709_recsys_bucket_314.txt (+0 -0)
  34. src/main/resources/20240709_recsys_feature_name_314.txt (+314 -0)
  35. src/main/scala/com/aliyun/odps/spark/examples/makedata_qiao/makedata_13_originData_20240705.scala (+278 -0)
  36. src/main/scala/com/aliyun/odps/spark/examples/makedata_qiao/makedata_14_valueData_20240705.scala (+91 -0)
  37. src/main/scala/com/aliyun/odps/spark/examples/makedata_qiao/makedata_16_bucketData_20240705.scala (+127 -0)
  38. src/main/scala/com/aliyun/odps/spark/examples/makedata_recsys/makedata_recsys_41_originData_20240709.scala (+280 -0)
  39. src/main/scala/com/aliyun/odps/spark/examples/makedata_recsys/makedata_recsys_42_bucket_20240709.scala (+103 -0)
  40. src/main/scala/com/aliyun/odps/spark/examples/makedata_recsys/makedata_recsys_43_bucketData_20240709.scala (+130 -0)
  41. src/main/scala/com/aliyun/odps/spark/examples/临时记录的脚本-推荐 (+49 -3)

+ 8 - 0
.idea/.gitignore

@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml

+ 7 - 0
.idea/codeStyles/Project.xml

@@ -0,0 +1,7 @@
+<component name="ProjectCodeStyleConfiguration">
+  <code_scheme name="Project" version="173">
+    <ScalaCodeStyleSettings>
+      <option name="MULTILINE_STRING_CLOSING_QUOTES_ON_NEW_LINE" value="true" />
+    </ScalaCodeStyleSettings>
+  </code_scheme>
+</component>

+ 5 - 0
.idea/codeStyles/codeStyleConfig.xml

@@ -0,0 +1,5 @@
+<component name="ProjectCodeStyleConfiguration">
+  <state>
+    <option name="PREFERRED_PROJECT_CODE_STYLE" value="Default" />
+  </state>
+</component>

+ 18 - 0
.idea/compiler.xml

@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="CompilerConfiguration">
+    <annotationProcessing>
+      <profile name="Maven default annotation processors profile" enabled="true">
+        <sourceOutputDir name="target/generated-sources/annotations" />
+        <sourceTestOutputDir name="target/generated-test-sources/test-annotations" />
+        <outputRelativeToContentRoot value="true" />
+        <module name="spark-examples" />
+      </profile>
+    </annotationProcessing>
+  </component>
+  <component name="JavacSettings">
+    <option name="ADDITIONAL_OPTIONS_OVERRIDE">
+      <module name="spark-examples" options="-parameters" />
+    </option>
+  </component>
+</project>

+ 6 - 0
.idea/encodings.xml

@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="Encoding">
+    <file url="file://$PROJECT_DIR$/src/main/java" charset="UTF-8" />
+  </component>
+</project>

+ 6 - 0
.idea/inspectionProfiles/profiles_settings.xml

@@ -0,0 +1,6 @@
+<component name="InspectionProjectProfileManager">
+  <settings>
+    <option name="USE_PROJECT_PROFILE" value="false" />
+    <version value="1.0" />
+  </settings>
+</component>

+ 40 - 0
.idea/jarRepositories.xml

@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="RemoteRepositoriesConfiguration">
+    <remote-repository>
+      <option name="id" value="central" />
+      <option name="name" value="Central Repository" />
+      <option name="url" value="https://repo.maven.apache.org/maven2" />
+    </remote-repository>
+    <remote-repository>
+      <option name="id" value="public" />
+      <option name="name" value="Public Repositories" />
+      <option name="url" value="http://nexus.stuuudy.com:9580/nexus/content/groups/public/" />
+    </remote-repository>
+    <remote-repository>
+      <option name="id" value="central" />
+      <option name="name" value="Maven Central repository" />
+      <option name="url" value="https://repo1.maven.org/maven2" />
+    </remote-repository>
+    <remote-repository>
+      <option name="id" value="maven-net-cloudera" />
+      <option name="name" value="cloudera Mirror" />
+      <option name="url" value="https://repository.cloudera.com/content/repositories/releases/" />
+    </remote-repository>
+    <remote-repository>
+      <option name="id" value="jboss.community" />
+      <option name="name" value="JBoss Community repository" />
+      <option name="url" value="https://repository.jboss.org/nexus/content/repositories/public/" />
+    </remote-repository>
+    <remote-repository>
+      <option name="id" value="stuuudy" />
+      <option name="name" value="stuuudy" />
+      <option name="url" value="http://nexus.stuuudy.com:9580/nexus/content/repositories/stuuudy/" />
+    </remote-repository>
+    <remote-repository>
+      <option name="id" value="releases" />
+      <option name="name" value="Releases" />
+      <option name="url" value="http://nexus.stuuudy.com:9580/nexus/content/repositories/releases/" />
+    </remote-repository>
+  </component>
+</project>

+ 16 - 0
.idea/misc.xml

@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="Black">
+    <option name="sdkName" value="Python 3.12" />
+  </component>
+  <component name="ExternalStorageConfigurationManager" enabled="true" />
+  <component name="MavenProjectsManager">
+    <option name="originalFiles">
+      <list>
+        <option value="$PROJECT_DIR$/pom.xml" />
+      </list>
+    </option>
+    <option name="workspaceImportForciblyTurnedOn" value="true" />
+  </component>
+  <component name="ProjectRootManager" version="2" languageLevel="JDK_21" project-jdk-name="1.8" project-jdk-type="JavaSDK" />
+</project>

+ 8 - 0
.idea/modules.xml

@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/spark-examples.iml" filepath="$PROJECT_DIR$/spark-examples.iml" />
+    </modules>
+  </component>
+</project>

+ 8 - 0
.idea/recommend-emr-dataprocess.iml

@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="PYTHON_MODULE" version="4">
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="inheritedJdk" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+</module>

+ 6 - 0
.idea/scala_compiler.xml

@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ScalaCompilerConfiguration">
+    <profile name="Maven 1" modules="spark-examples" />
+  </component>
+</project>

+ 6 - 0
.idea/vcs.xml

@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="" vcs="Git" />
+  </component>
+</project>

+ 31 - 0
qiaojialiang/02_train_go.sh

@@ -0,0 +1,31 @@
+#!/bin/sh
+set -ex
+
+start_date=$1
+end_date=$2
+model_name=$3
+MODEL_PATH="./model/"
+SAMPLE_PATH=$4
+bias=$5
+HADOOP="/opt/apps/HADOOP-COMMON/hadoop-common-current/bin/hadoop"
+FM_TRAIN="/root/sunmingze/alphaFM/bin/fm_train"
+
+
+
+#$HADOOP fs -text /dw/recommend/model/43_recsys_train_data/20240713/* | ${FM_TRAIN} -m /root/joe/recommend-emr-dataprocess/model/${model_name}_all_20240713.txt -dim ${bias} -core 8 -im /root/joe/recommend-emr-dataprocess/model/model_nba8_0709_0712.txt
+
+current_date="$start_date"
+
+while [[ "$current_date" != "$end_date" ]]; do
+    echo -------"$current_date"----------
+
+    yesterday=$(date -d "$current_date - 1 day" +%Y%m%d)
+    echo model-day-$yesterday
+    echo data-day-$current_date
+    $HADOOP fs -text /dw/recommend/model/43_recsys_train_data/$current_date/* | ${FM_TRAIN} -m /root/joe/recommend-emr-dataprocess/model/${model_name}_all_$current_date.txt -dim ${bias} -core 8 -im /root/joe/recommend-emr-dataprocess/model/${model_name}_all_$yesterday.txt
+    current_date=$(date -d "$current_date + 1 day" +%Y%m%d)
+done
+
+# nohup sh 02_train_go.sh 20240714 20240715 model_nba8 /dw/recommend/model/43_recsys_train_data/ 1,1,8 > log/p2_model_bkb8_all.log 2>&1 &
+
+
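
Note that the date loop above is exclusive of end_date: each run trains one model per day from start_date up to, but not including, end_date, warm-starting each day from the previous day's model. A hypothetical invocation (paths as in the trailing comment above, dates and log path illustrative):

    # Trains 20240714 and 20240715; the 20240714 run warm-starts from the 20240713 model.
    nohup sh 02_train_go.sh 20240714 20240716 model_nba8 /dw/recommend/model/43_recsys_train_data/ 1,1,8 > train.log 2>&1 &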

+ 30 - 0
qiaojialiang/FeishuBot.py

@@ -0,0 +1,30 @@
+import requests
+import argparse
+
+
+class FeishuBot:
+    webhook_url = "https://open.feishu.cn/open-apis/bot/v2/hook/cbf0bc0f-994a-489b-9f77-de0c5be107cd"
+
+    def send_message(self, message_text):
+        headers = {'Content-Type': 'application/json'}
+        data = {
+            "msg_type": "text",
+            "content": {"text": message_text}
+        }
+        response = requests.post(self.webhook_url, json=data, headers=headers)
+        response.raise_for_status()  # 如果响应状态码不是200,则抛出HTTPError异常
+        return response.json()  # 返回JSON响应(如果需要的话)
+
+
+def main():
+    parser = argparse.ArgumentParser(description='Send a message via Feishu Bot')
+    parser.add_argument('message', type=str, help='The message to send to Feishu Bot')
+    args = parser.parse_args()
+
+    bot = FeishuBot()
+    response = bot.send_message(args.message)
+    print(response)  # 打印响应内容,以便查看发送结果
+
+
+if __name__ == '__main__':
+    main()
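
For reference, a minimal sketch of invoking this bot from the shell side, as the scripts under qiaojialiang/test do (the message text here is illustrative, not from the commit):

    # Sends the positional argument as a plain-text message to the hard-coded webhook.
    python FeishuBot.py "model update pipeline: step1 finished"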

+ 86 - 0
qiaojialiang/change_oss.sh

@@ -0,0 +1,86 @@
+#!/bin/sh
+set -x
+
+source /root/anaconda3/bin/activate py37
+
+export SPARK_HOME=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8
+export PATH=$SPARK_HOME/bin:$PATH
+export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
+export JAVA_HOME=/usr/lib/jvm/java-1.8.0
+
+#  nohup sh handle_rov.sh > "$(date +%Y%m%d_%H%M%S)_handle_rov.log" 2>&1 &
+
+# 原始数据table name
+table='alg_recsys_sample_all'
+today="$(date +%Y%m%d)"
+today_early_3="$(date -d '3 days ago' +%Y%m%d)"
+#table='alg_recsys_sample_all_test'
+# 处理分区配置 推荐数据间隔一天生产,所以5日0点使用3日0-23点数据生产new模型数据
+begin_early_2_Str="$(date -d '2 days ago' +%Y%m%d)"
+end_early_2_Str="$(date -d '2 days ago' +%Y%m%d)"
+beginHhStr=00
+endHhStr=23
+max_hour=05
+max_minute=00
+# 各节点产出hdfs文件绝对路径
+# 源数据文件
+originDataPath=/dw/recommend/model/41_recsys_sample_data/
+# 特征值
+valueDataPath=/dw/recommend/model/14_feature_data/
+# 特征分桶
+bucketDataPath=/dw/recommend/model/43_recsys_train_data/
+# 模型数据路径
+MODEL_PATH=/root/joe/recommend-emr-dataprocess/model
+# 预测路径
+PREDICT_PATH=/root/joe/recommend-emr-dataprocess/predict
+# 历史线上正在使用的模型数据路径
+LAST_MODEL_HOME=/root/joe/model_online
+# 模型数据文件前缀
+model_name=model_nba8
+# fm模型
+FM_HOME=/root/sunmingze/alphaFM/bin
+# hadoop
+HADOOP=/opt/apps/HADOOP-COMMON/hadoop-common-current/bin/hadoop
+OSS_PATH=oss://art-recommend.oss-cn-hangzhou.aliyuncs.com/zhangbo/
+
+
+cat /root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_22.txt |
+  awk -F " " '{
+      if (NR == 1) {
+          print $1"\t"$2
+      } else {
+          split($0, fields, " ");
+          OFS="\t";
+          line=""
+          for (i = 1; i <= 10 && i <= length(fields); i++) {
+              line = (line ? line "\t" : "") fields[i];
+          }
+              print line
+          }
+      }' > /root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_22_change.txt
+if [ $? -ne 0 ]; then
+   echo "新模型文件格式转换失败"
+fi
+# 4.1.2 模型文件上传OSS
+online_model_path=${OSS_PATH}/${model_name}.txt
+$HADOOP fs -test -e ${online_model_path}
+if [ $? -eq 0 ]; then
+    echo "数据存在, 先删除。"
+    $HADOOP fs -rm -r -skipTrash ${online_model_path}
+else
+    echo "数据不存在"
+fi
+$HADOOP fs -put /root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_22_change.txt ${online_model_path}
+if [ $? -eq 0 ]; then
+    echo "推荐模型文件至OSS成功"
+    # 4.1.3 本地保存最新的线上使用的模型,用于下一次的AUC验证
+    cp -f ${LAST_MODEL_HOME}/model_online.txt ${LAST_MODEL_HOME}/model_online_$(date +\%Y\%m\%d).txt
+    cp -f /root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_22.txt ${LAST_MODEL_HOME}/model_online.txt
+    if [ $? -ne 0 ]; then
+        echo "模型备份失败"
+    fi
+    /root/anaconda3/bin/python monitor_util.py --level info --msg "荐模型数据更新 \n【任务名称】:step模型更新\n【是否成功】:success\n【信息】:已更新/root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_22.txt模型}"
+else
+    echo "推荐模型文件至OSS失败"
+    /root/anaconda3/bin/python monitor_util.py --level error --msg "荐模型数据更新 \n【任务名称】:step模型推送oss\n【是否成功】:error\n【信息】:推荐模型文件至OSS失败/root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_22.txt --- ${online_model_path}"
+fi
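
As an aside, a small sketch of what the awk conversion above does, using a made-up two-line model file: the header line keeps only its first two fields, every other line keeps at most its first 10 space-separated fields, and the kept fields are joined with tabs (length() on an array is a gawk extension, which the script above already relies on).

    printf 'bias 0.123\nfeat_a 0.2 0.01 0.02 0.03 0.04 0.05 0.06 0.07 0.08 0.09 0.10\n' |
      awk -F " " '{
          if (NR == 1) {
              print $1"\t"$2
          } else {
              split($0, fields, " ");
              line=""
              for (i = 1; i <= 10 && i <= length(fields); i++) {
                  line = (line ? line "\t" : "") fields[i];
              }
              print line
          }
      }'
    # Expected output (tab-separated):
    # bias    0.123
    # feat_a  0.2  0.01  0.02  0.03  0.04  0.05  0.06  0.07  0.08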

+ 131 - 0
qiaojialiang/checkHiveDataUtil.py

@@ -0,0 +1,131 @@
+# -*- coding: utf-8 -*-
+from odps import ODPS
+from FeishuBot import FeishuBot
+
+import argparse
+
+ODPS_CONFIG = {
+    'ENDPOINT': 'http://service.cn.maxcompute.aliyun.com/api',
+    'ACCESSID': 'LTAIWYUujJAm7CbH',
+    'ACCESSKEY': 'RfSjdiWwED1sGFlsjXv0DlfTnZTG1P',
+}
+
+
+def check_origin_hive(args):
+    project = "loghubods"
+    table = args.table
+    beginStr = args.beginStr
+    endStr = args.endStr
+    # 检查从begin到end的每一个小时级分区数据是否存在,有一个存在即算存在可以处理
+    # 如果全都为空报警
+    time_sequence = generate_time_sequence(beginStr, endStr)
+    # exist_partition = []
+    for time_str in time_sequence:
+        result = split_date_time(time_str)
+        partitionDt = result[0]
+        partitionHh = result[1]
+        count = check_data(project, table, partitionDt, partitionHh)
+        if count == 0:
+            bot = FeishuBot()
+            # msg = (
+            #     f'推荐模型数据更新 \n【任务名称】:step1校验hive数据源\n【是否成功】:success\n【信息】:table:{table},beginStr:{beginStr},endStr:{endStr}\n【详细日志】:{exist_partition}')
+            msg = (
+                f'推荐模型数据更新 \n【任务名称】:step1校验hive数据源\n【是否成功】:error\n【信息】:table:{table},{time_str}分区数据不存在,继续检查')
+            bot.send_message(msg)
+            print('1')
+            exit(1)
+        else:
+            continue
+    print('0')
+        # exist_partition.append(f'分区:dt={partitionDt}/hh={partitionHh},数据:{count}')
+# if len(exist_partition) == 0:
+#     print('1')
+#     exit(1)
+# else:
+#     bot = FeishuBot()
+#     msg = (
+#         f'推荐模型数据更新 \n【任务名称】:step1校验hive数据源\n【是否成功】:success\n【信息】:table:{table},beginStr:{beginStr},endStr:{endStr}\n【详细日志】:{exist_partition}')
+#     bot.send_message(msg)
+# print('0')
+
+
+def check_data(project, table, partitionDt, partitionDtHh) -> int:
+    """检查数据是否准备好,输出数据条数"""
+    odps = ODPS(
+        access_id=ODPS_CONFIG['ACCESSID'],
+        secret_access_key=ODPS_CONFIG['ACCESSKEY'],
+        project=project,
+        endpoint=ODPS_CONFIG['ENDPOINT'],
+        # connect_timeout=300000,
+        # read_timeout=500000,
+        # pool_maxsize=1000,
+        # pool_connections=1000
+    )
+    try:
+        t = odps.get_table(name=table)
+        # check_res = t.exist_partition(partition_spec=f'dt={partition}')
+        # 含有hh分区
+        # if not {partitionDtHh}:
+        check_res = t.exist_partition(partition_spec=f'dt={partitionDt},hh={partitionDtHh}')
+        if check_res:
+            sql = f'select * from {project}.{table} where dt = {partitionDt} and hh={partitionDtHh}'
+            with odps.execute_sql(sql=sql).open_reader() as reader:
+                data_count = reader.count
+        else:
+            data_count = 0
+        # else:
+        #     check_res = t.exist_partition(partition_spec=f'dt={partitionDt}')
+        #     if check_res:
+        #         sql = f'select * from {project}.{table} where dt = {partitionDt}'
+        #         with odps.execute_sql(sql=sql).open_reader() as reader:
+        #             data_count = reader.count
+        #     else:
+        #         data_count = 0
+    except Exception as e:
+        print("error:" + str(e))
+        data_count = 0
+    return data_count
+
+
+def generate_time_sequence(beginStr, endStr):
+    # 将字符串时间转换为datetime对象
+    from datetime import datetime, timedelta
+
+    # 定义时间格式
+    time_format = "%Y%m%d%H"
+
+    # 转换字符串为datetime对象
+    begin_time = datetime.strptime(beginStr, time_format)
+    end_time = datetime.strptime(endStr, time_format)
+
+    # 生成时间序列
+    time_sequence = []
+    current_time = begin_time
+    while current_time <= end_time:
+        # 将datetime对象转换回指定格式的字符串
+        time_sequence.append(current_time.strftime(time_format))
+        # 增加一个小时
+        current_time += timedelta(hours=1)
+
+    return time_sequence
+
+
+def split_date_time(date_time_str):
+    # 假设date_time_str是一个长度为12的字符串,格式为YYYYMMDDHH
+    # 切片获取日期部分(前8位)和时间部分(后4位中的前2位,因为后两位可能是分钟或秒,但这里只取小时)
+    date_part = date_time_str[:8]
+    time_part = date_time_str[8:10]  # 只取小时部分
+
+    # 将结果存储在一个数组中(在Python中通常使用列表)
+    result = [date_part, time_part]
+
+    return result
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='脚本utils')
+    parser.add_argument('--beginStr', type=str, help='表分区Dt,beginStr')
+    parser.add_argument('--endStr', type=str, help='表分区Hh,endStr')
+    parser.add_argument('--table', type=str, help='表名')
+    argv = parser.parse_args()
+    check_origin_hive(argv)
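
A minimal sketch of how the pipeline scripts call this check (the hour-range values are illustrative; handle_rov.sh builds them from its partition variables):

    python checkHiveDataUtil.py --table alg_recsys_sample_all --beginStr 2024072700 --endStr 2024072723
    # Prints 0 when every hourly partition in the range has data;
    # prints 1 (and sends a Feishu alert) as soon as an empty partition is found.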

+ 119 - 0
qiaojialiang/check_auc.sh

@@ -0,0 +1,119 @@
+#!/bin/sh
+set -ex
+
+source /root/anaconda3/bin/activate py37
+
+#  nohup sh handle_rov.sh > "$(date +%Y%m%d_%H%M%S)_handle_rov.log" 2>&1 &
+
+# 原始数据table name
+table='alg_recsys_sample_all'
+#today="$(date +%Y%m%d)"
+today=20240710
+#today_early_3="$(date -d '3 days ago' +%Y%m%d)"
+today_early_3=20240703
+#table='alg_recsys_sample_all_test'
+# 处理分区配置 推荐数据间隔一天生产,所以5日0点使用3日0-23点数据生产new模型数据
+begin_early_2_Str="$(date -d '2 days ago' +%Y%m%d)"
+end_early_2_Str="$(date -d '2 days ago' +%Y%m%d)"
+beginHhStr=00
+endHhStr=23
+max_hour=05
+max_minute=00
+# 各节点产出hdfs文件绝对路径
+# 源数据文件
+originDataPath=/dw/recommend/model/13_sample_data/
+# 特征值
+valueDataPath=/dw/recommend/model/14_feature_data/
+# 特征分桶
+bucketDataPath=/dw/recommend/model/16_train_data/
+# 模型数据路径
+MODEL_PATH=/root/joe/recommend-emr-dataprocess/model
+# 预测路径
+PREDICT_PATH=/root/joe/recommend-emr-dataprocess/predict
+# 历史线上正在使用的模型数据路径
+LAST_MODEL_HOME=/root/joe/model_online
+# 模型数据文件前缀
+model_name=akaqjl8
+# fm模型
+FM_HOME=/root/sunmingze/alphaFM/bin
+# hadoop
+HADOOP=/opt/apps/HADOOP-COMMON/hadoop-common-current/bin/hadoop
+
+
+
+# 0 对比AUC 前置对比2日模型数据 与 线上模型数据效果对比,如果2日模型优于线上,更新线上模型
+echo "$(date +%Y-%m-%d_%H-%M-%S)----------step4------------开始对比,新:${MODEL_PATH}/${model_name}_20240703.txt,与线上online模型数据auc效果"
+#$HADOOP fs -text ${bucketDataPath}/20240707/* | ${FM_HOME}/fm_predict -m ${LAST_MODEL_HOME}/model_online.txt -dim 8 -core 8 -out ${PREDICT_PATH}/${model_name}_${today}_online.txt
+#if [ $? -ne 0 ]; then
+#  echo "推荐线上模型AUC计算失败"
+#  /root/anaconda3/bin/python monitor_util.py --level error --msg "荐模型数据更新 \n【任务名称】:step4新旧模型AUC对比\n【是否成功】:error\n【信息】:推荐线上模型AUC计算失败"
+#else
+##  $HADOOP fs -text ${bucketDataPath}/20240707/* | ${FM_HOME}/fm_predict -m ${MODEL_PATH}/${model_name}_20240703.txt -dim 8 -core 8 -out ${PREDICT_PATH}/${model_name}_${today}_new.txt
+#  if [ $? -ne 0 ]; then
+#     echo "推荐新模型AUC计算失败"
+#     /root/anaconda3/bin/python monitor_util.py --level error --msg "荐模型数据更新 \n【任务名称】:step4新旧模型AUC对比\n【是否成功】:error\n【信息】:推荐新模型AUC计算失败${PREDICT_PATH}/${model_name}_${today}_new.txt"
+#  else
+    online_auc=`cat ${PREDICT_PATH}/${model_name}_${today}_online.txt | /root/sunmingze/AUC/AUC`
+    if [ $? -ne 0 ]; then
+       echo "推荐线上模型AUC计算失败"
+       /root/anaconda3/bin/python monitor_util.py --level error --msg "荐模型数据更新 \n【任务名称】:step4新旧模型AUC对比\n【是否成功】:error\n【信息】:推荐线上模型AUC计算失败"
+    else
+      new_auc=`cat ${PREDICT_PATH}/${model_name}_${today}_new.txt | /root/sunmingze/AUC/AUC`
+      if [ $? -ne 0 ]; then
+         echo "推荐新模型AUC计算失败"
+         /root/anaconda3/bin/python monitor_util.py --level error --msg "荐模型数据更新 \n【任务名称】:step4新旧模型AUC对比\n【是否成功】:error\n【信息】:推荐新模型AUC计算失败${PREDICT_PATH}/${model_name}_${today}_new.txt"
+      else
+        # 4.1 对比auc数据判断是否更新线上模型
+        if [ "$online_auc" \< "$new_auc" ]; then
+            echo "新模型优于线上模型: 线上模型AUC: ${online_auc}, 新模型AUC: ${new_auc}"
+            # 4.1.1 模型格式转换
+            cat ${MODEL_PATH}/${model_name}_20240703.txt |
+            awk -F " " '{
+                if (NR == 1) {
+                    print $1"\t"$2
+                } else {
+                    split($0, fields, " ");
+                    OFS="\t";
+                    line=""
+                    for (i = 1; i <= 10 && i <= length(fields); i++) {
+                        line = (line ? line "\t" : "") fields[i];
+                    }
+                    print line
+                }
+            }' > ${MODEL_PATH}/${model_name}_20240703_change.txt
+            if [ $? -ne 0 ]; then
+               echo "新模型文件格式转换失败"
+               /root/anaconda3/bin/python monitor_util.py --level error --msg "荐模型数据更新 \n【任务名称】:step4模型格式转换\n【是否成功】:error\n【信息】:新模型文件格式转换失败${MODEL_PATH}/${model_name}_20240703.txt"
+            else
+#              # 4.1.2 模型文件上传OSS
+#              online_model_path=${OSS_PATH}/${model_name}.txt
+#              $HADOOP fs -test -e ${online_model_path}
+#              if [ $? -eq 0 ]; then
+#                  echo "数据存在, 先删除。"
+#                  $HADOOP fs -rm -r -skipTrash ${online_model_path}
+#              else
+#                  echo "数据不存在"
+#              fi
+#              $HADOOP fs -put ${MODEL_PATH}/${model_name}_20240703_change.txt ${online_model_path}
+#              if [ $? -eq 0 ]; then
+#                 echo "推荐模型文件至OSS成功"
+#                  # 4.1.3 本地保存最新的线上使用的模型,用于下一次的AUC验证
+                 cp -f ${LAST_MODEL_HOME}/model_online.txt ${LAST_MODEL_HOME}/model_online_$(date +\%Y\%m\%d).txt
+                 cp -f ${MODEL_PATH}/${model_name}_20240703.txt ${LAST_MODEL_HOME}/model_online.txt
+                 if [ $? -ne 0 ]; then
+                     echo "模型备份失败"
+                 fi
+                 /root/anaconda3/bin/python monitor_util.py --level info --msg "荐模型数据更新 \n【任务名称】:step4模型更新\n【是否成功】:success\n【信息】:新模型优于线上模型: 线上模型AUC: ${online_auc}, 新模型AUC: ${new_auc},已更新${model_name}_20240703.txt模型}"
+#              else
+#                 echo "推荐模型文件至OSS失败"
+#                 /root/anaconda3/bin/python monitor_util.py --level error --msg "荐模型数据更新 \n【任务名称】:step4模型推送oss\n【是否成功】:error\n【信息】:推荐模型文件至OSS失败${MODEL_PATH}/${model_name}_20240703_change.txt --- ${online_model_path}"
+#              fi
+            fi
+        else
+            echo "新模型不如线上模型: 线上模型AUC: ${online_auc}, 新模型AUC: ${new_auc}"
+            /root/anaconda3/bin/python monitor_util.py --level info --msg "荐模型数据更新 \n【任务名称】:step4模型更新\n【是否成功】:success\n【信息】:新模型不如线上模型: 线上模型AUC: ${online_auc}, 新模型AUC: ${new_auc}}"
+        fi
+      fi
+    fi
+#  fi
+#fi

+ 133 - 0
qiaojialiang/data_0729.sh

@@ -0,0 +1,133 @@
+#!/bin/sh
+set -x
+
+source /root/anaconda3/bin/activate py37
+
+export SPARK_HOME=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8
+export PATH=$SPARK_HOME/bin:$PATH
+export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
+export JAVA_HOME=/usr/lib/jvm/java-1.8.0
+
+#  nohup sh handle_rov.sh > "$(date +%Y%m%d_%H%M%S)_handle_rov.log" 2>&1 &
+
+# 原始数据table name
+table='alg_recsys_sample_all'
+today="$(date +%Y%m%d)"
+today_early_3="$(date -d '3 days ago' +%Y%m%d)"
+#table='alg_recsys_sample_all_test'
+# 处理分区配置 推荐数据间隔一天生产,所以5日0点使用3日0-23点数据生产new模型数据
+begin_early_2_Str="$(date -d '2 days ago' +%Y%m%d)"
+end_early_2_Str="$(date -d '2 days ago' +%Y%m%d)"
+beginHhStr=00
+endHhStr=23
+max_hour=05
+max_minute=00
+# 各节点产出hdfs文件绝对路径
+# 源数据文件
+originDataPath=/dw/recommend/model/41_recsys_sample_data/
+# 特征值
+valueDataPath=/dw/recommend/model/14_feature_data/
+# 特征分桶
+bucketDataPath=/dw/recommend/model/43_recsys_train_data/
+# 模型数据路径
+MODEL_PATH=/root/joe/recommend-emr-dataprocess/model
+# 预测路径
+PREDICT_PATH=/root/joe/recommend-emr-dataprocess/predict
+# 历史线上正在使用的模型数据路径
+LAST_MODEL_HOME=/root/joe/model_online
+# 模型数据文件前缀
+model_name=model_nba8
+# fm模型
+FM_HOME=/root/sunmingze/alphaFM/bin
+# hadoop
+HADOOP=/opt/apps/HADOOP-COMMON/hadoop-common-current/bin/hadoop
+OSS_PATH=oss://art-recommend.oss-cn-hangzhou.aliyuncs.com/zhangbo/
+#OSS_PATH=oss://art-recommend.oss-cn-hangzhou.aliyuncs.com/qiaojialiang/
+
+
+
+
+# 1 生产原始数据
+echo "$(date +%Y-%m-%d_%H-%M-%S)----------step1------------开始根据${table}生产原始数据"
+#/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+#--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_41_originData_20240709 \
+#--master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
+#../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+#tablePart:64 repartition:32 \
+#beginStr:${begin_early_2_Str}${beginHhStr} endStr:${end_early_2_Str}${endHhStr} \
+#savePath:${originDataPath} \
+#table:${table}
+#if [ $? -ne 0 ]; then
+#   echo "Spark原始样本生产任务执行失败"
+#   /root/anaconda3/bin/python monitor_util.py --level error --msg "荐模型数据更新 \n【任务名称】:step1根据${table}生产原始数据\n【是否成功】:error\n【信息】:Spark原始样本生产任务执行失败"
+#   exit 1
+#else
+#   echo "spark原始样本生产执行成功"
+#fi
+/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_41_originData_20240709 \
+--master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
+../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+tablePart:64 repartition:32 \
+beginStr:2024072709 endStr:2024072709 \
+savePath:${originDataPath} \
+table:${table}
+echo "$(date +%Y-%m-%d_%H-%M-%S)----------step1------------spark原始样本生产执行成功:2024072709"
+
+/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_41_originData_20240709 \
+--master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
+../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+tablePart:64 repartition:32 \
+beginStr:2024072713 endStr:2024072715 \
+savePath:${originDataPath} \
+table:${table} &
+
+/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_41_originData_20240709 \
+--master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
+../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+tablePart:64 repartition:32 \
+beginStr:2024072718 endStr:2024072723 \
+savePath:${originDataPath} \
+table:${table} &
+
+wait
+if [ $? -ne 0 ]; then
+   echo "Spark原始样本生产任务执行失败"
+   /root/anaconda3/bin/python monitor_util.py --level error --msg "荐模型数据更新 \n【任务名称】:step1根据${table}生产原始数据\n【是否成功】:error\n【信息】:Spark原始样本生产任务执行失败"
+   exit 1
+else
+   echo "spark原始样本生产执行成功"
+fi
+
+# 2 特征分桶
+echo "$(date +%Y-%m-%d_%H-%M-%S)----------step2------------根据特征分桶生产重打分特征数据"
+/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_43_bucketData_20240709 \
+--master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
+../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+readPath:${originDataPath} \
+savePath:${bucketDataPath} \
+beginStr:20240727 endStr:20240727 repartition:500 \
+filterNames:XXXXXXXXX \
+fileName:20240609_bucket_314.txt \
+whatLabel:is_return whatApps:0,4,21,17
+if [ $? -ne 0 ]; then
+   echo "Spark特征分桶处理任务执行失败"
+   /root/anaconda3/bin/python monitor_util.py --level error --msg "荐模型数据更新 \n【任务名称】:step3训练数据产出\n【是否成功】:error\n【信息】:Spark特征分桶处理任务执行失败"
+   exit 1
+else
+   echo "spark特征分桶处理执行成功"
+fi
+
+echo "$(date +%Y-%m-%d_%H-%M-%S)----------step4------------开始模型训练,增量训练:${MODEL_PATH}/${model_name}_${today_early_3}.txt"
+#$HADOOP fs -text ${bucketDataPath}/${begin_early_2_Str}/* | ${FM_HOME}/fm_train -m ${MODEL_PATH}/${model_name}_${begin_early_2_Str}.txt -dim 1,1,8 -im ${LAST_MODEL_HOME}/model_online.txt -core 8
+$HADOOP fs -text ${bucketDataPath}/20240727/* | ${FM_HOME}/fm_train -m ${MODEL_PATH}/${model_name}_20240727.txt -dim 1,1,8 -im /root/joe/model_online/model_online.txt -core 8
+if [ $? -ne 0 ]; then
+   echo "模型训练失败"
+   /root/anaconda3/bin/python monitor_util.py --level error --msg "荐模型数据更新 \n【任务名称】:step5模型训练\n【是否成功】:error\n【信息】:${bucketDataPath}/20240727训练失败"
+fi
+
+echo "$(date +%Y-%m-%d_%H-%M-%S)----------step5------------模型训练完成:${MODEL_PATH}/${model_name}_20240727.txt"
+

+ 45 - 0
qiaojialiang/delPredictFile.sh

@@ -0,0 +1,45 @@
+#!/bin/sh
+set -x
+
+source /root/anaconda3/bin/activate py37
+
+export SPARK_HOME=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8
+export PATH=$SPARK_HOME/bin:$PATH
+export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
+export JAVA_HOME=/usr/lib/jvm/java-1.8.0
+
+today="$(date -d '3 days ago' +%Y%m%d)"
+model_name=model_nba8
+PREDICT_PATH=/root/joe/recommend-emr-dataprocess/predict
+fileName1=${PREDICT_PATH}/${model_name}_${today}_online.txt
+fileName2=${PREDICT_PATH}/${model_name}_${today}_new.txt
+
+
+if [ -f "${fileName1}" ]; then
+    echo "文件 ${fileName1} 存在,正在删除..."
+    # 使用rm命令删除文件
+    rm "${fileName1}"
+#    mv "${fileName1}"  ${PREDICT_PATH}/xxx_online.txt
+    if [ $? -eq 0 ]; then
+        echo "文件 ${fileName1} 已成功删除。"
+    else
+        echo "删除文件 ${fileName1} 时出错。"
+    fi
+else
+    echo "文件 ${fileName1} 不存在。"
+fi
+
+
+if [ -f "${fileName2}" ]; then
+    echo "文件 ${fileName2} 存在,正在删除..."
+    # 使用rm命令删除文件
+    rm "${fileName2}"
+#    mv "${fileName2}"  ${PREDICT_PATH}/xxx_new.txt
+    if [ $? -eq 0 ]; then
+        echo "文件 ${fileName2} 已成功删除。"
+    else
+        echo "删除文件 ${fileName2} 时出错。"
+    fi
+else
+    echo "文件 ${fileName2} 不存在。"
+fi

+ 65 - 0
qiaojialiang/handle_0724.sh

@@ -0,0 +1,65 @@
+#!/bin/sh
+set -x
+
+source /root/anaconda3/bin/activate py37
+
+export SPARK_HOME=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8
+export PATH=$SPARK_HOME/bin:$PATH
+export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
+export JAVA_HOME=/usr/lib/jvm/java-1.8.0
+
+#  nohup sh handle_rov.sh > "$(date +%Y%m%d_%H%M%S)_handle_rov.log" 2>&1 &
+
+# 原始数据table name
+table='alg_recsys_sample_all'
+today="$(date +%Y%m%d)"
+today_early_3="$(date -d '3 days ago' +%Y%m%d)"
+#table='alg_recsys_sample_all_test'
+# 处理分区配置 推荐数据间隔一天生产,所以5日0点使用3日0-23点数据生产new模型数据
+begin_early_2_Str="$(date -d '2 days ago' +%Y%m%d)"
+end_early_2_Str="$(date -d '2 days ago' +%Y%m%d)"
+beginHhStr=00
+endHhStr=23
+max_hour=05
+max_minute=00
+# 各节点产出hdfs文件绝对路径
+# 源数据文件
+originDataPath=/dw/recommend/model/41_recsys_sample_data/
+# 特征值
+valueDataPath=/dw/recommend/model/14_feature_data/
+# 特征分桶
+bucketDataPath=/dw/recommend/model/43_recsys_train_data/
+# 模型数据路径
+MODEL_PATH=/root/joe/recommend-emr-dataprocess/model
+# 预测路径
+PREDICT_PATH=/root/joe/recommend-emr-dataprocess/predict
+# 历史线上正在使用的模型数据路径
+LAST_MODEL_HOME=/root/joe/model_online
+# 模型数据文件前缀
+model_name=model_nba8
+# fm模型
+FM_HOME=/root/sunmingze/alphaFM/bin
+# hadoop
+HADOOP=/opt/apps/HADOOP-COMMON/hadoop-common-current/bin/hadoop
+OSS_PATH=oss://art-recommend.oss-cn-hangzhou.aliyuncs.com/zhangbo/
+
+
+echo "$(date +%Y-%m-%d_%H-%M-%S)----------step2------------根据特征分桶生产重打分特征数据"
+/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_43_bucketData_20240709 \
+--master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
+../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+readPath:${originDataPath} \
+savePath:${bucketDataPath} \
+beginStr:20240716 endStr:20240722 repartition:500 \
+filterNames:XXXXXXXXX \
+fileName:20240609_bucket_314.txt \
+whatLabel:is_return whatApps:0,4,21,17
+if [ $? -ne 0 ]; then
+   echo "Spark特征分桶处理任务执行失败"
+   /root/anaconda3/bin/python monitor_util.py --level error --msg "荐模型数据更新 \n【任务名称】:step3训练数据产出\n【是否成功】:error\n【信息】:Spark特征分桶处理任务执行失败"
+   exit 1
+else
+   echo "spark特征分桶处理执行成功"
+fi
+

+ 233 - 0
qiaojialiang/handle_rov.sh

@@ -0,0 +1,233 @@
+#!/bin/sh
+set -x
+
+source /root/anaconda3/bin/activate py37
+
+export SPARK_HOME=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8
+export PATH=$SPARK_HOME/bin:$PATH
+export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
+export JAVA_HOME=/usr/lib/jvm/java-1.8.0
+
+#  nohup sh handle_rov.sh > "$(date +%Y%m%d_%H%M%S)_handle_rov.log" 2>&1 &
+
+# 原始数据table name
+table='alg_recsys_sample_all'
+today="$(date +%Y%m%d)"
+today_early_3="$(date -d '3 days ago' +%Y%m%d)"
+#table='alg_recsys_sample_all_test'
+# 处理分区配置 推荐数据间隔一天生产,所以5日0点使用3日0-23点数据生产new模型数据
+begin_early_2_Str="$(date -d '2 days ago' +%Y%m%d)"
+end_early_2_Str="$(date -d '2 days ago' +%Y%m%d)"
+beginHhStr=00
+endHhStr=23
+max_hour=05
+max_minute=00
+# 各节点产出hdfs文件绝对路径
+# 源数据文件
+originDataPath=/dw/recommend/model/41_recsys_sample_data/
+# 特征值
+valueDataPath=/dw/recommend/model/14_feature_data/
+# 特征分桶
+bucketDataPath=/dw/recommend/model/43_recsys_train_data/
+# 模型数据路径
+MODEL_PATH=/root/joe/recommend-emr-dataprocess/model
+# 预测路径
+PREDICT_PATH=/root/joe/recommend-emr-dataprocess/predict
+# 历史线上正在使用的模型数据路径
+LAST_MODEL_HOME=/root/joe/model_online
+# 模型数据文件前缀
+model_name=model_nba8
+# fm模型
+FM_HOME=/root/sunmingze/alphaFM/bin
+# hadoop
+HADOOP=/opt/apps/HADOOP-COMMON/hadoop-common-current/bin/hadoop
+OSS_PATH=oss://art-recommend.oss-cn-hangzhou.aliyuncs.com/zhangbo/
+#OSS_PATH=oss://art-recommend.oss-cn-hangzhou.aliyuncs.com/qiaojialiang/
+
+# 0 判断上游表是否生产完成,最长等待到max_hour点
+# shellcheck disable=SC2154
+echo "$(date +%Y-%m-%d_%H-%M-%S)----------step0------------开始校验是否生产完数据,分区信息:beginStr:${begin_early_2_Str}${beginHhStr},endStr:${end_early_2_Str}${endHhStr}"
+while true; do
+  python_return_code=$(python /root/joe/recommend-emr-dataprocess/qiaojialiang/checkHiveDataUtil.py --table ${table} --beginStr ${begin_early_2_Str}${beginHhStr} --endStr ${end_early_2_Str}${endHhStr})
+  echo "python 返回值:${python_return_code}"
+  if [ $python_return_code -eq 0 ]; then
+    echo "Python程序返回0,校验存在数据,退出循环。"
+    break
+  fi
+  echo "Python程序返回非0值,不存在数据,等待五分钟后再次调用。"
+  sleep 300
+  current_hour=$(date +%H)
+  current_minute=$(date +%M)
+  # shellcheck disable=SC2039
+  if (( current_hour > max_hour || (current_hour == max_hour && current_minute >= max_minute) )); then
+    echo "最长等待时间已到,失败:${current_hour}-${current_minute}"
+    /root/anaconda3/bin/python monitor_util.py --level error --msg "荐模型数据更新 \n【任务名称】:step0校验是否生产完数据\n【是否成功】:error\n【信息】:最长等待时间已到,失败:${current_hour}-${current_minute}"
+    exit 1
+  fi
+done
+
+# 1 生产原始数据
+echo "$(date +%Y-%m-%d_%H-%M-%S)----------step1------------开始根据${table}生产原始数据"
+#/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+#--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_41_originData_20240709 \
+#--master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
+#../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+#tablePart:64 repartition:32 \
+#beginStr:${begin_early_2_Str}${beginHhStr} endStr:${end_early_2_Str}${endHhStr} \
+#savePath:${originDataPath} \
+#table:${table}
+#if [ $? -ne 0 ]; then
+#   echo "Spark原始样本生产任务执行失败"
+#   /root/anaconda3/bin/python monitor_util.py --level error --msg "荐模型数据更新 \n【任务名称】:step1根据${table}生产原始数据\n【是否成功】:error\n【信息】:Spark原始样本生产任务执行失败"
+#   exit 1
+#else
+#   echo "spark原始样本生产执行成功"
+#fi
+/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_41_originData_20240709 \
+--master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
+../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+tablePart:64 repartition:32 \
+beginStr:${begin_early_2_Str}00 endStr:${end_early_2_Str}09 \
+savePath:${originDataPath} \
+table:${table} &
+
+/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_41_originData_20240709 \
+--master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
+../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+tablePart:64 repartition:32 \
+beginStr:${begin_early_2_Str}10 endStr:${end_early_2_Str}15 \
+savePath:${originDataPath} \
+table:${table} &
+
+/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_41_originData_20240709 \
+--master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
+../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+tablePart:64 repartition:32 \
+beginStr:${begin_early_2_Str}16 endStr:${end_early_2_Str}23 \
+savePath:${originDataPath} \
+table:${table} &
+
+wait
+if [ $? -ne 0 ]; then
+   echo "Spark原始样本生产任务执行失败"
+   /root/anaconda3/bin/python monitor_util.py --level error --msg "荐模型数据更新 \n【任务名称】:step1根据${table}生产原始数据\n【是否成功】:error\n【信息】:Spark原始样本生产任务执行失败"
+   exit 1
+else
+   echo "spark原始样本生产执行成功"
+fi
+
+# 2 特征分桶
+echo "$(date +%Y-%m-%d_%H-%M-%S)----------step2------------根据特征分桶生产重打分特征数据"
+/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_43_bucketData_20240709 \
+--master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
+../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+readPath:${originDataPath} \
+savePath:${bucketDataPath} \
+beginStr:${begin_early_2_Str} endStr:${end_early_2_Str} repartition:500 \
+filterNames:XXXXXXXXX \
+fileName:20240609_bucket_314.txt \
+whatLabel:is_return whatApps:0,4,21,17
+if [ $? -ne 0 ]; then
+   echo "Spark特征分桶处理任务执行失败"
+   /root/anaconda3/bin/python monitor_util.py --level error --msg "荐模型数据更新 \n【任务名称】:step3训练数据产出\n【是否成功】:error\n【信息】:Spark特征分桶处理任务执行失败"
+   exit 1
+else
+   echo "spark特征分桶处理执行成功"
+fi
+echo "$(date +%Y-%m-%d_%H-%M-%S)----------step5------------spark特征分桶处理执行成功:${begin_early_2_Str}"
+
+# 3 对比AUC 前置对比3日模型数据 与 线上模型数据效果对比,如果3日模型优于线上,更新线上模型
+#echo "$(date +%Y-%m-%d_%H-%M-%S)----------step3------------开始对比,新:${MODEL_PATH}/${model_name}_${today_early_3}.txt,与线上online模型数据auc效果"
+#$HADOOP fs -text ${bucketDataPath}/${begin_early_2_Str}/* | ${FM_HOME}/fm_predict -m ${LAST_MODEL_HOME}/model_online.txt -dim 8 -core 8 -out ${PREDICT_PATH}/${model_name}_${today}_online.txt
+#if [ $? -ne 0 ]; then
+#  echo "推荐线上模型AUC计算失败"
+#  /root/anaconda3/bin/python monitor_util.py --level error --msg "荐模型数据更新 \n【任务名称】:step4新旧模型AUC对比\n【是否成功】:error\n【信息】:推荐线上模型AUC计算失败"
+#else
+#  $HADOOP fs -text ${bucketDataPath}/${begin_early_2_Str}/* | ${FM_HOME}/fm_predict -m ${MODEL_PATH}/${model_name}_${today_early_3}.txt -dim 8 -core 8 -out ${PREDICT_PATH}/${model_name}_${today}_new.txt
+#  if [ $? -ne 0 ]; then
+#     echo "推荐新模型AUC计算失败"
+#     /root/anaconda3/bin/python monitor_util.py --level error --msg "荐模型数据更新 \n【任务名称】:step4新旧模型AUC对比\n【是否成功】:error\n【信息】:推荐新模型AUC计算失败${PREDICT_PATH}/${model_name}_${today}_new.txt"
+#  else
+#    online_auc=`cat ${PREDICT_PATH}/${model_name}_${today}_online.txt | /root/sunmingze/AUC/AUC`
+#    if [ $? -ne 0 ]; then
+#       echo "推荐线上模型AUC计算失败"
+#       /root/anaconda3/bin/python monitor_util.py --level error --msg "荐模型数据更新 \n【任务名称】:step4新旧模型AUC对比\n【是否成功】:error\n【信息】:推荐线上模型AUC计算失败"
+#    else
+#      new_auc=`cat ${PREDICT_PATH}/${model_name}_${today}_new.txt | /root/sunmingze/AUC/AUC`
+#      if [ $? -ne 0 ]; then
+#         echo "推荐新模型AUC计算失败"
+#         /root/anaconda3/bin/python monitor_util.py --level error --msg "荐模型数据更新 \n【任务名称】:step4新旧模型AUC对比\n【是否成功】:error\n【信息】:推荐新模型AUC计算失败${PREDICT_PATH}/${model_name}_${today}_new.txt"
+#      else
+#        # 4.1 对比auc数据判断是否更新线上模型
+#        if [ "$online_auc" \< "$new_auc" ]; then
+#            echo "新模型优于线上模型: 线上模型AUC: ${online_auc}, 新模型AUC: ${new_auc}"
+#            # 4.1.1 模型格式转换
+#            cat ${MODEL_PATH}/${model_name}_${today_early_3}.txt |
+#            awk -F " " '{
+#                if (NR == 1) {
+#                    print $1"\t"$2
+#                } else {
+#                    split($0, fields, " ");
+#                    OFS="\t";
+#                    line=""
+#                    for (i = 1; i <= 10 && i <= length(fields); i++) {
+#                        line = (line ? line "\t" : "") fields[i];
+#                    }
+#                    print line
+#                }
+#            }' > ${MODEL_PATH}/${model_name}_${today_early_3}_change.txt
+#            if [ $? -ne 0 ]; then
+#               echo "新模型文件格式转换失败"
+#               /root/anaconda3/bin/python monitor_util.py --level error --msg "荐模型数据更新 \n【任务名称】:step4模型格式转换\n【是否成功】:error\n【信息】:新模型文件格式转换失败${MODEL_PATH}/${model_name}_${today_early_3}.txt"
+#            else
+#              # 4.1.2 模型文件上传OSS
+#              online_model_path=${OSS_PATH}/${model_name}.txt
+#              $HADOOP fs -test -e ${online_model_path}
+#              if [ $? -eq 0 ]; then
+#                  echo "数据存在, 先删除。"
+#                  $HADOOP fs -rm -r -skipTrash ${online_model_path}
+#              else
+#                  echo "数据不存在"
+#              fi
+#              $HADOOP fs -put ${MODEL_PATH}/${model_name}_${today_early_3}_change.txt ${online_model_path}
+#              if [ $? -eq 0 ]; then
+#                 echo "推荐模型文件至OSS成功"
+#                  # 4.1.3 本地保存最新的线上使用的模型,用于下一次的AUC验证
+#                 cp -f ${LAST_MODEL_HOME}/model_online.txt ${LAST_MODEL_HOME}/model_online_$(date +\%Y\%m\%d).txt
+#                 cp -f ${MODEL_PATH}/${model_name}_${today_early_3}.txt ${LAST_MODEL_HOME}/model_online.txt
+#                 if [ $? -ne 0 ]; then
+#                     echo "模型备份失败"
+#                 fi
+#                 /root/anaconda3/bin/python monitor_util.py --level info --msg "荐模型数据更新 \n【任务名称】:step4模型更新\n【是否成功】:success\n【信息】:新模型优于线上模型: 线上模型AUC: ${online_auc}, 新模型AUC: ${new_auc},已更新${model_name}_${today_early_3}.txt模型}"
+#              else
+#                 echo "推荐模型文件至OSS失败"
+#                 /root/anaconda3/bin/python monitor_util.py --level error --msg "荐模型数据更新 \n【任务名称】:step4模型推送oss\n【是否成功】:error\n【信息】:推荐模型文件至OSS失败${MODEL_PATH}/${model_name}_${today_early_3}_change.txt --- ${online_model_path}"
+#              fi
+#            fi
+#        else
+#            echo "新模型不如线上模型: 线上模型AUC: ${online_auc}, 新模型AUC: ${new_auc}"
+#            /root/anaconda3/bin/python monitor_util.py --level info --msg "荐模型数据更新 \n【任务名称】:step4模型更新\n【是否成功】:success\n【信息】:新模型不如线上模型: 线上模型AUC: ${online_auc}, 新模型AUC: ${new_auc},${MODEL_PATH}/${model_name}_${today_early_3}.txt"
+#        fi
+#      fi
+#    fi
+#  fi
+#fi
+
+# 4 模型训练
+#echo "$(date +%Y-%m-%d_%H-%M-%S)----------step4------------开始模型训练,增量训练:${MODEL_PATH}/${model_name}_${today_early_3}.txt"
+##$HADOOP fs -text ${bucketDataPath}/${begin_early_2_Str}/* | ${FM_HOME}/fm_train -m ${MODEL_PATH}/${model_name}_${begin_early_2_Str}.txt -dim 1,1,8 -im ${LAST_MODEL_HOME}/model_online.txt -core 8
+#$HADOOP fs -text ${bucketDataPath}/${begin_early_2_Str}/* | ${FM_HOME}/fm_train -m ${MODEL_PATH}/${model_name}_${begin_early_2_Str}.txt -dim 1,1,8 -im ${MODEL_PATH}/${model_name}_${today_early_3}.txt -core 8
+#if [ $? -ne 0 ]; then
+#   echo "模型训练失败"
+#   /root/anaconda3/bin/python monitor_util.py --level error --msg "荐模型数据更新 \n【任务名称】:step5模型训练\n【是否成功】:error\n【信息】:${bucketDataPath}/${begin_early_2_Str}训练失败"
+#fi
+
+#echo "$(date +%Y-%m-%d_%H-%M-%S)----------step5------------模型训练完成:${MODEL_PATH}/${model_name}_${begin_early_2_Str}.txt"
+
+
+
+

+ 104 - 0
qiaojialiang/monitor_util.py

@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+import argparse
+import datetime
+import json
+
+import requests
+
+server_robot = {
+    'webhook': 'https://open.feishu.cn/open-apis/bot/v2/hook/cbf0bc0f-994a-489b-9f77-de0c5be107cd',
+}
+
+level_header_template_map = {
+    "info": "turquoise",
+    "error": "red",
+    "warn": "yellow"
+}
+
+level_header_title_content_map = {
+    "info": "推荐模型自动更新通知",
+    "error": "推荐模型自动更新告警",
+    "warn": "推荐模型自动更新告警"
+}
+
+
+def send_card_msg_to_feishu(webhook, card_json):
+    """发送消息到飞书"""
+    headers = {'Content-Type': 'application/json'}
+    payload_message = {
+        "msg_type": "interactive",
+        "card": card_json
+    }
+    print(f"推送飞书消息内容: {json.dumps(payload_message)}")
+    response = requests.request('POST', url=webhook, headers=headers, data=json.dumps(payload_message))
+    print(response.text)
+
+
+def seconds_convert(seconds):
+    hours = seconds // 3600
+    minutes = (seconds % 3600) // 60
+    seconds = seconds % 60
+    return f"{hours}小时 {minutes}分钟 {seconds}秒"
+
+
+def _monitor(level, msg: str, start, elapsed):
+    """消息推送"""
+    now = datetime.datetime.now()
+    msg = msg.replace("\\n", "\n").replace("\\t", "\t")
+    mgs_text = f"- 当前时间: {now.strftime('%Y-%m-%d %H:%M:%S')}" \
+               f"\n- 任务描述: {msg}"
+    # f"\n- 任务开始时间: {start}" \
+    # f"\n- 任务耗时: {seconds_convert(elapsed)}" \
+    card_json = {
+        "config": {},
+        "i18n_elements": {
+            "zh_cn": [
+                {
+                    "tag": "markdown",
+                    "content": "",
+                    "text_align": "left",
+                    "text_size": "normal"
+                },
+                {
+                    "tag": "markdown",
+                    "content": mgs_text,
+                    "text_align": "left",
+                    "text_size": "normal"
+                }
+            ]
+        },
+        "i18n_header": {
+            "zh_cn": {
+                "title": {
+                    "tag": "plain_text",
+                    "content": level_header_title_content_map[level]
+                },
+                "subtitle": {
+                    "tag": "plain_text",
+                    "content": ""
+                },
+                "template": level_header_template_map[level]
+            }
+        }
+    }
+
+    send_card_msg_to_feishu(
+        webhook=server_robot.get('webhook'),
+        card_json=card_json
+    )
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='告警Utils')
+    parser.add_argument('--level', type=str, help='通知级别, info, warn, error', required=True)
+    parser.add_argument('--msg', type=str, help='消息', required=True)
+    # parser.add_argument('--start', type=str, help='任务开始时间', required=True)
+    # parser.add_argument('--elapsed', type=int, help='任务耗时【秒】', required=True)
+    args = parser.parse_args()
+
+    _monitor(
+        level=args.level,
+        msg=args.msg,
+        start="",
+        elapsed=0
+    )
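
The shell steps above use this script as a one-shot notifier; a minimal sketch (the message text is illustrative):

    /root/anaconda3/bin/python monitor_util.py --level info --msg "step1 finished\n sample counts look normal"
    # --level must be one of info, warn, error; literal \n and \t in --msg are expanded before sending.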

+ 41 - 0
qiaojialiang/test/demo01.py

@@ -0,0 +1,41 @@
+import subprocess
+from datetime import datetime
+
+# 获取当前日期,并格式化为字符串
+now = datetime.now()
+date_str = now.strftime("%Y-%m-%d")
+
+# 定义日志文件名
+log_file = f"script_output_{date_str}.log"
+
+n1 = 1
+n2 = 5
+
+# 定义Shell脚本的路径
+script1_path = "script1.sh"
+script2_path = "script2.sh"
+
+# 打开日志文件准备写入
+with open(log_file, 'w') as f:
+    # 调用script1.sh,不使用shell=True
+    result1 = subprocess.run([script1_path, str(n1), str(n2)], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                             text=True)
+
+    # 检查script1.sh的返回码,并写入输出到日志文件
+    if result1.returncode == 0 and not result1.stderr:
+        print("script1.sh 执行成功")
+        f.write(f"--- script1.sh 输出 ---\n{result1.stdout}\n")
+
+        # 调用script2.sh,同样不使用shell=True
+        result2 = subprocess.run([script2_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+
+        # 检查script2.sh的返回码,并写入输出到日志文件
+        if result2.returncode == 0 and not result2.stderr:
+            print("script2.sh 执行成功")
+            f.write(f"--- script2.sh 输出 ---\n{result2.stdout}\n")
+        else:
+            print("script2.sh 执行失败")
+            f.write(f"--- script2.sh 错误输出 ---\n{result2.stderr}\n")
+    else:
+        print("script1.sh 执行失败")
+        f.write(f"--- script1.sh 错误输出 ---\n{result1.stderr}\n")

+ 202 - 0
qiaojialiang/test/handle_rov_bak.sh

@@ -0,0 +1,202 @@
+#!/bin/sh
+set -ex
+
+source /root/anaconda3/bin/activate py37
+
+#  nohup sh handle_rov.sh > "$(date +%Y%m%d_%H%M%S)_handle_rov.log" 2>&1 &
+
+# 原始数据table name
+table='alg_recsys_sample_all'
+today="$(date +%Y%m%d)"
+today_early_3="$(date -d '3 days ago' +%Y%m%d)"
+#table='alg_recsys_sample_all_test'
+# 处理分区配置 推荐数据间隔一天生产,所以5日0点使用3日0-23点数据生产new模型数据
+begin_early_2_Str="$(date -d '2 days ago' +%Y%m%d)"
+end_early_2_Str="$(date -d '2 days ago' +%Y%m%d)"
+beginHhStr=00
+endHhStr=23
+max_hour=05
+max_minute=00
+# 各节点产出hdfs文件绝对路径
+# 源数据文件
+originDataPath=/dw/recommend/model/13_sample_data/
+# 特征值
+valueDataPath=/dw/recommend/model/14_feature_data/
+# 特征分桶
+bucketDataPath=/dw/recommend/model/16_train_data/
+# 模型数据路径
+MODEL_PATH=/root/joe/recommend-emr-dataprocess/model
+# 预测路径
+PREDICT_PATH=/root/joe/recommend-emr-dataprocess/predict
+# 历史线上正在使用的模型数据路径
+LAST_MODEL_HOME=/root/joe/model_online
+# 模型数据文件前缀
+model_name=aka8
+# fm模型
+FM_HOME=/root/sunmingze/alphaFM/bin
+# hadoop
+HADOOP=/opt/apps/HADOOP-COMMON/hadoop-common-current/bin/hadoop
+OSS_PATH=oss://art-recommend.oss-cn-hangzhou.aliyuncs.com/zhangbo/
+
+# 0 判断上游表是否生产完成,最长等待到max_hour点
+# shellcheck disable=SC2154
+echo "$(date +%Y-%m-%d_%H-%M-%S)----------step0------------开始校验是否生产完数据,分区信息:beginStr:${begin_early_2_Str}${beginHhStr},endStr:${end_early_2_Str}${endHhStr}"
+while true; do
+  python_return_code=$(python /root/joe/recommend-emr-dataprocess/qiaojialiang/checkHiveDataUtil.py --table ${table} --beginStr ${begin_early_2_Str}${beginHhStr} --endStr ${end_early_2_Str}${endHhStr})
+  echo "python 返回值:${python_return_code}"
+  if [ $python_return_code -eq 0 ]; then
+    echo "Python程序返回0,校验存在数据,退出循环。"
+    break
+  fi
+  echo "Python程序返回非0值,不存在数据,等待五分钟后再次调用。"
+  sleep 300
+  current_hour=$(date +%H)
+  current_minute=$(date +%M)
+  # shellcheck disable=SC2039
+  if (( current_hour > max_hour || (current_hour == max_hour && current_minute >= max_minute) )); then
+    echo "最长等待时间已到,失败:${current_hour}-${current_minute}"
+    python FeishuBot.py "荐模型数据更新 \n【任务名称】:step0校验是否生产完数据\n【是否成功】:error\n【信息】:最长等待时间已到,失败:${current_hour}-${current_minute}"
+    exit 1
+  fi
+done
+
+# 1 生产原始数据
+echo "$(date +%Y-%m-%d_%H-%M-%S)----------step1------------开始根据${table}生产原始数据"
+/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+--class com.aliyun.odps.spark.examples.makedata_qiao.makedata_13_originData_20240705 \
+--master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
+../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+tablePart:64 repartition:32 \
+beginStr:${begin_early_2_Str}${beginHhStr} endStr:${end_early_2_Str}${endHhStr} \
+savePath:${originDataPath} \
+table:${table}
+if [ $? -ne 0 ]; then
+   echo "Spark原始样本生产任务执行失败"
+   python FeishuBot.py "荐模型数据更新 \n【任务名称】:step1根据${table}生产原始数据\n【是否成功】:error\n【信息】:Spark原始样本生产任务执行失败"
+   exit 1
+else
+   echo "spark原始样本生产执行成功"
+fi
+
+
+# 2 特征值拼接
+echo "$(date +%Y-%m-%d_%H-%M-%S)----------step2------------开始特征值拼接"
+/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+--class com.aliyun.odps.spark.examples.makedata_qiao.makedata_14_valueData_20240705 \
+--master yarn --driver-memory 1G --executor-memory 3G --executor-cores 1 --num-executors 32 \
+../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+readPath:${originDataPath} \
+savePath:${valueDataPath} \
+beginStr:${begin_early_2_Str} endStr:${end_early_2_Str} repartition:1000
+if [ $? -ne 0 ]; then
+   echo "Spark特征值拼接处理任务执行失败"
+   python FeishuBot.py "荐模型数据更新 \n【任务名称】:step2特征值拼接\n【是否成功】:error\n【信息】:Spark特征值拼接处理任务执行失败"
+   exit 1
+else
+   echo "spark特征值拼接处理执行成功"
+fi
+
+# 3 特征分桶
+echo "$(date +%Y-%m-%d_%H-%M-%S)----------step3------------根据特征分桶生产重打分特征数据"
+/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+--class com.aliyun.odps.spark.examples.makedata_qiao.makedata_16_bucketData_20240705 \
+--master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
+../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+readPath:${valueDataPath} \
+savePath:${bucketDataPath} \
+beginStr:${begin_early_2_Str} endStr:${end_early_2_Str} repartition:1000
+if [ $? -ne 0 ]; then
+   echo "Spark特征分桶处理任务执行失败"
+   python FeishuBot.py "荐模型数据更新 \n【任务名称】:step3训练数据产出\n【是否成功】:error\n【信息】:Spark特征分桶处理任务执行失败"
+   exit 1
+else
+   echo "spark特征分桶处理执行成功"
+fi
+
+
+# 4 对比AUC 前置对比3日模型数据 与 线上模型数据效果对比,如果3日模型优于线上,更新线上模型
+echo "$(date +%Y-%m-%d_%H-%M-%S)----------step4------------开始对比,新:${MODEL_PATH}/${model_name}_${today_early_3}.txt,与线上online模型数据auc效果"
+$HADOOP fs -text ${bucketDataPath}/${begin_early_2_Str}/* | ${FM_HOME}/fm_predict -m ${LAST_MODEL_HOME}/model_online.txt -dim 8 -core 8 -out ${PREDICT_PATH}/${model_name}_${today}_online.txt
+$HADOOP fs -text ${bucketDataPath}/${begin_early_2_Str}/* | ${FM_HOME}/fm_predict -m ${MODEL_PATH}/${model_name}_${today_early_3}.txt -dim 8 -core 8 -out ${PREDICT_PATH}/${model_name}_${today}_new.txt
+
+online_auc=`cat ${PREDICT_PATH}/${model_name}_${today}_online.txt | /root/sunmingze/AUC/AUC`
+if [ $? -ne 0 ]; then
+   echo "推荐线上模型AUC计算失败"
+   python FeishuBot.py "荐模型数据更新 \n【任务名称】:step4新旧模型AUC对比\n【是否成功】:error\n【信息】:推荐线上模型AUC计算失败"
+   exit 1
+fi
+
+new_auc=`cat ${PREDICT_PATH}/${model_name}_${today}_new.txt | /root/sunmingze/AUC/AUC`
+if [ $? -ne 0 ]; then
+   echo "推荐新模型AUC计算失败"
+   python FeishuBot.py "荐模型数据更新 \n【任务名称】:step4新旧模型AUC对比\n【是否成功】:error\n【信息】:推荐新模型AUC计算失败${PREDICT_PATH}/${model_name}_${today}_new.txt"
+   exit 1
+fi
+
+
+# 4.1 对比auc数据判断是否更新线上模型
+if [ "$online_auc" \< "$new_auc" ]; then
+    echo "新模型优于线上模型: 线上模型AUC: ${online_auc}, 新模型AUC: ${new_auc}"
+    # 4.1.1 模型格式转换
+    cat ${MODEL_PATH}/${model_name}_${today_early_3}.txt |
+    awk -F " " '{
+         if (NR == 1) {
+              print $1"\t"$2
+         } else {
+              split($0, fields, " ");
+              OFS="\t";
+              line=""
+              for (i = 1; i <= 10 && i <= length(fields); i++) {
+                  line = (line ? line "\t" : "") fields[i];
+              }
+              print line
+         }
+    }' > ${MODEL_PATH}/${model_name}_${today_early_3}_change.txt
+    if [ $? -ne 0 ]; then
+       echo "新模型文件格式转换失败"
+       python FeishuBot.py "荐模型数据更新 \n【任务名称】:step4模型格式转换\n【是否成功】:error\n【信息】:新模型文件格式转换失败${MODEL_PATH}/${model_name}_${today_early_3}.txt"
+       exit 1
+    fi
+    # 4.1.2 模型文件上传OSS
+    online_model_path=${OSS_PATH}/${model_name}.txt
+    $HADOOP fs -test -e ${online_model_path}
+    if [ $? -eq 0 ]; then
+        echo "数据存在, 先删除。"
+        $HADOOP fs -rm -r -skipTrash ${online_model_path}
+    else
+        echo "数据不存在"
+    fi
+
+    $HADOOP fs -put ${MODEL_PATH}/${model_name}_${today_early_3}_change.txt ${online_model_path}
+    if [ $? -eq 0 ]; then
+       echo "推荐模型文件至OSS成功"
+    else
+       echo "推荐模型文件至OSS失败"
+       python FeishuBot.py "荐模型数据更新 \n【任务名称】:step4模型推送oss\n【是否成功】:error\n【信息】:推荐模型文件至OSS失败${MODEL_PATH}/${model_name}_${today_early_3}_change.txt --- ${online_model_path}"
+       exit 1
+    fi
+    # 4.1.3 本地保存最新的线上使用的模型,用于下一次的AUC验证
+    cp -f ${LAST_MODEL_HOME}/model_online.txt ${LAST_MODEL_HOME}/model_online_$(date +\%Y\%m\%d).txt
+    cp -f ${MODEL_PATH}/${model_name}_${today_early_3}.txt ${LAST_MODEL_HOME}/model_online.txt
+    if [ $? -ne 0 ]; then
+       echo "模型备份失败"
+       exit 1
+    fi
+    python FeishuBot.py "荐模型数据更新 \n【任务名称】:step4模型更新\n【是否成功】:success\n【信息】:新模型优于线上模型: 线上模型AUC: ${online_auc}, 新模型AUC: ${new_auc},已更新${model_name}_${today_early_3}.txt模型}"
+else
+    echo "新模型不如线上模型: 线上模型AUC: ${online_auc}, 新模型AUC: ${new_auc}"
+    python FeishuBot.py "荐模型数据更新 \n【任务名称】:step4模型更新\n【是否成功】:success\n【信息】:新模型不如线上模型: 线上模型AUC: ${online_auc}, 新模型AUC: ${new_auc}}"
+fi
+
+
+
+# 5 模型训练
+echo "$(date +%Y-%m-%d_%H-%M-%S)----------step5------------开始模型训练"
+$HADOOP fs -text ${bucketDataPath}/${begin_early_2_Str}/* | ${FM_HOME}/fm_train -m ${MODEL_PATH}/${model_name}_${begin_early_2_Str}.txt -dim 1,1,8 -im ${LAST_MODEL_HOME}/model_online.txt -core 8
+if [ $? -ne 0 ]; then
+   echo "模型训练失败"
+   python FeishuBot.py "推荐模型数据更新 \n【任务名称】:step5模型训练\n【是否成功】:error\n【信息】:${bucketDataPath}/${begin_early_2_Str}训练失败"
+   exit 1
+fi
+
+echo "$(date +%Y-%m-%d_%H-%M-%S)----------step6------------模型训练完成:${MODEL_PATH}/${model_name}_${begin_early_2_Str}.txt"
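
A note on step 4.1 above: the test [ "$online_auc" \< "$new_auc" ] is a lexicographic string comparison. It happens to behave correctly for AUC values printed as "0.xxxx", but it silently misbehaves if either value is empty or formatted differently. A minimal sketch of a numeric comparison via awk (the helper name compare_gt is hypothetical, not part of this commit):

    # Returns 0 (true) when $1 is numerically greater than $2.
    # awk treats non-numeric or empty input as 0, so a missing AUC can no longer "win" by accident.
    compare_gt() {
        awk -v a="$1" -v b="$2" 'BEGIN { exit !(a > b) }'
    }

    if compare_gt "$new_auc" "$online_auc"; then
        echo "new model beats online model: online=${online_auc}, new=${new_auc}"
    fi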

+ 28 - 0
qiaojialiang/test/multiTest.sh

@@ -0,0 +1,28 @@
+#!/bin/sh
+set -x
+
+source /root/anaconda3/bin/activate py37
+
+export SPARK_HOME=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8
+export PATH=$SPARK_HOME/bin:$PATH
+export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
+export JAVA_HOME=/usr/lib/jvm/java-1.8.0
+
+
+# 启动第一个命令并放入后台
+echo "Starting command 1"
+sleep 5 &
+
+# 启动第二个命令并放入后台
+echo "Starting command 2"
+sleep 3 &
+
+# 启动第三个命令并放入后台
+echo "Starting command 3"
+sleep 4 &
+
+# 使用wait等待所有后台命令完成
+wait
+
+# 所有命令执行完毕后执行的命令
+echo "All commands have completed successfully."
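
multiTest.sh demonstrates fan-out with background jobs plus wait. Worth noting: a bare wait always exits 0, so a failed background command would still be reported as success. A small sketch (an assumption on my part: it requires bash rather than plain sh, because it uses arrays) that waits on each PID and surfaces failures:

    pids=()
    sleep 5 & pids+=($!)
    sleep 3 & pids+=($!)
    sleep 4 & pids+=($!)

    fail=0
    for pid in "${pids[@]}"; do
        wait "$pid" || fail=1        # collect each job's exit status
    done

    if [ "$fail" -eq 0 ]; then
        echo "All commands have completed successfully."
    else
        echo "At least one background command failed."
    fi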

+ 32 - 0
qiaojialiang/test/script1.sh

@@ -0,0 +1,32 @@
+#!/bin/bash
+
+# 检查是否提供了两个参数
+if [ "$#" -ne 2 ]; then
+    echo "用法: $0 n1 n2"
+    echo "此脚本输出从n1到n2(包含)的所有数字。"
+    exit 1
+fi
+
+n1=$1
+n2=$2
+
+# 检查n1和n2是否为整数
+if ! [[ "$n1" =~ ^[0-9]+$ ]] || ! [[ "$n2" =~ ^[0-9]+$ ]]; then
+    echo "错误:n1和n2都必须是正整数。"
+    exit 1
+fi
+
+# 确保n1不大于n2
+if [ "$n1" -gt "$n2" ]; then
+    echo "错误:n1必须小于或等于n2。"
+    exit 1
+fi
+
+# 输出从n1到n2的数字
+for (( i=$n1; i<=$n2; i++ ))
+do
+    echo $i
+    sleep 1
+done
+
+exit 0

+ 4 - 0
qiaojialiang/test/script2.sh

@@ -0,0 +1,4 @@
+#!/bin/bash
+echo "执行第二个脚本"
+# 这里添加你的脚本逻辑
+exit 0

+ 53 - 0
qiaojialiang/xunlian.sh

@@ -0,0 +1,53 @@
+#!/bin/sh
+set -x
+
+source /root/anaconda3/bin/activate py37
+
+export SPARK_HOME=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8
+export PATH=$SPARK_HOME/bin:$PATH
+export HADOOP_CONF_DIR=/etc/taihao-apps/hadoop-conf
+export JAVA_HOME=/usr/lib/jvm/java-1.8.0
+
+#  nohup sh handle_rov.sh > "$(date +%Y%m%d_%H%M%S)_handle_rov.log" 2>&1 &
+
+# 原始数据table name
+table='alg_recsys_sample_all'
+today="$(date +%Y%m%d)"
+today_early_3="$(date -d '3 days ago' +%Y%m%d)"
+#table='alg_recsys_sample_all_test'
+# 处理分区配置 推荐数据间隔一天生产,所以5日0点使用3日0-23点数据生产new模型数据
+begin_early_2_Str="$(date -d '2 days ago' +%Y%m%d)"
+end_early_2_Str="$(date -d '2 days ago' +%Y%m%d)"
+beginHhStr=00
+endHhStr=23
+max_hour=05
+max_minute=00
+# 各节点产出hdfs文件绝对路径
+# 源数据文件
+originDataPath=/dw/recommend/model/41_recsys_sample_data/
+# 特征值
+valueDataPath=/dw/recommend/model/14_feature_data/
+# 特征分桶
+bucketDataPath=/dw/recommend/model/43_recsys_train_data/
+# 模型数据路径
+MODEL_PATH=/root/joe/recommend-emr-dataprocess/model
+# 预测路径
+PREDICT_PATH=/root/joe/recommend-emr-dataprocess/predict
+# 历史线上正在使用的模型数据路径
+LAST_MODEL_HOME=/root/joe/model_online
+# 模型数据文件前缀
+model_name=model_nba8
+# fm模型
+FM_HOME=/root/sunmingze/alphaFM/bin
+# hadoop
+HADOOP=/opt/apps/HADOOP-COMMON/hadoop-common-current/bin/hadoop
+OSS_PATH=oss://art-recommend.oss-cn-hangzhou.aliyuncs.com/zhangbo/
+
+
+$HADOOP fs -text ${bucketDataPath}/20240713/* | ${FM_HOME}/fm_train -m ${MODEL_PATH}/${model_name}_0709_0713.txt -dim 1,1,8 -im /root/joe/recommend-emr-dataprocess/model/model_nba8_0709_0712.txt -core 8
+
+$HADOOP fs -text ${bucketDataPath}/20240714/* | ${FM_HOME}/fm_predict -m ${MODEL_PATH}/${model_name}_0709_0713.txt -dim 8 -core 8 -out ${PREDICT_PATH}/${model_name}_0709_0713_new.txt
+
+new_auc=`cat ${PREDICT_PATH}/${model_name}_0709_0713_new.txt | /root/sunmingze/AUC/AUC`
+
+echo "0709-0713 auc:${new_auc}"

+ 130 - 0
qiaojialiang/xunlian_0724.sh

@@ -0,0 +1,130 @@
+#!/bin/sh
+set -ex
+
+
+# 原始数据table name
+table='alg_recsys_sample_all'
+today="$(date +%Y%m%d)"
+today_early_3="$(date -d '3 days ago' +%Y%m%d)"
+#table='alg_recsys_sample_all_test'
+# 处理分区配置 推荐数据间隔一天生产,所以5日0点使用3日0-23点数据生产new模型数据
+begin_early_2_Str="$(date -d '2 days ago' +%Y%m%d)"
+end_early_2_Str="$(date -d '2 days ago' +%Y%m%d)"
+beginHhStr=00
+endHhStr=23
+max_hour=05
+max_minute=00
+# 各节点产出hdfs文件绝对路径
+# 源数据文件
+originDataPath=/dw/recommend/model/41_recsys_sample_data/
+# 特征值
+valueDataPath=/dw/recommend/model/14_feature_data/
+# 特征分桶
+bucketDataPath=/dw/recommend/model/43_recsys_train_data/
+# 模型数据路径
+MODEL_PATH=/root/joe/recommend-emr-dataprocess/model
+# 预测路径
+PREDICT_PATH=/root/joe/recommend-emr-dataprocess/predict
+# 历史线上正在使用的模型数据路径
+LAST_MODEL_HOME=/root/joe/model_online
+# 模型数据文件前缀
+model_name=model_nba8
+# fm模型
+FM_HOME=/root/sunmingze/alphaFM/bin
+# hadoop
+HADOOP=/opt/apps/HADOOP-COMMON/hadoop-common-current/bin/hadoop
+OSS_PATH=oss://art-recommend.oss-cn-hangzhou.aliyuncs.com/zhangbo/
+FM_TRAIN="/root/sunmingze/alphaFM/bin/fm_train"
+
+#echo "$(date +%Y-%m-%d_%H-%M-%S)----------step2------------根据特征分桶生产重打分特征数据20240717"
+#/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+#--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_43_bucketData_20240709 \
+#--master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
+#../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+#readPath:${originDataPath} \
+#savePath:${bucketDataPath} \
+#beginStr:20240717 endStr:20240717 repartition:500 \
+#filterNames:XXXXXXXXX \
+#fileName:20240609_bucket_314.txt \
+#whatLabel:is_return whatApps:0,4,21,17
+#
+#echo "$(date +%Y-%m-%d_%H-%M-%S)----------step2------------根据特征分桶生产重打分特征数据20240718"
+#/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+#--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_43_bucketData_20240709 \
+#--master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
+#../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+#readPath:${originDataPath} \
+#savePath:${bucketDataPath} \
+#beginStr:20240718 endStr:20240718 repartition:500 \
+#filterNames:XXXXXXXXX \
+#fileName:20240609_bucket_314.txt \
+#whatLabel:is_return whatApps:0,4,21,17
+#
+#echo "$(date +%Y-%m-%d_%H-%M-%S)----------step2------------根据特征分桶生产重打分特征数据20240719"
+#/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+#--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_43_bucketData_20240709 \
+#--master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
+#../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+#readPath:${originDataPath} \
+#savePath:${bucketDataPath} \
+#beginStr:20240719 endStr:20240719 repartition:500 \
+#filterNames:XXXXXXXXX \
+#fileName:20240609_bucket_314.txt \
+#whatLabel:is_return whatApps:0,4,21,17
+#
+#echo "$(date +%Y-%m-%d_%H-%M-%S)----------step2------------根据特征分桶生产重打分特征数据20240720"
+#/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+#--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_43_bucketData_20240709 \
+#--master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
+#../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+#readPath:${originDataPath} \
+#savePath:${bucketDataPath} \
+#beginStr:20240720 endStr:20240720 repartition:500 \
+#filterNames:XXXXXXXXX \
+#fileName:20240609_bucket_314.txt \
+#whatLabel:is_return whatApps:0,4,21,17
+#
+#echo "$(date +%Y-%m-%d_%H-%M-%S)----------step2------------根据特征分桶生产重打分特征数据20240721"
+#/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+#--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_43_bucketData_20240709 \
+#--master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
+#../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+#readPath:${originDataPath} \
+#savePath:${bucketDataPath} \
+#beginStr:20240721 endStr:20240721 repartition:500 \
+#filterNames:XXXXXXXXX \
+#fileName:20240609_bucket_314.txt \
+#whatLabel:is_return whatApps:0,4,21,17
+#
+#echo "$(date +%Y-%m-%d_%H-%M-%S)----------step2------------根据特征分桶生产重打分特征数据20240722"
+#/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+#--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_43_bucketData_20240709 \
+#--master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
+#../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+#readPath:${originDataPath} \
+#savePath:${bucketDataPath} \
+#beginStr:20240722 endStr:20240722 repartition:500 \
+#filterNames:XXXXXXXXX \
+#fileName:20240609_bucket_314.txt \
+#whatLabel:is_return whatApps:0,4,21,17
+
+
+
+$HADOOP fs -text /dw/recommend/model/43_recsys_train_data/20240717/* | ${FM_TRAIN} -m /root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_17.txt -dim 1,1,8 -core 8 -im /root/joe/recommend-emr-dataprocess/model/model_nba8_20240716.txt
+echo "$(date +%Y-%m-%d_%H-%M-%S)----step------out model 0709~0722: /root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_17.txt"
+
+$HADOOP fs -text /dw/recommend/model/43_recsys_train_data/20240718/* | ${FM_TRAIN} -m /root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_18.txt -dim 1,1,8 -core 8 -im /root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_17.txt
+echo "$(date +%Y-%m-%d_%H-%M-%S)----step------out model 0709~0722: /root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_18.txt"
+
+$HADOOP fs -text /dw/recommend/model/43_recsys_train_data/20240719/* | ${FM_TRAIN} -m /root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_19.txt -dim 1,1,8 -core 8 -im /root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_18.txt
+echo "$(date +%Y-%m-%d_%H-%M-%S)----step------out model 0709~0722: /root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_19.txt"
+
+$HADOOP fs -text /dw/recommend/model/43_recsys_train_data/20240720/* | ${FM_TRAIN} -m /root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_20.txt -dim 1,1,8 -core 8 -im /root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_19.txt
+echo "$(date +%Y-%m-%d_%H-%M-%S)----step------out model 0709~0722: /root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_20.txt"
+
+$HADOOP fs -text /dw/recommend/model/43_recsys_train_data/20240721/* | ${FM_TRAIN} -m /root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_21.txt -dim 1,1,8 -core 8 -im /root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_20.txt
+echo "$(date +%Y-%m-%d_%H-%M-%S)----step------out model 0709~0722: /root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_21.txt"
+
+$HADOOP fs -text /dw/recommend/model/43_recsys_train_data/20240722/* | ${FM_TRAIN} -m /root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_22.txt -dim 1,1,8 -core 8 -im /root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_21.txt
+echo "$(date +%Y-%m-%d_%H-%M-%S)----step------out model 0709~0722: /root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_22.txt"
+
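
The six training commands above form a day-by-day warm-start chain: each day's model is trained with -im pointing at the previous day's output. A condensed sketch of the same chain as a loop, reusing HADOOP and FM_TRAIN defined earlier in this script; the naming scheme model_nba8_all_9_<dd>.txt simply mirrors the hard-coded names above, and bash substring expansion plus GNU date are assumed:

    start_day=20240717
    end_day=20240722
    prev_model=/root/joe/recommend-emr-dataprocess/model/model_nba8_20240716.txt

    d=$start_day
    while [ "$d" -le "$end_day" ]; do
        cur_model=/root/joe/recommend-emr-dataprocess/model/model_nba8_all_9_${d:6:2}.txt
        $HADOOP fs -text /dw/recommend/model/43_recsys_train_data/${d}/* | \
            ${FM_TRAIN} -m "$cur_model" -dim 1,1,8 -core 8 -im "$prev_model"
        echo "$(date +%Y-%m-%d_%H-%M-%S)----step------out model: $cur_model"
        prev_model=$cur_model              # warm-start the next day from today's model
        d=$(date -d "$d +1 day" +%Y%m%d)
    done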

+ 8 - 0
spark-examples.iml

@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module version="4">
+  <component name="FacetManager">
+    <facet type="Python" name="Python">
+      <configuration sdkName="Python 3.12 (recommend-emr-dataprocess)" />
+    </facet>
+  </component>
+</module>

文件差异内容过多而无法显示
+ 0 - 2
src/main/resources/20240609_bucket_274_old.txt


文件差异内容过多而无法显示
+ 0 - 0
src/main/resources/20240609_bucket_314.txt


+ 0 - 0
src/main/resources/20240709_recsys_bucket_314.txt


+ 314 - 0
src/main/resources/20240709_recsys_feature_name_314.txt

@@ -0,0 +1,314 @@
+b123_1h_STR
+b123_1h_log(share)
+b123_1h_ROV
+b123_1h_log(return)
+b123_1h_ROV*log(return)
+b123_1h_ROS
+b123_2h_STR
+b123_2h_log(share)
+b123_2h_ROV
+b123_2h_log(return)
+b123_2h_ROV*log(return)
+b123_2h_ROS
+b123_3h_STR
+b123_3h_log(share)
+b123_3h_ROV
+b123_3h_log(return)
+b123_3h_ROV*log(return)
+b123_3h_ROS
+b123_4h_STR
+b123_4h_log(share)
+b123_4h_ROV
+b123_4h_log(return)
+b123_4h_ROV*log(return)
+b123_4h_ROS
+b123_12h_STR
+b123_12h_log(share)
+b123_12h_ROV
+b123_12h_log(return)
+b123_12h_ROV*log(return)
+b123_12h_ROS
+b123_1d_STR
+b123_1d_log(share)
+b123_1d_ROV
+b123_1d_log(return)
+b123_1d_ROV*log(return)
+b123_1d_ROS
+b123_3d_STR
+b123_3d_log(share)
+b123_3d_ROV
+b123_3d_log(return)
+b123_3d_ROV*log(return)
+b123_3d_ROS
+b123_7d_STR
+b123_7d_log(share)
+b123_7d_ROV
+b123_7d_log(return)
+b123_7d_ROV*log(return)
+b123_7d_ROS
+b167_1h_STR
+b167_1h_log(share)
+b167_1h_ROV
+b167_1h_log(return)
+b167_1h_ROV*log(return)
+b167_1h_ROS
+b167_2h_STR
+b167_2h_log(share)
+b167_2h_ROV
+b167_2h_log(return)
+b167_2h_ROV*log(return)
+b167_2h_ROS
+b167_3h_STR
+b167_3h_log(share)
+b167_3h_ROV
+b167_3h_log(return)
+b167_3h_ROV*log(return)
+b167_3h_ROS
+b167_4h_STR
+b167_4h_log(share)
+b167_4h_ROV
+b167_4h_log(return)
+b167_4h_ROV*log(return)
+b167_4h_ROS
+b167_12h_STR
+b167_12h_log(share)
+b167_12h_ROV
+b167_12h_log(return)
+b167_12h_ROV*log(return)
+b167_12h_ROS
+b167_1d_STR
+b167_1d_log(share)
+b167_1d_ROV
+b167_1d_log(return)
+b167_1d_ROV*log(return)
+b167_1d_ROS
+b167_3d_STR
+b167_3d_log(share)
+b167_3d_ROV
+b167_3d_log(return)
+b167_3d_ROV*log(return)
+b167_3d_ROS
+b167_7d_STR
+b167_7d_log(share)
+b167_7d_ROV
+b167_7d_log(return)
+b167_7d_ROV*log(return)
+b167_7d_ROS
+b8910_1h_STR
+b8910_1h_log(share)
+b8910_1h_ROV
+b8910_1h_log(return)
+b8910_1h_ROV*log(return)
+b8910_1h_ROS
+b8910_2h_STR
+b8910_2h_log(share)
+b8910_2h_ROV
+b8910_2h_log(return)
+b8910_2h_ROV*log(return)
+b8910_2h_ROS
+b8910_3h_STR
+b8910_3h_log(share)
+b8910_3h_ROV
+b8910_3h_log(return)
+b8910_3h_ROV*log(return)
+b8910_3h_ROS
+b8910_4h_STR
+b8910_4h_log(share)
+b8910_4h_ROV
+b8910_4h_log(return)
+b8910_4h_ROV*log(return)
+b8910_4h_ROS
+b8910_12h_STR
+b8910_12h_log(share)
+b8910_12h_ROV
+b8910_12h_log(return)
+b8910_12h_ROV*log(return)
+b8910_12h_ROS
+b8910_1d_STR
+b8910_1d_log(share)
+b8910_1d_ROV
+b8910_1d_log(return)
+b8910_1d_ROV*log(return)
+b8910_1d_ROS
+b8910_3d_STR
+b8910_3d_log(share)
+b8910_3d_ROV
+b8910_3d_log(return)
+b8910_3d_ROV*log(return)
+b8910_3d_ROS
+b8910_7d_STR
+b8910_7d_log(share)
+b8910_7d_ROV
+b8910_7d_log(return)
+b8910_7d_ROV*log(return)
+b8910_7d_ROS
+b111213_1h_STR
+b111213_1h_log(share)
+b111213_1h_ROV
+b111213_1h_log(return)
+b111213_1h_ROV*log(return)
+b111213_1h_ROS
+b111213_2h_STR
+b111213_2h_log(share)
+b111213_2h_ROV
+b111213_2h_log(return)
+b111213_2h_ROV*log(return)
+b111213_2h_ROS
+b111213_3h_STR
+b111213_3h_log(share)
+b111213_3h_ROV
+b111213_3h_log(return)
+b111213_3h_ROV*log(return)
+b111213_3h_ROS
+b111213_4h_STR
+b111213_4h_log(share)
+b111213_4h_ROV
+b111213_4h_log(return)
+b111213_4h_ROV*log(return)
+b111213_4h_ROS
+b111213_12h_STR
+b111213_12h_log(share)
+b111213_12h_ROV
+b111213_12h_log(return)
+b111213_12h_ROV*log(return)
+b111213_12h_ROS
+b111213_1d_STR
+b111213_1d_log(share)
+b111213_1d_ROV
+b111213_1d_log(return)
+b111213_1d_ROV*log(return)
+b111213_1d_ROS
+b111213_3d_STR
+b111213_3d_log(share)
+b111213_3d_ROV
+b111213_3d_log(return)
+b111213_3d_ROV*log(return)
+b111213_3d_ROS
+b111213_7d_STR
+b111213_7d_log(share)
+b111213_7d_ROV
+b111213_7d_log(return)
+b111213_7d_ROV*log(return)
+b111213_7d_ROS
+b171819_1h_STR
+b171819_1h_log(share)
+b171819_1h_ROV
+b171819_1h_log(return)
+b171819_1h_ROV*log(return)
+b171819_1h_ROS
+b171819_2h_STR
+b171819_2h_log(share)
+b171819_2h_ROV
+b171819_2h_log(return)
+b171819_2h_ROV*log(return)
+b171819_2h_ROS
+b171819_3h_STR
+b171819_3h_log(share)
+b171819_3h_ROV
+b171819_3h_log(return)
+b171819_3h_ROV*log(return)
+b171819_3h_ROS
+b171819_4h_STR
+b171819_4h_log(share)
+b171819_4h_ROV
+b171819_4h_log(return)
+b171819_4h_ROV*log(return)
+b171819_4h_ROS
+b171819_12h_STR
+b171819_12h_log(share)
+b171819_12h_ROV
+b171819_12h_log(return)
+b171819_12h_ROV*log(return)
+b171819_12h_ROS
+b171819_1d_STR
+b171819_1d_log(share)
+b171819_1d_ROV
+b171819_1d_log(return)
+b171819_1d_ROV*log(return)
+b171819_1d_ROS
+b171819_3d_STR
+b171819_3d_log(share)
+b171819_3d_ROV
+b171819_3d_log(return)
+b171819_3d_ROV*log(return)
+b171819_3d_ROS
+b171819_7d_STR
+b171819_7d_log(share)
+b171819_7d_ROV
+b171819_7d_log(return)
+b171819_7d_ROV*log(return)
+b171819_7d_ROS
+total_time
+bit_rate
+playcnt_6h
+playcnt_1d
+playcnt_3d
+playcnt_7d
+share_pv_12h
+share_pv_1d
+share_pv_3d
+share_pv_7d
+return_uv_12h
+return_uv_1d
+return_uv_3d
+return_uv_7d
+c3_feature_tags_1d_matchnum
+c3_feature_tags_1d_maxscore
+c3_feature_tags_1d_avgscore
+c3_feature_tags_3d_matchnum
+c3_feature_tags_3d_maxscore
+c3_feature_tags_3d_avgscore
+c3_feature_tags_7d_matchnum
+c3_feature_tags_7d_maxscore
+c3_feature_tags_7d_avgscore
+c4_feature_tags_1d_matchnum
+c4_feature_tags_1d_maxscore
+c4_feature_tags_1d_avgscore
+c4_feature_tags_3d_matchnum
+c4_feature_tags_3d_maxscore
+c4_feature_tags_3d_avgscore
+c4_feature_tags_7d_matchnum
+c4_feature_tags_7d_maxscore
+c4_feature_tags_7d_avgscore
+c5_feature_tags_1d_matchnum
+c5_feature_tags_1d_maxscore
+c5_feature_tags_1d_avgscore
+c5_feature_tags_3d_matchnum
+c5_feature_tags_3d_maxscore
+c5_feature_tags_3d_avgscore
+c5_feature_tags_7d_matchnum
+c5_feature_tags_7d_maxscore
+c5_feature_tags_7d_avgscore
+c6_feature_tags_1d_matchnum
+c6_feature_tags_1d_maxscore
+c6_feature_tags_1d_avgscore
+c6_feature_tags_3d_matchnum
+c6_feature_tags_3d_maxscore
+c6_feature_tags_3d_avgscore
+c6_feature_tags_7d_matchnum
+c6_feature_tags_7d_maxscore
+c6_feature_tags_7d_avgscore
+c7_feature_tags_1d_matchnum
+c7_feature_tags_1d_maxscore
+c7_feature_tags_1d_avgscore
+c7_feature_tags_3d_matchnum
+c7_feature_tags_3d_maxscore
+c7_feature_tags_3d_avgscore
+c7_feature_tags_7d_matchnum
+c7_feature_tags_7d_maxscore
+c7_feature_tags_7d_avgscore
+c8_feature_share_score
+c8_feature_share_num
+c8_feature_share_rank
+c8_feature_return_score
+c8_feature_return_num
+c8_feature_return_rank
+c9_feature_share_score
+c9_feature_share_num
+c9_feature_share_rank
+c9_feature_return_score
+c9_feature_return_num
+c9_feature_return_rank
+d1_exp
+d1_return_n
+d1_rovn

+ 278 - 0
src/main/scala/com/aliyun/odps/spark/examples/makedata_qiao/makedata_13_originData_20240705.scala

@@ -0,0 +1,278 @@
+package com.aliyun.odps.spark.examples.makedata_qiao
+
+import com.alibaba.fastjson.{JSON, JSONObject}
+import com.aliyun.odps.TableSchema
+import com.aliyun.odps.data.Record
+import com.aliyun.odps.spark.examples.myUtils.{MyDateUtils, MyHdfsUtils, ParamUtils, env}
+import examples.extractor.RankExtractorFeature_20240530
+import org.apache.hadoop.io.compress.GzipCodec
+import org.apache.spark.sql.SparkSession
+import org.xm.Similarity
+
+import scala.collection.JavaConversions._
+import scala.collection.mutable.ArrayBuffer
+/*
+   20240608 提取特征
+ */
+
+object makedata_13_originData_20240705 {
+  def main(args: Array[String]): Unit = {
+    val spark = SparkSession
+      .builder()
+      .appName(this.getClass.getName)
+      .getOrCreate()
+    val sc = spark.sparkContext
+
+    // 1 读取参数
+    val param = ParamUtils.parseArgs(args)
+    val tablePart = param.getOrElse("tablePart", "64").toInt
+    val beginStr = param.getOrElse("beginStr", "2023010100")
+    val endStr = param.getOrElse("endStr", "2023010123")
+    val savePath = param.getOrElse("savePath", "/dw/recommend/model/13_sample_data/")
+    val project = param.getOrElse("project", "loghubods")
+    val table = param.getOrElse("table", "XXXX")
+    val repartition = param.getOrElse("repartition", "100").toInt
+
+    // 2 读取odps+表信息
+    val odpsOps = env.getODPS(sc)
+
+    // 3 循环执行数据生产
+    val timeRange = MyDateUtils.getDateHourRange(beginStr, endStr)
+    for (dt_hh <- timeRange) {
+      val dt = dt_hh.substring(0, 8)
+      val hh = dt_hh.substring(8, 10)
+      val partition = s"dt=$dt,hh=$hh"
+      println("开始执行partiton:" + partition)
+      val odpsData = odpsOps.readTable(project = project,
+        table = table,
+        partition = partition,
+        transfer = func,
+        numPartition = tablePart)
+        .map(record => {
+
+          val featureMap = new JSONObject()
+
+          // a 视频特征
+          val b1: JSONObject = if (record.isNull("b1_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b1_feature"))
+          val b2: JSONObject = if (record.isNull("b2_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b2_feature"))
+          val b3: JSONObject = if (record.isNull("b3_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b3_feature"))
+          val b6: JSONObject = if (record.isNull("b6_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b6_feature"))
+          val b7: JSONObject = if (record.isNull("b7_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b7_feature"))
+
+          val b8: JSONObject = if (record.isNull("b8_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b8_feature"))
+          val b9: JSONObject = if (record.isNull("b9_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b9_feature"))
+          val b10: JSONObject = if (record.isNull("b10_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b10_feature"))
+          val b11: JSONObject = if (record.isNull("b11_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b11_feature"))
+          val b12: JSONObject = if (record.isNull("b12_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b12_feature"))
+          val b13: JSONObject = if (record.isNull("b13_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b13_feature"))
+          val b17: JSONObject = if (record.isNull("b17_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b17_feature"))
+          val b18: JSONObject = if (record.isNull("b18_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b18_feature"))
+          val b19: JSONObject = if (record.isNull("b19_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b19_feature"))
+
+
+          val origin_data = List(
+            (b1, b2, b3, "b123"), (b1, b6, b7, "b167"),
+            (b8, b9, b10, "b8910"), (b11, b12, b13, "b111213"),
+            (b17, b18, b19, "b171819")
+          )
+          for ((b_1, b_2, b_3, prefix1) <- origin_data){
+            for (prefix2 <- List(
+              "1h", "2h", "3h", "4h", "12h", "1d", "3d", "7d"
+            )){
+              val exp = if (b_1.isEmpty) 0D else b_1.getIntValue("exp_pv_" + prefix2).toDouble
+              val share = if (b_2.isEmpty) 0D else b_2.getIntValue("share_pv_" + prefix2).toDouble
+              val returns = if (b_3.isEmpty) 0D else b_3.getIntValue("return_uv_" + prefix2).toDouble
+              val f1 = RankExtractorFeature_20240530.calDiv(share, exp)
+              val f2 = RankExtractorFeature_20240530.calLog(share)
+              val f3 = RankExtractorFeature_20240530.calDiv(returns, exp)
+              val f4 = RankExtractorFeature_20240530.calLog(returns)
+              val f5 = f3 * f4
+              featureMap.put(prefix1 + "_" + prefix2 + "_" + "STR", f1)
+              featureMap.put(prefix1 + "_" + prefix2 + "_" + "log(share)", f2)
+              featureMap.put(prefix1 + "_" + prefix2 + "_" + "ROV", f3)
+              featureMap.put(prefix1 + "_" + prefix2 + "_" + "log(return)", f4)
+              featureMap.put(prefix1 + "_" + prefix2 + "_" + "ROV*log(return)", f5)
+            }
+          }
+
+          val video_info: JSONObject = if (record.isNull("t_v_info_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("t_v_info_feature"))
+          featureMap.put("total_time", if (video_info.containsKey("total_time")) video_info.getIntValue("total_time").toDouble else 0D)
+          featureMap.put("bit_rate", if (video_info.containsKey("bit_rate")) video_info.getIntValue("bit_rate").toDouble else 0D)
+
+          val c1: JSONObject = if (record.isNull("c1_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("c1_feature"))
+          if (c1.nonEmpty){
+            featureMap.put("playcnt_6h", if (c1.containsKey("playcnt_6h")) c1.getIntValue("playcnt_6h").toDouble else 0D)
+            featureMap.put("playcnt_1d", if (c1.containsKey("playcnt_1d")) c1.getIntValue("playcnt_1d").toDouble else 0D)
+            featureMap.put("playcnt_3d", if (c1.containsKey("playcnt_3d")) c1.getIntValue("playcnt_3d").toDouble else 0D)
+            featureMap.put("playcnt_7d", if (c1.containsKey("playcnt_7d")) c1.getIntValue("playcnt_7d").toDouble else 0D)
+          }
+          val c2: JSONObject = if (record.isNull("c2_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("c2_feature"))
+          if (c2.nonEmpty){
+            featureMap.put("share_pv_12h", if (c2.containsKey("share_pv_12h")) c2.getIntValue("share_pv_12h").toDouble else 0D)
+            featureMap.put("share_pv_1d", if (c2.containsKey("share_pv_1d")) c2.getIntValue("share_pv_1d").toDouble else 0D)
+            featureMap.put("share_pv_3d", if (c2.containsKey("share_pv_3d")) c2.getIntValue("share_pv_3d").toDouble else 0D)
+            featureMap.put("share_pv_7d", if (c2.containsKey("share_pv_7d")) c2.getIntValue("share_pv_7d").toDouble else 0D)
+            featureMap.put("return_uv_12h", if (c2.containsKey("return_uv_12h")) c2.getIntValue("return_uv_12h").toDouble else 0D)
+            featureMap.put("return_uv_1d", if (c2.containsKey("return_uv_1d")) c2.getIntValue("return_uv_1d").toDouble else 0D)
+            featureMap.put("return_uv_3d", if (c2.containsKey("return_uv_3d")) c2.getIntValue("return_uv_3d").toDouble else 0D)
+            featureMap.put("return_uv_7d", if (c2.containsKey("return_uv_7d")) c2.getIntValue("return_uv_7d").toDouble else 0D)
+          }
+
+          val title = if (video_info.containsKey("title")) video_info.getString("title") else ""
+          if (!title.equals("")){
+            for (key_feature <- List("c3_feature", "c4_feature", "c5_feature", "c6_feature", "c7_feature")){
+              val c34567: JSONObject = if (record.isNull(key_feature)) new JSONObject() else
+                JSON.parseObject(record.getString(key_feature))
+              for (key_time <- List("tags_1d", "tags_3d", "tags_7d")) {
+                val tags = if (c34567.containsKey(key_time)) c34567.getString(key_time) else ""
+                if (!tags.equals("")){
+                  val (f1, f2, f3, f4) = funcC34567ForTags(tags, title)
+                  featureMap.put(key_feature + "_" + key_time + "_matchnum", f1)
+                  featureMap.put(key_feature + "_" + key_time + "_maxscore", f3)
+                  featureMap.put(key_feature + "_" + key_time + "_avgscore", f4)
+                }
+              }
+            }
+          }
+
+          val vid = if (record.isNull("vid")) "" else record.getString("vid")
+          if (!vid.equals("")){
+            for (key_feature <- List("c8_feature", "c9_feature")){
+              val c89: JSONObject = if (record.isNull(key_feature)) new JSONObject() else
+                JSON.parseObject(record.getString(key_feature))
+              for (key_action <- List("share", "return")){
+                  val cfListStr = if (c89.containsKey(key_action)) c89.getString(key_action) else ""
+                  if (!cfListStr.equals("")){
+                    val cfMap = cfListStr.split(",").map(r =>{
+                      val rList = r.split(":")
+                      (rList(0), (rList(1), rList(2), rList(3)))
+                    }).toMap
+                    if (cfMap.contains(vid)){
+                      val (score, num, rank) = cfMap(vid)
+                      featureMap.put(key_feature + "_" + key_action + "_score", score.toDouble)
+                      featureMap.put(key_feature + "_" + key_action + "_num", num.toDouble)
+                      featureMap.put(key_feature + "_" + key_action + "_rank", 1.0 / rank.toDouble)
+                    }
+                  }
+              }
+            }
+          }
+
+          val d1: JSONObject = if (record.isNull("d1_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("d1_feature"))
+          if (d1.nonEmpty){
+            featureMap.put("d1_exp", if (d1.containsKey("exp")) d1.getString("exp").toDouble else 0D)
+            featureMap.put("d1_return_n", if (d1.containsKey("return_n")) d1.getString("return_n").toDouble else 0D)
+            featureMap.put("d1_rovn", if (d1.containsKey("rovn")) d1.getString("rovn").toDouble else 0D)
+          }
+
+
+          /*
+
+
+          视频:
+          曝光使用pv 分享使用pv 回流使用uv --> 1h 2h 3h 4h 12h 1d 3d 7d
+          STR log(share) ROV log(return) ROV*log(return)
+          40个特征组合
+          整体、整体曝光对应、推荐非冷启root、推荐冷启root、分省份root
+          200个特征值
+
+          视频:
+          视频时长、比特率
+
+          人:
+          播放次数 --> 6h 1d 3d 7d --> 4个
+          带回来的分享pv 回流uv --> 12h 1d 3d 7d --> 8个
+          人+vid-title:
+          播放点/回流点/分享点/累积分享/累积回流 --> 1d 3d 7d --> 匹配数量 语义最高相似度分 语义平均相似度分 --> 45个
+          人+vid-cf
+          基于分享行为/基于回流行为 -->  “分享cf”+”回流点击cf“ 相似分 相似数量 相似rank的倒数 --> 12个
+
+          头部视频:
+          曝光 回流 ROVn 3个特征
+
+          场景:
+          小时 星期 apptype city province pagesource 机器型号
+           */
+
+
+
+          //4 处理label信息。
+          val labels = new JSONObject
+          for (labelKey <- List(
+            "is_play", "is_share", "is_return", "noself_is_return", "return_uv", "noself_return_uv", "total_return_uv",
+            "share_pv", "total_share_uv"
+          )){
+            if (!record.isNull(labelKey)){
+              labels.put(labelKey, record.getString(labelKey))
+            }
+          }
+          //5 处理log key表头。
+          val apptype = record.getString("apptype")
+          val pagesource = record.getString("pagesource")
+          val mid = record.getString("mid")
+          // vid 已经提取了
+          val ts = record.getString("ts")
+          val abcode = record.getString("abcode")
+          val level = if (record.isNull("level")) "0" else record.getString("level")
+          val logKey = (apptype, pagesource, mid, vid, ts, abcode, level).productIterator.mkString(",")
+          val labelKey = labels.toString()
+          val featureKey = featureMap.toString()
+          //6 拼接数据,保存。
+          logKey + "\t" + labelKey + "\t" + featureKey
+
+        })
+
+      // 4 保存数据到hdfs
+      val savePartition = dt + hh
+      val hdfsPath = savePath + "/" + savePartition
+      if (hdfsPath.nonEmpty && hdfsPath.startsWith("/dw/recommend/model/")){
+        println("删除路径并开始数据写入:" + hdfsPath)
+        MyHdfsUtils.delete_hdfs_path(hdfsPath)
+        odpsData.coalesce(repartition).saveAsTextFile(hdfsPath, classOf[GzipCodec])
+      }else{
+        println("路径不合法,无法写入:" + hdfsPath)
+      }
+    }
+  }
+
+  def func(record: Record, schema: TableSchema): Record = {
+    record
+  }
+  def funcC34567ForTags(tags: String, title: String): Tuple4[Double, String, Double, Double] = {
+    // 匹配数量 匹配词 语义最高相似度分 语义平均相似度分
+    val tagsList = tags.split(",")
+    var d1 = 0.0
+    val d2 = new ArrayBuffer[String]()
+    var d3 = 0.0
+    var d4 = 0.0
+    for (tag <- tagsList){
+      if (title.contains(tag)){
+        d1 = d1 + 1.0
+        d2.add(tag)
+      }
+      val score = Similarity.conceptSimilarity(tag, title)
+      d3 = if (score > d3) score else d3
+      d4 = d4 + score
+    }
+    d4 = if (tagsList.nonEmpty) d4 / tagsList.size else d4
+    (d1, d2.mkString(","), d3, d4)
+  }
+}

+ 91 - 0
src/main/scala/com/aliyun/odps/spark/examples/makedata_qiao/makedata_14_valueData_20240705.scala

@@ -0,0 +1,91 @@
+package com.aliyun.odps.spark.examples.makedata_qiao
+
+import com.alibaba.fastjson.JSON
+import com.aliyun.odps.spark.examples.myUtils.{MyDateUtils, MyHdfsUtils, ParamUtils}
+import org.apache.hadoop.io.compress.GzipCodec
+import org.apache.spark.sql.SparkSession
+
+import scala.collection.JavaConversions._
+import scala.collection.mutable.ArrayBuffer
+import scala.io.Source
+/*
+
+ */
+
+object makedata_14_valueData_20240705 {
+  def main(args: Array[String]): Unit = {
+
+    val spark = SparkSession
+      .builder()
+      .appName(this.getClass.getName)
+      .getOrCreate()
+    val sc = spark.sparkContext
+
+    val loader = getClass.getClassLoader
+    val resourceUrl = loader.getResource("20240608_feature_name.txt")
+    val content =
+      if (resourceUrl != null) {
+        val content = Source.fromURL(resourceUrl).getLines().mkString("\n")
+        Source.fromURL(resourceUrl).close()
+        content
+      } else {
+        ""
+      }
+    println(content)
+    val contentList = content.split("\n")
+      .map(r=> r.replace(" ", "").replaceAll("\n", ""))
+      .filter(r=> r.nonEmpty).toList
+    val contentList_bc = sc.broadcast(contentList)
+
+
+
+    // 1 读取参数
+    val param = ParamUtils.parseArgs(args)
+    val beginStr = param.getOrElse("beginStr", "20230101")
+    val endStr = param.getOrElse("endStr", "20230101")
+    val readPath = param.getOrElse("readPath", "/dw/recommend/model/13_sample_data/")
+    val savePath = param.getOrElse("savePath", "/dw/recommend/model/14_feature_data/")
+    val repartition = param.getOrElse("repartition", "200").toInt
+    val dateRange = MyDateUtils.getDateRange(beginStr, endStr)
+    for (date <- dateRange) {
+      val data = sc.textFile(readPath + "/" + date + "*")
+      val data1 = data.map(r => {
+        val rList = r.split("\t")
+        val logKey = rList(0)
+        val labelKey = rList(1)
+        val featureKey = rList(2)
+        (logKey, labelKey, featureKey)
+      }).filter(r =>
+        r._1.split(",")(6).equals("0")
+      ).mapPartitions(row => {
+        val result = new ArrayBuffer[String]()
+        val contentList = contentList_bc.value
+        row.foreach {
+          case (logKey, labelKey, featureKey) =>
+            val featureJson = JSON.parseObject(featureKey)
+
+            val featureValues = contentList.map(key => {
+              if (featureJson.containsKey(key)) {
+                featureJson.getDouble(key)
+              } else {
+                0.0
+              }
+            })
+            result.add(logKey + "\t" + labelKey + "\t" + featureValues.mkString(","))
+        }
+        result.iterator
+      })
+
+      // 4 保存数据到hdfs
+      val hdfsPath = savePath + "/" + date
+      if (hdfsPath.nonEmpty && hdfsPath.startsWith("/dw/recommend/model/")) {
+        println("删除路径并开始数据写入:" + hdfsPath)
+        MyHdfsUtils.delete_hdfs_path(hdfsPath)
+        data1.coalesce(repartition).saveAsTextFile(hdfsPath, classOf[GzipCodec])
+      } else {
+        println("路径不合法,无法写入:" + hdfsPath)
+      }
+    }
+
+  }
+}

+ 127 - 0
src/main/scala/com/aliyun/odps/spark/examples/makedata_qiao/makedata_16_bucketData_20240705.scala

@@ -0,0 +1,127 @@
+package com.aliyun.odps.spark.examples.makedata_qiao
+
+import com.alibaba.fastjson.JSON
+import com.aliyun.odps.spark.examples.myUtils.{MyDateUtils, MyHdfsUtils, ParamUtils}
+import examples.extractor.ExtractorUtils
+import org.apache.hadoop.io.compress.GzipCodec
+import org.apache.spark.sql.SparkSession
+
+import scala.collection.JavaConversions._
+import scala.collection.mutable.ArrayBuffer
+import scala.io.Source
+/*
+
+ */
+
+object makedata_16_bucketData_20240705 {
+  def main(args: Array[String]): Unit = {
+
+    val spark = SparkSession
+      .builder()
+      .appName(this.getClass.getName)
+      .getOrCreate()
+    val sc = spark.sparkContext
+
+    val loader = getClass.getClassLoader
+    val resourceUrl = loader.getResource("20240608_feature_name.txt")
+    val content =
+      if (resourceUrl != null) {
+        val content = Source.fromURL(resourceUrl).getLines().mkString("\n")
+        Source.fromURL(resourceUrl).close()
+        content
+      } else {
+        ""
+      }
+    println(content)
+    val contentList = content.split("\n")
+      .map(r=> r.replace(" ", "").replaceAll("\n", ""))
+      .filter(r=> r.nonEmpty).toList
+    val contentList_br = sc.broadcast(contentList)
+
+    val resourceUrlBucket = loader.getResource("20240609_bucket_274.txt")
+    val buckets =
+      if (resourceUrlBucket != null) {
+        val buckets = Source.fromURL(resourceUrlBucket).getLines().mkString("\n")
+        Source.fromURL(resourceUrlBucket).close()
+        buckets
+      } else {
+        ""
+      }
+    println(buckets)
+    val bucketsMap = buckets.split("\n")
+      .map(r => r.replace(" ", "").replaceAll("\n", ""))
+      .filter(r => r.nonEmpty)
+      .map(r =>{
+        val rList = r.split("\t")
+        (rList(0), (rList(1).toDouble, rList(2).split(",").map(_.toDouble)))
+      }).toMap
+    val bucketsMap_br = sc.broadcast(bucketsMap)
+
+
+    // 1 读取参数
+    val param = ParamUtils.parseArgs(args)
+    val readPath = param.getOrElse("readPath", "/dw/recommend/model/14_feature_data/")
+    val savePath = param.getOrElse("savePath", "/dw/recommend/model/16_train_data/")
+    val beginStr = param.getOrElse("beginStr", "20240606")
+    val endStr = param.getOrElse("endStr", "20240607")
+    val repartition = param.getOrElse("repartition", "200").toInt
+
+    val dateRange = MyDateUtils.getDateRange(beginStr, endStr)
+    for (date <- dateRange) {
+      println("开始执行:" + date)
+      val data = sc.textFile(readPath + date).map(r=>{
+        val rList = r.split("\t")
+        val logKey = rList(0)
+        val labelKey = rList(1)
+        val features = rList(2).split(",").map(_.toDouble)
+        (logKey, labelKey, features)
+      })
+        .filter{
+          case (logKey, labelKey, features) =>
+            val logKeyList = logKey.split(",")
+            val apptype = logKeyList(0)
+            val pagesource = logKeyList(1)
+            Set("0", "4", "5", "21", "3", "6").contains(apptype) && pagesource.endsWith("recommend")
+        }
+        .map{
+          case (logKey, labelKey, features) =>
+            val label = JSON.parseObject(labelKey).getOrDefault("is_return", "0").toString
+            (label, features)
+        }
+        .mapPartitions(row => {
+        val result = new ArrayBuffer[String]()
+        val contentList = contentList_br.value
+        val bucketsMap = bucketsMap_br.value
+        row.foreach{
+          case (label, features) =>
+            val featuresBucket = contentList.indices.map(i =>{
+              val featureName = contentList(i)
+              val score = features(i)
+              if (score > 1E-8){
+                val (bucketNum, buckets) = bucketsMap(featureName)
+                val scoreNew = 1.0 / bucketNum * (ExtractorUtils.findInsertPosition(buckets, score).toDouble + 1.0)
+                featureName + ":" + scoreNew.toString
+              }else{
+                ""
+              }
+            }).filter(_.nonEmpty)
+            result.add(label + "\t" + featuresBucket.mkString("\t"))
+        }
+        result.iterator
+      })
+
+      // 4 保存数据到hdfs
+      val hdfsPath = savePath + "/" + date
+      if (hdfsPath.nonEmpty && hdfsPath.startsWith("/dw/recommend/model/")) {
+        println("删除路径并开始数据写入:" + hdfsPath)
+        MyHdfsUtils.delete_hdfs_path(hdfsPath)
+        data.repartition(repartition).saveAsTextFile(hdfsPath, classOf[GzipCodec])
+      } else {
+        println("路径不合法,无法写入:" + hdfsPath)
+      }
+    }
+
+
+
+  }
+}

+ 280 - 0
src/main/scala/com/aliyun/odps/spark/examples/makedata_recsys/makedata_recsys_41_originData_20240709.scala

@@ -0,0 +1,280 @@
+package com.aliyun.odps.spark.examples.makedata_recsys
+
+import com.alibaba.fastjson.{JSON, JSONObject}
+import com.aliyun.odps.TableSchema
+import com.aliyun.odps.data.Record
+import com.aliyun.odps.spark.examples.myUtils.{MyDateUtils, MyHdfsUtils, ParamUtils, env}
+import examples.extractor.RankExtractorFeature_20240530
+import org.apache.hadoop.io.compress.GzipCodec
+import org.apache.spark.sql.SparkSession
+import org.xm.Similarity
+
+import scala.collection.JavaConversions._
+import scala.collection.mutable.ArrayBuffer
+/*
+   20240608 提取特征
+ */
+
+object makedata_recsys_41_originData_20240709 {
+  def main(args: Array[String]): Unit = {
+    val spark = SparkSession
+      .builder()
+      .appName(this.getClass.getName)
+      .getOrCreate()
+    val sc = spark.sparkContext
+
+    // 1 读取参数
+    val param = ParamUtils.parseArgs(args)
+    val tablePart = param.getOrElse("tablePart", "64").toInt
+    val beginStr = param.getOrElse("beginStr", "2023010100")
+    val endStr = param.getOrElse("endStr", "2023010123")
+    val savePath = param.getOrElse("savePath", "/dw/recommend/model/41_sample_data/")
+    val project = param.getOrElse("project", "loghubods")
+    val table = param.getOrElse("table", "XXXX")
+    val repartition = param.getOrElse("repartition", "32").toInt
+
+    // 2 读取odps+表信息
+    val odpsOps = env.getODPS(sc)
+
+    // 3 循环执行数据生产
+    val timeRange = MyDateUtils.getDateHourRange(beginStr, endStr)
+    for (dt_hh <- timeRange) {
+      val dt = dt_hh.substring(0, 8)
+      val hh = dt_hh.substring(8, 10)
+      val partition = s"dt=$dt,hh=$hh"
+      println("开始执行partiton:" + partition)
+      val odpsData = odpsOps.readTable(project = project,
+          table = table,
+          partition = partition,
+          transfer = func,
+          numPartition = tablePart)
+        .map(record => {
+
+          val featureMap = new JSONObject()
+
+          // a 视频特征
+          val b1: JSONObject = if (record.isNull("b1_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b1_feature"))
+          val b2: JSONObject = if (record.isNull("b2_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b2_feature"))
+          val b3: JSONObject = if (record.isNull("b3_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b3_feature"))
+          val b6: JSONObject = if (record.isNull("b6_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b6_feature"))
+          val b7: JSONObject = if (record.isNull("b7_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b7_feature"))
+
+          val b8: JSONObject = if (record.isNull("b8_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b8_feature"))
+          val b9: JSONObject = if (record.isNull("b9_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b9_feature"))
+          val b10: JSONObject = if (record.isNull("b10_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b10_feature"))
+          val b11: JSONObject = if (record.isNull("b11_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b11_feature"))
+          val b12: JSONObject = if (record.isNull("b12_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b12_feature"))
+          val b13: JSONObject = if (record.isNull("b13_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b13_feature"))
+          val b17: JSONObject = if (record.isNull("b17_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b17_feature"))
+          val b18: JSONObject = if (record.isNull("b18_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b18_feature"))
+          val b19: JSONObject = if (record.isNull("b19_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b19_feature"))
+
+
+          val origin_data = List(
+            (b1, b2, b3, "b123"), (b1, b6, b7, "b167"),
+            (b8, b9, b10, "b8910"), (b11, b12, b13, "b111213"),
+            (b17, b18, b19, "b171819")
+          )
+          for ((b_1, b_2, b_3, prefix1) <- origin_data) {
+            for (prefix2 <- List(
+              "1h", "2h", "3h", "4h", "12h", "1d", "3d", "7d"
+            )) {
+              val exp = if (b_1.isEmpty) 0D else b_1.getIntValue("exp_pv_" + prefix2).toDouble
+              val share = if (b_2.isEmpty) 0D else b_2.getIntValue("share_pv_" + prefix2).toDouble
+              val returns = if (b_3.isEmpty) 0D else b_3.getIntValue("return_uv_" + prefix2).toDouble
+              val f1 = RankExtractorFeature_20240530.calDiv(share, exp)
+              val f2 = RankExtractorFeature_20240530.calLog(share)
+              val f3 = RankExtractorFeature_20240530.calDiv(returns, exp)
+              val f4 = RankExtractorFeature_20240530.calLog(returns)
+              val f5 = f3 * f4
+              val f6 = RankExtractorFeature_20240530.calDiv(returns, share)
+              featureMap.put(prefix1 + "_" + prefix2 + "_" + "STR", f1)
+              featureMap.put(prefix1 + "_" + prefix2 + "_" + "log(share)", f2)
+              featureMap.put(prefix1 + "_" + prefix2 + "_" + "ROV", f3)
+              featureMap.put(prefix1 + "_" + prefix2 + "_" + "log(return)", f4)
+              featureMap.put(prefix1 + "_" + prefix2 + "_" + "ROV*log(return)", f5)
+              featureMap.put(prefix1 + "_" + prefix2 + "_" + "ROS", f6)
+            }
+          }
+
+          val video_info: JSONObject = if (record.isNull("t_v_info_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("t_v_info_feature"))
+          featureMap.put("total_time", if (video_info.containsKey("total_time")) video_info.getIntValue("total_time").toDouble else 0D)
+          featureMap.put("bit_rate", if (video_info.containsKey("bit_rate")) video_info.getIntValue("bit_rate").toDouble else 0D)
+
+          val c1: JSONObject = if (record.isNull("c1_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("c1_feature"))
+          if (c1.nonEmpty) {
+            featureMap.put("playcnt_6h", if (c1.containsKey("playcnt_6h")) c1.getIntValue("playcnt_6h").toDouble else 0D)
+            featureMap.put("playcnt_1d", if (c1.containsKey("playcnt_1d")) c1.getIntValue("playcnt_1d").toDouble else 0D)
+            featureMap.put("playcnt_3d", if (c1.containsKey("playcnt_3d")) c1.getIntValue("playcnt_3d").toDouble else 0D)
+            featureMap.put("playcnt_7d", if (c1.containsKey("playcnt_7d")) c1.getIntValue("playcnt_7d").toDouble else 0D)
+          }
+          val c2: JSONObject = if (record.isNull("c2_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("c2_feature"))
+          if (c2.nonEmpty) {
+            featureMap.put("share_pv_12h", if (c2.containsKey("share_pv_12h")) c2.getIntValue("share_pv_12h").toDouble else 0D)
+            featureMap.put("share_pv_1d", if (c2.containsKey("share_pv_1d")) c2.getIntValue("share_pv_1d").toDouble else 0D)
+            featureMap.put("share_pv_3d", if (c2.containsKey("share_pv_3d")) c2.getIntValue("share_pv_3d").toDouble else 0D)
+            featureMap.put("share_pv_7d", if (c2.containsKey("share_pv_7d")) c2.getIntValue("share_pv_7d").toDouble else 0D)
+            featureMap.put("return_uv_12h", if (c2.containsKey("return_uv_12h")) c2.getIntValue("return_uv_12h").toDouble else 0D)
+            featureMap.put("return_uv_1d", if (c2.containsKey("return_uv_1d")) c2.getIntValue("return_uv_1d").toDouble else 0D)
+            featureMap.put("return_uv_3d", if (c2.containsKey("return_uv_3d")) c2.getIntValue("return_uv_3d").toDouble else 0D)
+            featureMap.put("return_uv_7d", if (c2.containsKey("return_uv_7d")) c2.getIntValue("return_uv_7d").toDouble else 0D)
+          }
+
+          val title = if (video_info.containsKey("title")) video_info.getString("title") else ""
+          if (!title.equals("")) {
+            for (key_feature <- List("c3_feature", "c4_feature", "c5_feature", "c6_feature", "c7_feature")) {
+              val c34567: JSONObject = if (record.isNull(key_feature)) new JSONObject() else
+                JSON.parseObject(record.getString(key_feature))
+              for (key_time <- List("tags_1d", "tags_3d", "tags_7d")) {
+                val tags = if (c34567.containsKey(key_time)) c34567.getString(key_time) else ""
+                if (!tags.equals("")) {
+                  val (f1, f2, f3, f4) = funcC34567ForTags(tags, title)
+                  featureMap.put(key_feature + "_" + key_time + "_matchnum", f1)
+                  featureMap.put(key_feature + "_" + key_time + "_maxscore", f3)
+                  featureMap.put(key_feature + "_" + key_time + "_avgscore", f4)
+                }
+              }
+            }
+          }
+
+          val vid = if (record.isNull("vid")) "" else record.getString("vid")
+          if (!vid.equals("")) {
+            for (key_feature <- List("c8_feature", "c9_feature")) {
+              val c89: JSONObject = if (record.isNull(key_feature)) new JSONObject() else
+                JSON.parseObject(record.getString(key_feature))
+              for (key_action <- List("share", "return")) {
+                val cfListStr = if (c89.containsKey(key_action)) c89.getString(key_action) else ""
+                if (!cfListStr.equals("")) {
+                  val cfMap = cfListStr.split(",").map(r => {
+                    val rList = r.split(":")
+                    (rList(0), (rList(1), rList(2), rList(3)))
+                  }).toMap
+                  if (cfMap.contains(vid)) {
+                    val (score, num, rank) = cfMap(vid)
+                    featureMap.put(key_feature + "_" + key_action + "_score", score.toDouble)
+                    featureMap.put(key_feature + "_" + key_action + "_num", num.toDouble)
+                    featureMap.put(key_feature + "_" + key_action + "_rank", 1.0 / rank.toDouble)
+                  }
+                }
+              }
+            }
+          }
+
+          val d1: JSONObject = if (record.isNull("d1_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("d1_feature"))
+          if (d1.nonEmpty) {
+            featureMap.put("d1_exp", if (d1.containsKey("exp")) d1.getString("exp").toDouble else 0D)
+            featureMap.put("d1_return_n", if (d1.containsKey("return_n")) d1.getString("return_n").toDouble else 0D)
+            featureMap.put("d1_rovn", if (d1.containsKey("rovn")) d1.getString("rovn").toDouble else 0D)
+          }
+
+
+          /*
+
+
+          视频:
+          曝光使用pv 分享使用pv 回流使用uv --> 1h 2h 3h 4h 12h 1d 3d 7d
+          STR log(share) ROV log(return) ROV*log(return) ROS
+          48个特征组合
+          整体、整体曝光对应、推荐非冷启root、推荐冷启root、分省份root
+          240个特征值
+
+          视频:
+          视频时长、比特率
+
+          人:
+          播放次数 --> 6h 1d 3d 7d --> 4个
+          带回来的分享pv 回流uv --> 12h 1d 3d 7d --> 8个
+          人+vid-title:
+          播放点/回流点/分享点/累积分享/累积回流 --> 1d 3d 7d --> 匹配数量 语义最高相似度分 语义平均相似度分 --> 45个
+          人+vid-cf
+          基于分享行为/基于回流行为 -->  “分享cf”+”回流点击cf“ 相似分 相似数量 相似rank的倒数 --> 12个
+
+          头部视频:
+          曝光 回流 ROVn 3个特征
+
+          场景:
+          小时 星期 apptype city province pagesource 机器型号
+           */
+
+
+          //4 处理label信息。
+          val labels = new JSONObject
+          for (labelKey <- List(
+            "is_play", "is_share", "is_return", "noself_is_return", "return_uv", "noself_return_uv", "total_return_uv",
+            "share_pv", "total_share_uv"
+          )) {
+            if (!record.isNull(labelKey)) {
+              labels.put(labelKey, record.getString(labelKey))
+            }
+          }
+          //5 处理log key表头。
+          val apptype = record.getString("apptype")
+          val pagesource = record.getString("pagesource")
+          val mid = record.getString("mid")
+          // vid 已经提取了
+          val ts = record.getString("ts")
+          val abcode = record.getString("abcode")
+          val level = if (record.isNull("level")) "0" else record.getString("level")
+          val logKey = (apptype, pagesource, mid, vid, ts, abcode, level).productIterator.mkString(",")
+          val labelKey = labels.toString()
+          val featureKey = featureMap.toString()
+          //6 拼接数据,保存。
+          logKey + "\t" + labelKey + "\t" + featureKey
+
+        })
+
+      // 4 保存数据到hdfs
+      val savePartition = dt + hh
+      val hdfsPath = savePath + "/" + savePartition
+      if (hdfsPath.nonEmpty && hdfsPath.startsWith("/dw/recommend/model/")) {
+        println("删除路径并开始数据写入:" + hdfsPath)
+        MyHdfsUtils.delete_hdfs_path(hdfsPath)
+        odpsData.coalesce(repartition).saveAsTextFile(hdfsPath, classOf[GzipCodec])
+      } else {
+        println("路径不合法,无法写入:" + hdfsPath)
+      }
+    }
+  }
+
+  def func(record: Record, schema: TableSchema): Record = {
+    record
+  }
+
+  def funcC34567ForTags(tags: String, title: String): Tuple4[Double, String, Double, Double] = {
+    // 匹配数量 匹配词 语义最高相似度分 语义平均相似度分
+    val tagsList = tags.split(",")
+    var d1 = 0.0
+    val d2 = new ArrayBuffer[String]()
+    var d3 = 0.0
+    var d4 = 0.0
+    for (tag <- tagsList) {
+      if (title.contains(tag)) {
+        d1 = d1 + 1.0
+        d2.add(tag)
+      }
+      val score = Similarity.conceptSimilarity(tag, title)
+      d3 = if (score > d3) score else d3
+      d4 = d4 + score
+    }
+    d4 = if (tagsList.nonEmpty) d4 / tagsList.size else d4
+    (d1, d2.mkString(","), d3, d4)
+  }
+}
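
For reference, a hedged sketch of how makedata_recsys_41_originData_20240709 might be submitted, modeled on the commented-out spark-class2 submissions in qiaojialiang/xunlian_0724.sh. The executor sizes and the hour range are illustrative; the argument names (tablePart, beginStr, endStr, project, table, savePath, repartition) come from the ParamUtils defaults in the object above, and the table/savePath values match the variables used in the qiaojialiang shell scripts:

    /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
    --class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_41_originData_20240709 \
    --master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
    ../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
    tablePart:64 repartition:32 \
    beginStr:2024070900 endStr:2024070923 \
    project:loghubods table:alg_recsys_sample_all \
    savePath:/dw/recommend/model/41_recsys_sample_data/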

+ 103 - 0
src/main/scala/com/aliyun/odps/spark/examples/makedata_recsys/makedata_recsys_42_bucket_20240709.scala

@@ -0,0 +1,103 @@
+package com.aliyun.odps.spark.examples.makedata_recsys
+
+import com.alibaba.fastjson.JSON
+import com.aliyun.odps.spark.examples.myUtils.{MyHdfsUtils, ParamUtils}
+import org.apache.hadoop.io.compress.GzipCodec
+import org.apache.spark.sql.SparkSession
+
+import scala.collection.JavaConversions._
+import scala.collection.mutable.ArrayBuffer
+import scala.io.Source
+/*
+
+ */
+
+object makedata_recsys_42_bucket_20240709 {
+  def main(args: Array[String]): Unit = {
+
+    val spark = SparkSession
+      .builder()
+      .appName(this.getClass.getName)
+      .getOrCreate()
+    val sc = spark.sparkContext
+
+    val loader = getClass.getClassLoader
+    val resourceUrl = loader.getResource("20240709_recsys_feature_name_314.txt")
+    val content =
+      if (resourceUrl != null) {
+        val content = Source.fromURL(resourceUrl).getLines().mkString("\n")
+        Source.fromURL(resourceUrl).close()
+        content
+      } else {
+        ""
+      }
+    println(content)
+    val contentList = content.split("\n")
+      .map(r=> r.replace(" ", "").replaceAll("\n", ""))
+      .filter(r=> r.nonEmpty).toList
+
+
+
+    // 1 读取参数
+    val param = ParamUtils.parseArgs(args)
+    val readPath = param.getOrElse("readPath", "/dw/recommend/model/41_recsys_sample_data_v1/20240705*")
+    val savePath = param.getOrElse("savePath", "/dw/recommend/model/41_recsys_bucket/")
+    val fileName = param.getOrElse("fileName", "20240705_314_200")
+    val sampleRate = param.getOrElse("sampleRate", "1.0").toDouble
+    val bucketNum = param.getOrElse("bucketNum", "200").toInt
+
+    val data = sc.textFile(readPath)
+    println("问题数据数量:" + data.filter(r=>r.split("\t").length != 3).count())
+    val data1 = data.map(r => {
+      val rList = r.split("\t")
+      val jsons = JSON.parseObject(rList(2))
+      val doubles = scala.collection.mutable.Map[String, Double]()
+      jsons.foreach(r =>{
+        doubles.put(r._1, jsons.getDoubleValue(r._1))
+      })
+      doubles
+    }).sample(false, sampleRate ).repartition(20)
+
+    val result = new ArrayBuffer[String]()
+
+    for (i <- contentList.indices){
+      println("特征:" + contentList(i))
+      val data2 = data1.map(r => r.getOrDefault(contentList(i), 0D)).filter(_ > 1E-8).collect().sorted
+      val len = data2.length
+      if (len == 0){
+        result.add(contentList(i) + "\t" + bucketNum.toString + "\t" + "0")
+      }else{
+        val oneBucketNum = (len - 1) / (bucketNum - 1) + 1 // 确保每个桶至少有一个元素
+        val buffers = new ArrayBuffer[Double]()
+
+        var lastBucketValue = data2(0) // 记录上一个桶的切分点
+        for (j <- 0 until len by oneBucketNum) {
+          val d = data2(j)
+          if (j > 0 && d != lastBucketValue) {
+            // 如果当前切分点不同于上一个切分点,则保存当前切分点
+            buffers += d
+          }
+          lastBucketValue = d // 更新上一个桶的切分点
+        }
+
+        // 最后一个桶的结束点应该是数组的最后一个元素
+        if (!buffers.contains(data2.last)) {
+          buffers += data2.last
+        }
+        result.add(contentList(i) + "\t" + bucketNum.toString + "\t" + buffers.mkString(","))
+      }
+    }
+    val data3 = sc.parallelize(result)
+
+
+    // 4 保存数据到hdfs
+    val hdfsPath = savePath + "/" + fileName
+    if (hdfsPath.nonEmpty && hdfsPath.startsWith("/dw/recommend/model/")) {
+      println("删除路径并开始数据写入:" + hdfsPath)
+      MyHdfsUtils.delete_hdfs_path(hdfsPath)
+      data3.repartition(1).saveAsTextFile(hdfsPath, classOf[GzipCodec])
+    } else {
+      println("路径不合法,无法写入:" + hdfsPath)
+    }
+  }
+}
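
Similarly, a sketch of a submission for makedata_recsys_42_bucket_20240709 (values are illustrative; the parameter names match the defaults in the object above). Since the job collects each feature column to the driver to compute the quantile boundaries, a smaller sampleRate and a more generous driver memory are the practical knobs:

    /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
    --class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_42_bucket_20240709 \
    --master yarn --driver-memory 8G --executor-memory 4G --executor-cores 1 --num-executors 16 \
    ../target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
    readPath:/dw/recommend/model/41_recsys_sample_data/20240709* \
    savePath:/dw/recommend/model/41_recsys_bucket/ \
    fileName:20240709_recsys_bucket_314.txt \
    sampleRate:0.1 bucketNum:200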

+ 130 - 0
src/main/scala/com/aliyun/odps/spark/examples/makedata_recsys/makedata_recsys_43_bucketData_20240709.scala

@@ -0,0 +1,130 @@
+package com.aliyun.odps.spark.examples.makedata_recsys
+
+import com.alibaba.fastjson.JSON
+import com.aliyun.odps.spark.examples.myUtils.{MyDateUtils, MyHdfsUtils, ParamUtils}
+import examples.extractor.ExtractorUtils
+import org.apache.hadoop.io.compress.GzipCodec
+import org.apache.spark.sql.SparkSession
+
+import scala.collection.JavaConversions._
+import scala.collection.mutable.ArrayBuffer
+import scala.io.Source
+/*
+   Converts raw recsys samples into bucketed training data: maps each feature value to a bucket score using the boundaries file (default 20240709_recsys_bucket_314.txt), filters by app type, page source and feature-name prefixes, and writes "label \t name:score ..." lines to HDFS per day.
+ */
+
+object makedata_recsys_43_bucketData_20240709 {
+  def main(args: Array[String]): Unit = {
+
+    // 1 Read parameters
+    val param = ParamUtils.parseArgs(args)
+    val readPath = param.getOrElse("readPath", "/dw/recommend/model/41_recsys_sample_data_v1/")
+    val savePath = param.getOrElse("savePath", "/dw/recommend/model/43_recsys_train_data_v1/")
+    val beginStr = param.getOrElse("beginStr", "20240703")
+    val endStr = param.getOrElse("endStr", "20240703")
+    val repartition = param.getOrElse("repartition", "100").toInt
+    val filterNames = param.getOrElse("filterNames", "").split(",").toSet
+    val whatLabel = param.getOrElse("whatLabel", "is_return")
+    val whatApps = param.getOrElse("whatApps", "0,4,5,21,3,6").split(",").toSet
+    val fileName = param.getOrElse("fileName", "20240709_recsys_bucket_314.txt")
+
+    val spark = SparkSession
+      .builder()
+      .appName(this.getClass.getName)
+      .getOrCreate()
+    val sc = spark.sparkContext
+
+    val loader = getClass.getClassLoader
+
+    val resourceUrlBucket = loader.getResource(fileName)
+    val buckets =
+      if (resourceUrlBucket != null) {
+        // read the bucket-boundary resource once and make sure the reader is closed
+        val source = Source.fromURL(resourceUrlBucket)
+        try source.getLines().mkString("\n") finally source.close()
+      } else {
+        ""
+      }
+    println(buckets)
+    val bucketsMap = buckets.split("\n")
+      .map(r => r.replace(" ", "").replaceAll("\n", ""))
+      .filter(r => r.nonEmpty)
+      .map(r =>{
+        val rList = r.split("\t")
+        (rList(0), (rList(1).toDouble, rList(2).split(",").map(_.toDouble)))
+      }).toMap
+    val bucketsMap_br = sc.broadcast(bucketsMap)
+
+
+
+
+    val dateRange = MyDateUtils.getDateRange(beginStr, endStr)
+    for (date <- dateRange) {
+      println("开始执行:" + date)
+      val data = sc.textFile(readPath + "/" + date + "*").map(r=>{
+        val rList = r.split("\t")
+        val logKey = rList(0)
+        val labelKey = rList(1)
+        val jsons = JSON.parseObject(rList(2))
+        val features = scala.collection.mutable.Map[String, Double]()
+        jsons.foreach(r => {
+          features.put(r._1, jsons.getDoubleValue(r._1))
+        })
+        (logKey, labelKey, features)
+      })
+        .filter{
+          case (logKey, labelKey, features) =>
+            val logKeyList = logKey.split(",")
+            val apptype = logKeyList(0)
+            val pagesource = logKeyList(1)
+            whatApps.contains(apptype) && pagesource.endsWith("recommend")
+        }
+        .map{
+          case (logKey, labelKey, features) =>
+            val label = JSON.parseObject(labelKey).getOrDefault(whatLabel, "0").toString
+            (label, features)
+        }
+        .mapPartitions(row => {
+          val result = new ArrayBuffer[String]()
+          val bucketsMap = bucketsMap_br.value
+          row.foreach{
+            case (label, features) =>
+              val featuresBucket = features.map{
+                case (name, score) =>
+                  var ifFilter = false
+                  if (filterNames.nonEmpty){
+                    filterNames.foreach(r=> if (!ifFilter && name.startsWith(r)) {ifFilter = true} )
+                  }
+                  if (ifFilter){
+                    ""
+                  }else{
+                    if (score > 1E-8) {
+                      if (bucketsMap.contains(name)) {
+                        val (bucketsNum, buckets) = bucketsMap(name)
+                        val scoreNew = 1.0 / bucketsNum * (ExtractorUtils.findInsertPosition(buckets, score).toDouble + 1.0)
+                        name + ":" + scoreNew.toString
+                      } else {
+                        name + ":" + score.toString
+                      }
+                    } else {
+                      ""
+                    }
+                  }
+              }.filter(_.nonEmpty)
+              result += label + "\t" + featuresBucket.mkString("\t")
+          }
+          result.iterator
+      })
+
+      // 4 Save data to HDFS
+      val hdfsPath = savePath + "/" + date
+      if (hdfsPath.nonEmpty && hdfsPath.startsWith("/dw/recommend/model/")) {
+        println("Deleting existing path and starting the write: " + hdfsPath)
+        MyHdfsUtils.delete_hdfs_path(hdfsPath)
+        data.repartition(repartition).saveAsTextFile(hdfsPath, classOf[GzipCodec])
+      } else {
+        println("Invalid path, nothing written: " + hdfsPath)
+      }
+    }
+  }
+}
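
Note on the scoring above: each raw feature value is replaced by (insertPosition + 1) / bucketNum, where insertPosition comes from ExtractorUtils.findInsertPosition over that feature's boundary list. ExtractorUtils is not shown in this diff, so the sketch below uses an assumed stand-in (the count of boundaries strictly below the score) and hypothetical boundary values; it only illustrates the mapping, not the actual library call.

object BucketScoreSketch {
  // assumed stand-in for ExtractorUtils.findInsertPosition (not part of this diff):
  // the number of boundaries strictly below the score, i.e. its insertion index
  def findInsertPosition(buckets: Array[Double], score: Double): Int =
    buckets.count(_ < score)

  // mirrors the mapping in makedata_recsys_43_bucketData_20240709
  def bucketScore(buckets: Array[Double], bucketsNum: Double, score: Double): Double =
    1.0 / bucketsNum * (findInsertPosition(buckets, score).toDouble + 1.0)

  def main(args: Array[String]): Unit = {
    val buckets = Array(0.5, 1.5, 2.0) // hypothetical boundaries for one feature
    val bucketsNum = 4.0
    for (score <- Seq(0.3, 0.7, 1.6, 2.5)) {
      println(s"$score -> ${bucketScore(buckets, bucketsNum, score)}") // 0.25, 0.5, 0.75, 1.0
    }
  }
}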

+ 49 - 3
src/main/scala/com/aliyun/odps/spark/examples/临时记录的脚本-推荐

@@ -64,8 +64,14 @@ nohup /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.s
 ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
 savePath:/dw/recommend/model/04_str_data/ beginStr:20240311 endStr:20240312 featureVersion:v4 ifRepart:100 \
 > p7.log 2>&1 &
-
----
+---------------------------------------------------------------------------------------------
+---------------------------------------------------------------------------------------------
+---------------------------------------------------------------------------------------------
+----------------------- the commands below are the current reference -------------------------
+---------------------------------------------------------------------------------------------
+---------------------------------------------------------------------------------------------
+---------------------------------------------------------------------------------------------
+---------------------------------------------------------------------------------------------
 
 nohup /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
 --class com.aliyun.odps.spark.examples.makedata.makedata_13_originData_20240529 \
@@ -160,4 +166,44 @@ beginStr:2024062600 endStr:2024062623 \
 readDate:20240626 \
 > p17_20240626.log 2>&1 &
 
-/dw/recommend/model/17_for_check/
+/dw/recommend/model/17_for_check/
+
+
+------------------------------------------------------------------------------------------------------------------------
+
+nohup /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_41_originData_20240709 \
+--master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
+./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+tablePart:64 repartition:32 \
+beginStr:2024070508 endStr:2024070508 \
+savePath:/dw/recommend/model/41_recsys_sample_data/ \
+table:alg_recsys_sample_all \
+> p41_2024070508.log 2>&1 &
+
+nohup /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_42_bucket_20240709 \
+--master yarn --driver-memory 16G --executor-memory 1G --executor-cores 1 --num-executors 16 \
+--conf spark.driver.maxResultSize=16G \
+./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+readPath:/dw/recommend/model/41_recsys_sample_data_v1/20240705* \
+savePath:/dw/recommend/model/42_recsys_bucket/ \
+fileName:20240705_314_200 \
+bucketNum:200 sampleRate:1.0 \
+> p42.log 2>&1 &
+
+nohup /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+--class com.aliyun.odps.spark.examples.makedata_recsys.makedata_recsys_43_bucketData_20240709 \
+--master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
+./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+readPath:/dw/recommend/model/41_recsys_sample_data/ \
+savePath:/dw/recommend/model/43_recsys_train_data/ \
+beginStr:20240705 endStr:20240705 repartition:100 \
+filterNames:XXXXXXXXX \
+fileName:20240609_bucket_314.txt \
+whatLabel:is_return whatApps:0,4,21,3,6,17,23 \
+> p43_20240705.log 2>&1 &
+
+------------- 20240709_recsys_bucket_314.txt ------------ 20240609_bucket_274.txt -------------
+------------- filterNames:b123_1h_ROS,b123_2h_ROS,b123_3h_ROS,b123_4h_ROS,b123_12h_ROS,b123_1d_ROS,b123_3d_ROS,b123_7d_ROS,b167_1h_ROS,b167_2h_ROS,b167_3h_ROS,b167_4h_ROS,b167_12h_ROS,b167_1d_ROS,b167_3d_ROS,b167_7d_ROS,b8910_1h_ROS,b8910_2h_ROS,b8910_3h_ROS,b8910_4h_ROS,b8910_12h_ROS,b8910_1d_ROS,b8910_3d_ROS,b8910_7d_ROS,b111213_1h_ROS,b111213_2h_ROS,b111213_3h_ROS,b111213_4h_ROS,b111213_12h_ROS,b111213_1d_ROS,b111213_3d_ROS,b111213_7d_ROS,b171819_1h_ROS,b171819_2h_ROS,b171819_3h_ROS,b171819_4h_ROS,b171819_12h_ROS,b171819_1d_ROS,b171819_3d_ROS,b171819_7d_ROS \
+------------- filterNames:XXXXXXXXX \
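
The spark-submit commands above pass every option as a key:value token, and the jobs read them with ParamUtils.parseArgs(args).getOrElse(...). ParamUtils itself is not part of this diff, so the sketch below is only an assumed minimal equivalent, useful for checking how a command line such as beginStr:20240705 endStr:20240705 repartition:100 becomes the Map the jobs consume; the object name and the splitting rule are illustrative, not the actual utility.

object ParamParseSketch {
  // assumed behavior: split each "key:value" token on the first ':' only, so
  // values that are paths (savePath:/dw/recommend/model/...) keep their slashes
  def parseArgs(args: Array[String]): Map[String, String] =
    args.flatMap { arg =>
      val idx = arg.indexOf(':')
      if (idx > 0) Some(arg.substring(0, idx) -> arg.substring(idx + 1)) else None
    }.toMap

  def main(args: Array[String]): Unit = {
    val demo = Array("beginStr:20240705", "endStr:20240705", "repartition:100",
      "savePath:/dw/recommend/model/43_recsys_train_data/")
    val param = parseArgs(demo)
    println(param.getOrElse("repartition", "100").toInt) // 100
    println(param.getOrElse("savePath", ""))             // the path above
  }
}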
