Joe 9 months ago
parent
commit
7826658225

+ 55 - 28
qiaojialiang/checkHiveDataUtil.py

@@ -15,19 +15,28 @@ def check_origin_hive(args):
     project = "loghubods"
     # table = "alg_recsys_view_sample_v2"
     table = args.table
-    partitionDt = args.partitionDt
-    partitionHh = args.partitionHh
-    count = check_data(project, table, partitionDt, partitionHh)
-    if count == 0:
-        print("1")
+    beginStr = args.beginStr
+    endStr = args.endStr
+    # Check every hour-level partition from beginStr to endStr; if at least
+    # one partition has data, the source counts as ready for processing.
+    # If all of them are empty, alert and fail.
+    time_sequence = generate_time_sequence(beginStr, endStr)
+    exist_partition = []
+    for time_str in time_sequence:
+        result = split_date_time(time_str)
+        partitionDt = result[0]
+        partitionHh = result[1]
+        count = check_data(project, table, partitionDt, partitionHh)
+        if count == 0:
+            print(f'partition dt={partitionDt}/hh={partitionHh} is empty')
+        else:
+            exist_partition.append(f'partition dt={partitionDt}/hh={partitionHh}, rows: {count}')
+    if len(exist_partition) == 0:
         exit(1)
     else:
-        # print('data exists, size:', count)
         bot = FeishuBot()
-        msg = (f'Recommendation model data update \n --step1 [verify hive data source][success]:\n'
-               f'{project}.{table}, partition: dt={partitionDt}/hh={partitionHh}, total rows: {count}')
+        msg = (
+            f'Recommendation model data update \n --step1 [verify hive data source][success]:\n'
+            f' beginStr:{beginStr}, endStr:{endStr}\n detail:{exist_partition}')
         bot.send_message(msg)
-        print("0")
 
 
 def check_data(project, table, partitionDt, partitionDtHh) -> int:
@@ -68,27 +77,45 @@ def check_data(project, table, partitionDt, partitionDtHh) -> int:
     return data_count
 
 
+def generate_time_sequence(beginStr, endStr):
+    # Convert the boundary strings to datetime objects
+    from datetime import datetime, timedelta
+
+    # Partition timestamp format: YYYYMMDDHH
+    time_format = "%Y%m%d%H"
+
+    # Parse the begin/end boundaries
+    begin_time = datetime.strptime(beginStr, time_format)
+    end_time = datetime.strptime(endStr, time_format)
+
+    # Generate the hourly sequence, inclusive of both endpoints
+    time_sequence = []
+    current_time = begin_time
+    while current_time <= end_time:
+        # Format the datetime back to YYYYMMDDHH
+        time_sequence.append(current_time.strftime(time_format))
+        # Advance one hour
+        current_time += timedelta(hours=1)
+
+    return time_sequence
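+
+# Illustrative example:
+#   generate_time_sequence("2024061600", "2024061602")
+#   -> ["2024061600", "2024061601", "2024061602"]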
+
+
+def split_date_time(date_time_str):
+    # date_time_str is expected to be a 10-character string in YYYYMMDDHH format.
+    # Slice out the date part (first 8 characters) and the hour part (last 2).
+    date_part = date_time_str[:8]
+    time_part = date_time_str[8:10]  # hour only
+
+    # Return both parts as a list
+    result = [date_part, time_part]
+
+    return result
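+
+# e.g. split_date_time("2024061607") -> ["20240616", "07"]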
+
+
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='script utils')
-    # parser.add_argument('--excute_program', type=str, help='program to execute')
-    parser.add_argument('--partitionDt', type=str, help='partition Dt')
-    parser.add_argument('--partitionHh', type=str, help='partition Hh')
-    # parser.add_argument('--project', type=str, help='project')
+    parser.add_argument('--beginStr', type=str, help='start of the hour-level partition range, format YYYYMMDDHH')
+    parser.add_argument('--endStr', type=str, help='end of the hour-level partition range (inclusive), format YYYYMMDDHH')
     parser.add_argument('--table', type=str, help='table name')
     argv = parser.parse_args()
-    # args = parser.parse_args()
-    # table = argv[1]
-    # partition = argv[2]
-    # table = 'alg_recsys_sample_all'
-    # partition = '20240703'
     check_origin_hive(argv)
-
-# if __name__ == '__main__':
-#     project='1'
-#     table='1'
-#     partitionDt='1'
-#     partitionHh='1'
-#     count='1'
-#     bot = FeishuBot
-#     msg = f'read project:${project},table:${table}, partition: dt= ${partitionDt}/hh=${partitionHh}, total rows: ${count}'
-#     bot.send_message(msg)

+ 20 - 6
qiaojialiang/handle_rov.sh

@@ -1,17 +1,20 @@
 #!/bin/sh
 set -ex
 
-partitionDt="$(date -d '1 days ago' +%Y%m%d)"
-partitionHh="14"
-table='alg_recsys_sample_all_new'
+#partitionDt="$(date -d '1 days ago' +%Y%m%d)"
+#partitionHh="14"
+table='alg_recsys_sample_all_test'
+
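+# hour-level partition range, format YYYYMMDDHH (both ends inclusive)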
+beginStr=2024061600
+endStr=2024061623
 
 # 0 Check that the upstream table has finished producing; give up at max_hour:max_minute below
 source /root/anaconda3/bin/activate py37
 max_hour=11
 max_minute=00
-echo "开始校验是否生产完数据,分区信息:dt:${partitionDt},hh:${partitionHh}"
+echo "开始校验是否生产完数据,分区信息:beginStr:${beginStr},endStr:${endStr}"
 while true; do
-  python_return_code=$(python checkHiveDataUtil.py --table ${table} --partitionDt ${partitionDt} --partitionHh ${partitionHh})
+  # Capture the exit code rather than stdout: checkHiveDataUtil.py now prints
+  # log lines and signals failure via exit(1), so command substitution would
+  # capture log text instead of 0/1. The `|| ...` keeps `set -e` from aborting.
+  python_return_code=0
+  python checkHiveDataUtil.py --table ${table} --beginStr ${beginStr} --endStr ${endStr} || python_return_code=$?
   echo "python 返回值:${python_return_code}"
   if [ $python_return_code -eq 0 ]; then
     echo "Python程序返回0,校验存在数据,退出循环。"
@@ -25,4 +28,15 @@ while true; do
     echo "最长等待时间已到,失败:${current_hour}-${current_minute}"
     exit 1
   fi
-done
+done
+
+# 1 Produce the raw data (next step, still commented out)
+#nohup /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+#--class com.aliyun.odps.spark.examples.makedata.makedata_13_originData_20240529 \
+#--master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
+#./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+#tablePart:64 repartition:32 \
+#beginStr:${beginStr} endStr:${endStr} \
+#savePath:/dw/recommend/model/13_sample_data/ \
+#table:${table} \
+#> p13_2024061600.log 2>&1 &

+ 278 - 0
src/main/scala/com/aliyun/odps/spark/examples/makdir_qiao/makedata_13_originData_20240705.scala

@@ -0,0 +1,278 @@
+package com.aliyun.odps.spark.examples.makdir_qiao
+
+import com.alibaba.fastjson.{JSON, JSONObject}
+import com.aliyun.odps.TableSchema
+import com.aliyun.odps.data.Record
+import com.aliyun.odps.spark.examples.myUtils.{MyDateUtils, MyHdfsUtils, ParamUtils, env}
+import examples.extractor.RankExtractorFeature_20240530
+import org.apache.hadoop.io.compress.GzipCodec
+import org.apache.spark.sql.SparkSession
+import org.xm.Similarity
+
+import scala.collection.JavaConversions._
+import scala.collection.mutable.ArrayBuffer
+/*
+   20240608 feature extraction
+ */
+
+object makedata_13_originData_20240705 {
+  def main(args: Array[String]): Unit = {
+    val spark = SparkSession
+      .builder()
+      .appName(this.getClass.getName)
+      .getOrCreate()
+    val sc = spark.sparkContext
+
+    // 1 Read parameters
+    val param = ParamUtils.parseArgs(args)
+    val tablePart = param.getOrElse("tablePart", "64").toInt
+    val beginStr = param.getOrElse("beginStr", "2023010100")
+    val endStr = param.getOrElse("endStr", "2023010123")
+    val savePath = param.getOrElse("savePath", "/dw/recommend/model/13_sample_data/")
+    val project = param.getOrElse("project", "loghubods")
+    val table = param.getOrElse("table", "XXXX")
+    val repartition = param.getOrElse("repartition", "100").toInt
+
+    // 2 Read ODPS table info
+    val odpsOps = env.getODPS(sc)
+
+    // 3 Loop over the hours and produce data
+    val timeRange = MyDateUtils.getDateHourRange(beginStr, endStr)
+    for (dt_hh <- timeRange) {
+      val dt = dt_hh.substring(0, 8)
+      val hh = dt_hh.substring(8, 10)
+      val partition = s"dt=$dt,hh=$hh"
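+      // e.g. dt_hh = "2024070512" -> dt = "20240705", hh = "12", partition "dt=20240705,hh=12"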
+      println("开始执行partiton:" + partition)
+      val odpsData = odpsOps.readTable(project = project,
+        table = table,
+        partition = partition,
+        transfer = func,
+        numPartition = tablePart)
+        .map(record => {
+
+          val featureMap = new JSONObject()
+
+          // a Video features
+          val b1: JSONObject = if (record.isNull("b1_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b1_feature"))
+          val b2: JSONObject = if (record.isNull("b2_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b2_feature"))
+          val b3: JSONObject = if (record.isNull("b3_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b3_feature"))
+          val b6: JSONObject = if (record.isNull("b6_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b6_feature"))
+          val b7: JSONObject = if (record.isNull("b7_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b7_feature"))
+
+          val b8: JSONObject = if (record.isNull("b8_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b8_feature"))
+          val b9: JSONObject = if (record.isNull("b9_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b9_feature"))
+          val b10: JSONObject = if (record.isNull("b10_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b10_feature"))
+          val b11: JSONObject = if (record.isNull("b11_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b11_feature"))
+          val b12: JSONObject = if (record.isNull("b12_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b12_feature"))
+          val b13: JSONObject = if (record.isNull("b13_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b13_feature"))
+          val b17: JSONObject = if (record.isNull("b17_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b17_feature"))
+          val b18: JSONObject = if (record.isNull("b18_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b18_feature"))
+          val b19: JSONObject = if (record.isNull("b19_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("b19_feature"))
+
+
+          val origin_data = List(
+            (b1, b2, b3, "b123"), (b1, b6, b7, "b167"),
+            (b8, b9, b10, "b8910"), (b11, b12, b13, "b111213"),
+            (b17, b18, b19, "b171819")
+          )
+          for ((b_1, b_2, b_3, prefix1) <- origin_data){
+            for (prefix2 <- List(
+              "1h", "2h", "3h", "4h", "12h", "1d", "3d", "7d"
+            )){
+              val exp = if (b_1.isEmpty) 0D else b_1.getIntValue("exp_pv_" + prefix2).toDouble
+              val share = if (b_2.isEmpty) 0D else b_2.getIntValue("share_pv_" + prefix2).toDouble
+              val returns = if (b_3.isEmpty) 0D else b_3.getIntValue("return_uv_" + prefix2).toDouble
+              val f1 = RankExtractorFeature_20240530.calDiv(share, exp)
+              val f2 = RankExtractorFeature_20240530.calLog(share)
+              val f3 = RankExtractorFeature_20240530.calDiv(returns, exp)
+              val f4 = RankExtractorFeature_20240530.calLog(returns)
+              val f5 = f3 * f4
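+              // Hypothetical illustration: with exp=1000, share=50, returns=20,
+              // STR = 50/1000 and ROV = 20/1000 (assuming calDiv is a guarded
+              // division), and f5 = ROV * log(return).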
+              featureMap.put(prefix1 + "_" + prefix2 + "_" + "STR", f1)
+              featureMap.put(prefix1 + "_" + prefix2 + "_" + "log(share)", f2)
+              featureMap.put(prefix1 + "_" + prefix2 + "_" + "ROV", f3)
+              featureMap.put(prefix1 + "_" + prefix2 + "_" + "log(return)", f4)
+              featureMap.put(prefix1 + "_" + prefix2 + "_" + "ROV*log(return)", f5)
+            }
+          }
+
+          val video_info: JSONObject = if (record.isNull("t_v_info_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("t_v_info_feature"))
+          featureMap.put("total_time", if (video_info.containsKey("total_time")) video_info.getIntValue("total_time").toDouble else 0D)
+          featureMap.put("bit_rate", if (video_info.containsKey("bit_rate")) video_info.getIntValue("bit_rate").toDouble else 0D)
+
+          val c1: JSONObject = if (record.isNull("c1_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("c1_feature"))
+          if (c1.nonEmpty){
+            featureMap.put("playcnt_6h", if (c1.containsKey("playcnt_6h")) c1.getIntValue("playcnt_6h").toDouble else 0D)
+            featureMap.put("playcnt_1d", if (c1.containsKey("playcnt_1d")) c1.getIntValue("playcnt_1d").toDouble else 0D)
+            featureMap.put("playcnt_3d", if (c1.containsKey("playcnt_3d")) c1.getIntValue("playcnt_3d").toDouble else 0D)
+            featureMap.put("playcnt_7d", if (c1.containsKey("playcnt_7d")) c1.getIntValue("playcnt_7d").toDouble else 0D)
+          }
+          val c2: JSONObject = if (record.isNull("c2_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("c2_feature"))
+          if (c2.nonEmpty){
+            featureMap.put("share_pv_12h", if (c2.containsKey("share_pv_12h")) c2.getIntValue("share_pv_12h").toDouble else 0D)
+            featureMap.put("share_pv_1d", if (c2.containsKey("share_pv_1d")) c2.getIntValue("share_pv_1d").toDouble else 0D)
+            featureMap.put("share_pv_3d", if (c2.containsKey("share_pv_3d")) c2.getIntValue("share_pv_3d").toDouble else 0D)
+            featureMap.put("share_pv_7d", if (c2.containsKey("share_pv_7d")) c2.getIntValue("share_pv_7d").toDouble else 0D)
+            featureMap.put("return_uv_12h", if (c2.containsKey("return_uv_12h")) c2.getIntValue("return_uv_12h").toDouble else 0D)
+            featureMap.put("return_uv_1d", if (c2.containsKey("return_uv_1d")) c2.getIntValue("return_uv_1d").toDouble else 0D)
+            featureMap.put("return_uv_3d", if (c2.containsKey("return_uv_3d")) c2.getIntValue("return_uv_3d").toDouble else 0D)
+            featureMap.put("return_uv_7d", if (c2.containsKey("return_uv_7d")) c2.getIntValue("return_uv_7d").toDouble else 0D)
+          }
+
+          val title = if (video_info.containsKey("title")) video_info.getString("title") else ""
+          if (!title.equals("")){
+            for (key_feature <- List("c3_feature", "c4_feature", "c5_feature", "c6_feature", "c7_feature")){
+              val c34567: JSONObject = if (record.isNull(key_feature)) new JSONObject() else
+                JSON.parseObject(record.getString(key_feature))
+              for (key_time <- List("tags_1d", "tags_3d", "tags_7d")) {
+                val tags = if (c34567.containsKey(key_time)) c34567.getString(key_time) else ""
+                if (!tags.equals("")){
+                  val (f1, f2, f3, f4) = funcC34567ForTags(tags, title)
+                  featureMap.put(key_feature + "_" + key_time + "_matchnum", f1)
+                  featureMap.put(key_feature + "_" + key_time + "_maxscore", f3)
+                  featureMap.put(key_feature + "_" + key_time + "_avgscore", f4)
+                }
+              }
+            }
+          }
+
+          val vid = if (record.isNull("vid")) "" else record.getString("vid")
+          if (!vid.equals("")){
+            for (key_feature <- List("c8_feature", "c9_feature")){
+              val c89: JSONObject = if (record.isNull(key_feature)) new JSONObject() else
+                JSON.parseObject(record.getString(key_feature))
+              for (key_action <- List("share", "return")){
+                  val cfListStr = if (c89.containsKey(key_action)) c89.getString(key_action) else ""
+                  if (!cfListStr.equals("")){
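+                    // each comma-separated entry is assumed to be "vid:score:num:rank"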
+                    val cfMap = cfListStr.split(",").map(r =>{
+                      val rList = r.split(":")
+                      (rList(0), (rList(1), rList(2), rList(3)))
+                    }).toMap
+                    if (cfMap.contains(vid)){
+                      val (score, num, rank) = cfMap(vid)
+                      featureMap.put(key_feature + "_" + key_action + "_score", score.toDouble)
+                      featureMap.put(key_feature + "_" + key_action + "_num", num.toDouble)
+                      featureMap.put(key_feature + "_" + key_action + "_rank", 1.0 / rank.toDouble)
+                    }
+                  }
+              }
+            }
+          }
+
+          val d1: JSONObject = if (record.isNull("d1_feature")) new JSONObject() else
+            JSON.parseObject(record.getString("d1_feature"))
+          if (d1.nonEmpty){
+            featureMap.put("d1_exp", if (d1.containsKey("exp")) d1.getString("exp").toDouble else 0D)
+            featureMap.put("d1_return_n", if (d1.containsKey("return_n")) d1.getString("return_n").toDouble else 0D)
+            featureMap.put("d1_rovn", if (d1.containsKey("rovn")) d1.getString("rovn").toDouble else 0D)
+          }
+
+
+          /*
+          Video:
+          exposure uses pv, share uses pv, return uses uv --> 1h 2h 3h 4h 12h 1d 3d 7d
+          STR, log(share), ROV, log(return), ROV*log(return)
+          40 feature combinations
+          overall, overall-exposure counterpart, recommended non-cold-start root, recommended cold-start root, per-province root
+          200 feature values
+
+          Video:
+          duration, bit rate
+
+          User:
+          play count --> 6h 1d 3d 7d --> 4 features
+          share pv / return uv brought back --> 12h 1d 3d 7d --> 8 features
+          User + vid-title:
+          play / return / share points, cumulative shares, cumulative returns --> 1d 3d 7d --> match count, max semantic similarity, avg semantic similarity --> 45 features
+          User + vid-cf:
+          share-based / return-based behavior --> "share cf" + "return-click cf" similarity score, similar count, reciprocal of similar rank --> 12 features
+
+          Head videos:
+          exposure, return, ROVn --> 3 features
+
+          Context:
+          hour, weekday, apptype, city, province, pagesource, device model
+           */
+
+
+
+          // 4 Build the label info.
+          val labels = new JSONObject
+          for (labelKey <- List(
+            "is_play", "is_share", "is_return", "noself_is_return", "return_uv", "noself_return_uv", "total_return_uv",
+            "share_pv", "total_share_uv"
+          )){
+            if (!record.isNull(labelKey)){
+              labels.put(labelKey, record.getString(labelKey))
+            }
+          }
+          // 5 Build the log key header.
+          val apptype = record.getString("apptype")
+          val pagesource = record.getString("pagesource")
+          val mid = record.getString("mid")
+          // vid was already extracted above
+          val ts = record.getString("ts")
+          val abcode = record.getString("abcode")
+          val level = if (record.isNull("level")) "0" else record.getString("level")
+          val logKey = (apptype, pagesource, mid, vid, ts, abcode, level).productIterator.mkString(",")
+          val labelKey = labels.toString()
+          val featureKey = featureMap.toString()
+          // 6 Join the pieces and emit one line.
+          logKey + "\t" + labelKey + "\t" + featureKey
+
+        })
+
+      // 4 Save the data to HDFS
+      val savePartition = dt + hh
+      val hdfsPath = savePath + "/" + savePartition
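+      // one output directory per hour partition, e.g. <savePath>/2024070512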
+      if (hdfsPath.nonEmpty && hdfsPath.startsWith("/dw/recommend/model/")){
+        println("删除路径并开始数据写入:" + hdfsPath)
+        MyHdfsUtils.delete_hdfs_path(hdfsPath)
+        odpsData.coalesce(repartition).saveAsTextFile(hdfsPath, classOf[GzipCodec])
+      }else{
+        println("路径不合法,无法写入:" + hdfsPath)
+      }
+    }
+  }
+
+  def func(record: Record, schema: TableSchema): Record = {
+    record
+  }
+  def funcC34567ForTags(tags: String, title: String): Tuple4[Double, String, Double, Double] = {
+    // match count, matched words, max semantic similarity score, average semantic similarity score
+    val tagsList = tags.split(",")
+    var d1 = 0.0
+    val d2 = new ArrayBuffer[String]()
+    var d3 = 0.0
+    var d4 = 0.0
+    for (tag <- tagsList){
+      if (title.contains(tag)){
+        d1 = d1 + 1.0
+        d2 += tag
+      }
+      val score = Similarity.conceptSimilarity(tag, title)
+      d3 = if (score > d3) score else d3
+      d4 = d4 + score
+    }
+    d4 = if (tagsList.nonEmpty) d4 / tagsList.size else d4
+    (d1, d2.mkString(","), d3, d4)
+  }
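+  // e.g. funcC34567ForTags("cat,dog", "funny cat video") -> (1.0, "cat", maxScore, avgScore),
+  // where the scores come from Similarity.conceptSimilarity (values depend on the model)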
+}