
Check data using metafeaturemap

zhangbo · 10 months ago · parent · commit 68a3114a88

+ 132 - 0
src/main/scala/com/aliyun/odps/spark/examples/makedata/makedata_16_bucketData_20240609_check.scala

@@ -0,0 +1,132 @@
+package com.aliyun.odps.spark.examples.makedata
+
+import com.alibaba.fastjson.JSON
+import com.aliyun.odps.spark.examples.myUtils.{MyDateUtils, MyHdfsUtils, ParamUtils}
+import examples.extractor.ExtractorUtils
+import org.apache.hadoop.io.compress.GzipCodec
+import org.apache.spark.sql.SparkSession
+
+import scala.collection.JavaConversions._
+import scala.collection.mutable.ArrayBuffer
+import scala.io.Source
+/*
+  Check variant of makedata_16_bucketData_20240609: reads feature rows from HDFS,
+  filters them by app type, page source, AB code and level, maps each raw feature
+  value into its bucket from 20240609_bucket_274.txt, and writes gzip-compressed
+  "label \t feature:bucketedScore ..." training lines back to HDFS.
+ */
+
+object makedata_16_bucketData_20240609_check {
+  def main(args: Array[String]): Unit = {
+
+    val spark = SparkSession
+      .builder()
+      .appName(this.getClass.getName)
+      .getOrCreate()
+    val sc = spark.sparkContext
+
+    val loader = getClass.getClassLoader
+    val resourceUrl = loader.getResource("20240608_feature_name.txt")
+    val content =
+      if (resourceUrl != null) {
+        val source = Source.fromURL(resourceUrl)
+        try source.getLines().mkString("\n") finally source.close()
+      } else {
+        ""
+      }
+    println(content)
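+    // 20240608_feature_name.txt is assumed to list one feature name per line,
+    // in the same order as the comma-separated feature scores in the input rows.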
+    val contentList = content.split("\n")
+      .map(r => r.replace(" ", ""))
+      .filter(r => r.nonEmpty).toList
+    val contentList_br = sc.broadcast(contentList)
+
+    val resourceUrlBucket = loader.getResource("20240609_bucket_274.txt")
+    val buckets =
+      if (resourceUrlBucket != null) {
+        val source = Source.fromURL(resourceUrlBucket)
+        try source.getLines().mkString("\n") finally source.close()
+      } else {
+        ""
+      }
+    println(buckets)
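+    // Each line of 20240609_bucket_274.txt is expected as:
+    //   featureName \t bucketNum \t boundary1,boundary2,...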
+    val bucketsMap = buckets.split("\n")
+      .map(r => r.replace(" ", ""))
+      .filter(r => r.nonEmpty)
+      .map(r => {
+        val rList = r.split("\t")
+        (rList(0), (rList(1).toDouble, rList(2).split(",").map(_.toDouble)))
+      }).toMap
+    val bucketsMap_br = sc.broadcast(bucketsMap)
+
+
+    // 1 Read parameters
+    val param = ParamUtils.parseArgs(args)
+    val readPath = param.getOrElse("readPath", "/dw/recommend/model/14_feature_data/")
+    val savePath = param.getOrElse("savePath", "/dw/recommend/model/16_train_data/")
+    val beginStr = param.getOrElse("beginStr", "20240606")
+    val endStr = param.getOrElse("endStr", "20240607")
+    val repartition = param.getOrElse("repartition", "200").toInt
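+    // Whitelists of app types and AB codes to keep (comma-separated params).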
+    val APPSETS = param.getOrElse("APPSETS", "3").split(",").filter(_.nonEmpty).toSet
+    val ABSETS = param.getOrElse("ABSETS", "ab0,ab1,ab2,ab3").split(",").filter(_.startsWith("ab")).toSet
+
+    val dateRange = MyDateUtils.getDateRange(beginStr, endStr)
+    for (date <- dateRange) {
+      println("开始执行:" + date)
+      val data = sc.textFile(readPath + date).map(r => {
+        val rList = r.split("\t")
+        val logKey = rList(0)
+        val labelKey = rList(1)
+        val features = rList(2).split(",").map(_.toDouble)
+        (logKey, labelKey, features)
+      })
+        .filter {
+          case (logKey, labelKey, features) =>
+            val logKeyList = logKey.split(",")
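+            // logKey layout (comma-separated): 0=apptype, 1=pagesource, 5=abcode, 6=level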
+            val apptype = logKeyList(0)
+            val pagesource = logKeyList(1)
+            val abcode = logKeyList(5)
+            val level = logKeyList(6)
+            APPSETS.contains(apptype) && pagesource.endsWith("recommend") &&
+              ABSETS.contains(abcode) && level.equals("0")
+        }
+        .map {
+          case (logKey, labelKey, features) =>
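+            // Label: the "is_return" flag from the JSON labelKey, "0" when absent.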
+            val label = JSON.parseObject(labelKey).getOrDefault("is_return", "0").toString
+            (label, features)
+        }
+        .mapPartitions(row => {
+          val result = new ArrayBuffer[String]()
+          val contentList = contentList_br.value
+          val bucketsMap = bucketsMap_br.value
+          row.foreach {
+            case (label, features) =>
+              val featuresBucket = contentList.indices.map(i => {
+                val featureName = contentList(i)
+                val score = features(i)
+                if (score > 1E-8) {
+                  // Bucketed value = (0-based insert position among boundaries + 1) / bucketNum,
+                  // i.e. one of bucketNum evenly spaced values in (0, 1].
+                  val (bucketNum, buckets) = bucketsMap(featureName)
+                  val scoreNew = 1.0 / bucketNum * (ExtractorUtils.findInsertPosition(buckets, score).toDouble + 1.0)
+                  featureName + ":" + scoreNew.toString
+                } else {
+                  ""
+                }
+              }).filter(_.nonEmpty)
+              result += label + "\t" + featuresBucket.mkString("\t")
+          }
+          result.iterator
+        })
+
+      // 4 Save data to HDFS
+      val hdfsPath = savePath + "/" + date
+      if (hdfsPath.nonEmpty && hdfsPath.startsWith("/dw/recommend/model/")) {
+        println("删除路径并开始数据写入:" + hdfsPath)
+        MyHdfsUtils.delete_hdfs_path(hdfsPath)
+        data.repartition(repartition).saveAsTextFile(hdfsPath, classOf[GzipCodec])
+      } else {
+        println("路径不合法,无法写入:" + hdfsPath)
+      }
+    }
+  }
+}
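
For reference, the bucketing rule above maps a raw feature score into one of bucketNum evenly spaced values in (0, 1]. Below is a minimal self-contained sketch; BucketSketch is a hypothetical name, and since ExtractorUtils.findInsertPosition is not shown in this commit, the sketch assumes it returns the 0-based insert position of the score among the sorted boundaries:

object BucketSketch {
  // Assumed stand-in for ExtractorUtils.findInsertPosition: the number of
  // boundaries strictly below the score, i.e. its 0-based insert position.
  def findInsertPosition(boundaries: Array[Double], score: Double): Int =
    boundaries.count(_ < score)

  def main(args: Array[String]): Unit = {
    val bucketNum = 4.0
    val boundaries = Array(0.1, 0.5, 0.9) // 3 boundaries -> 4 buckets
    for (score <- Seq(0.05, 0.3, 0.7, 0.95)) {
      val bucketed = 1.0 / bucketNum * (findInsertPosition(boundaries, score) + 1.0)
      println(s"raw=$score -> bucketed=$bucketed") // prints 0.25, 0.5, 0.75, 1.0
    }
  }
}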

+ 9 - 0
src/main/scala/com/aliyun/odps/spark/examples/临时记录的脚本

@@ -122,5 +122,14 @@ savePath:/dw/recommend/model/13_sample_data_check_print/ \
 table:alg_recsys_sample_all_new \
 > p13_2024061500_check.log 2>&1 &
 
+nohup /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
+--class com.aliyun.odps.spark.examples.makedata.makedata_16_bucketData_20240609_check \
+--master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
+./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
+readPath:/dw/recommend/model/14_feature_data_check/ \
+savePath:/dw/recommend/model/16_train_data_check/ \
+beginStr:20240615 endStr:20240615 repartition:1000 \
+> p16_data_check.log 2>&1 &
+
 /dw/recommend/model/13_sample_data_check/
 /dw/recommend/model/13_sample_data_check_print/