
Add makedata_ad_32_bucket_20250110

StrayWarrior 3 months ago
parent
commit
e76c9ed4a7

+ 113 - 0
src/main/scala/com/aliyun/odps/spark/examples/makedata_ad/v20240718/makedata_ad_32_bucket_20250110.scala

@@ -0,0 +1,113 @@
+package com.aliyun.odps.spark.examples.makedata_ad.v20240718
+
+import com.alibaba.fastjson.JSON
+import com.aliyun.odps.spark.examples.myUtils.{MyHdfsUtils, ParamUtils}
+import org.apache.hadoop.io.compress.GzipCodec
+import org.apache.spark.Partitioner
+import org.apache.spark.sql.SparkSession
+
+import scala.collection.JavaConversions._
+import scala.collection.mutable.ArrayBuffer
+import scala.io.Source
+/*
+  Compute per-feature bucket split points (quantile boundaries) from the
+  flattened ad sample data and write one line per feature to HDFS.
+ */
+
+object makedata_ad_32_bucket_20250110 {
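+  // Partitioner that routes each feature name to a dedicated partition,
+  // so the split points for one feature can be computed within a single partition.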
+  class FeaturePartitioner(featureNames: Map[String, Int]) extends Partitioner {
+    override def numPartitions: Int = featureNames.size
+    override def getPartition(key: Any): Int = {
+      val featureName = key.asInstanceOf[String]
+      featureNames.getOrElse(featureName, 0)
+    }
+  }
+
+  def main(args: Array[String]): Unit = {
+
+    val spark = SparkSession
+      .builder()
+      .appName(this.getClass.getName)
+      .getOrCreate()
+    val sc = spark.sparkContext
+
+    // 1. Read parameters
+    val param = ParamUtils.parseArgs(args)
+    val readPath = param.getOrElse("readPath", "/dw/recommend/model/31_ad_sample_data/20240620*")
+    val savePath = param.getOrElse("savePath", "/dw/recommend/model/32_bucket_data/")
+    val fileName = param.getOrElse("fileName", "20240620_100")
+    val sampleRate = param.getOrElse("sampleRate", "1.0").toDouble
+    val bucketNum = param.getOrElse("bucketNum", "100").toInt
+    val featureNameFile = param.getOrElse("featureNameFile", "20240718_ad_feature_name.txt")
+
+    val loader = getClass.getClassLoader
+    val resourceUrl = loader.getResource(featureNameFile)
+    val featureNameContent =
+      if (resourceUrl != null) {
+        // Read the feature list once and close the source, instead of opening it twice
+        val source = Source.fromURL(resourceUrl)
+        try source.getLines().mkString("\n") finally source.close()
+      } else {
+        ""
+      }
+    println(featureNameContent)
+
+    val featureNames = featureNameContent.split("\n")
+      .map(r => r.replace(" ", ""))
+      .filter(r => r.nonEmpty).toList
+    val featureNamesSet = featureNames.toSet
+
+    val data = sc.textFile(readPath)
+
+    // 2. Read the samples (tab-separated, feature JSON in the third column), flatten to (featureName, value) pairs, and partition by feature
+    val flattenData = data
+      .sample(false, sampleRate)
+      .flatMap(r => {
+        val rList = r.split("\t")
+        val jsons = JSON.parseObject(rList(2))
+        jsons.map(e => (e._1, jsons.getDoubleValue(e._1)))
+      }).filter(r => r._2 > 1E-8)
+      .filter(r => featureNamesSet.contains(r._1))
+      .partitionBy(new FeaturePartitioner(featureNames.zipWithIndex.toMap))
+
+    // 3. Compute split points; after partitionBy, each partition holds exactly one feature
+    val resultRdd = flattenData.mapPartitions(iter => {
+      if (iter.isEmpty) {
+        Array[String]().iterator
+      } else {
+        val headValue = iter.next()
+        val key = headValue._1
+        val sortedValues = (Array(headValue._2) ++ iter.map(_._2).toArray).sorted
+        val len = sortedValues.length
+        val oneBucketNum = (len - 1) / (bucketNum - 1) + 1 // ensure each bucket holds at least one element
+        val buffers = new ArrayBuffer[Double]()
+
+        var lastBucketValue = sortedValues(0) // split point of the previous bucket
+        for (j <- 0 until len by oneBucketNum) {
+          val d = sortedValues(j)
+          if (j > 0 && d != lastBucketValue) {
+            // Keep the current split point only if it differs from the previous one
+            buffers += d
+          }
+          lastBucketValue = d // update the previous split point
+        }
+
+        // The last bucket must end at the final element of the array
+        if (!buffers.contains(sortedValues.last)) {
+          buffers += sortedValues.last
+        }
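+        // Emit one line per feature: <featureName>\t<bucketNum>\t<comma-separated split points>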
+        Array(key + "\t" + bucketNum.toString + "\t" + buffers.mkString(",")).iterator
+      }
+    })
+
+    // 4. Save the results to HDFS
+    val hdfsPath = savePath + "/" + fileName
+    if (hdfsPath.nonEmpty && fileName.nonEmpty && hdfsPath.startsWith("/dw/recommend/model/")) {
+      println("删除路径并开始数据写入:" + hdfsPath)
+      MyHdfsUtils.delete_hdfs_path(hdfsPath)
+      resultRdd.repartition(1).saveAsTextFile(hdfsPath, classOf[GzipCodec])
+    } else {
+      println("路径不合法,无法写入:" + hdfsPath)
+    }
+  }
+}
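
For reference, the split-point logic in step 3 can be exercised on its own. Below is a minimal standalone sketch of the same algorithm; the input values and bucketNum are made up for illustration and are not part of the commit:

    object BucketDemo {
      def main(args: Array[String]): Unit = {
        val bucketNum = 4
        // Toy input: must already be sorted, as in the Spark job
        val sortedValues = Array(0.1, 0.1, 0.2, 0.3, 0.3, 0.3, 0.5, 0.8, 0.9, 1.0)
        val len = sortedValues.length                      // 10
        val oneBucketNum = (len - 1) / (bucketNum - 1) + 1 // (10 - 1) / (4 - 1) + 1 = 4

        val buffers = scala.collection.mutable.ArrayBuffer[Double]()
        var lastBucketValue = sortedValues(0)
        for (j <- 0 until len by oneBucketNum) {           // j = 0, 4, 8
          val d = sortedValues(j)
          if (j > 0 && d != lastBucketValue) buffers += d
          lastBucketValue = d
        }
        if (!buffers.contains(sortedValues.last)) buffers += sortedValues.last

        println(buffers.mkString(","))                     // prints: 0.3,0.9,1.0
      }
    }

With these split points, a hypothetical feature named ctr_7d would be written to HDFS as the line ctr_7d\t4\t0.3,0.9,1.0, matching the <featureName>\t<bucketNum>\t<splitPoints> format produced in step 3. Note that the second column records the configured bucketNum, not the number of split points actually kept after deduplication.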