@@ -0,0 +1,146 @@
+package com.aliyun.odps.spark.examples.makedata_ad.v20240718
+
+import com.alibaba.fastjson.JSON
+import com.aliyun.odps.TableSchema
+import com.aliyun.odps.data.Record
+import com.aliyun.odps.spark.examples.myUtils.{MyDateUtils, ParamUtils, env}
+import examples.extractor.ExtractorUtils
+import org.apache.spark.sql.SparkSession
+
+import scala.collection.JavaConversions._
+import scala.io.Source
+
+/*
+   Reads daily ad sample data (logKey, label JSON, feature JSON) from HDFS,
+   discretizes raw feature scores using a bucket config file bundled on the
+   classpath, and writes the resulting feature maps to an ODPS (Hive-compatible)
+   table via odpsOps.saveToTable.
+ */
+
+object makedata_ad_33_bucketDataToHive_20250110 {
+  def main(args: Array[String]): Unit = {
+
+    val spark = SparkSession
+      .builder()
+      .appName(this.getClass.getName)
+      .getOrCreate()
+    val sc = spark.sparkContext
+
+    // 1 Initialize the ODPS client and load the bucket config
+    val odpsOps = env.getODPS(sc)
+
+    val loader = getClass.getClassLoader
+
+    val resourceUrlBucket = loader.getResource("20250217_ad_bucket_688.txt")
+    val buckets =
+      if (resourceUrlBucket != null) {
+        val source = Source.fromURL(resourceUrlBucket)
+        try source.getLines().mkString("\n") finally source.close()
+      } else {
+        ""
+      }
+    println(buckets)
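+    // Bucket config line format (as parsed below): <featureName> \t <bucketCount> \t <comma-separated boundaries>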
+    val bucketsMap = buckets.split("\n")
+      .map(r => r.replace(" ", ""))
+      .filter(r => r.nonEmpty)
+      .map(r => {
+        val rList = r.split("\t")
+        (rList(0), (rList(1).toDouble, rList(2).split(",").map(_.toDouble)))
+      }).toMap
+    val bucketsMap_br = sc.broadcast(bucketsMap)
+
+    // 2 Parse job parameters
+    val param = ParamUtils.parseArgs(args)
+    val readPath = param.getOrElse("readPath", "/dw/recommend/model/31_ad_sample_data_v5/")
+    val beginStr = param.getOrElse("beginStr", "20250213")
+    val endStr = param.getOrElse("endStr", "20250213")
+    val filterNames = param.getOrElse("filterNames", "").split(",").filter(_.nonEmpty).toSet
+    val whatLabel = param.getOrElse("whatLabel", "ad_is_conversion")
+    val project = param.getOrElse("project", "loghubods")
+    val table = param.getOrElse("table", "ad_easyrec_train_data_v2")
+    val partition = param.getOrElse("partition", "dt=20250208")
+
+    val dateRange = MyDateUtils.getDateRange(beginStr, endStr)
+    for (date <- dateRange) {
+      println("Processing date: " + date)
+      val data = sc.textFile(readPath + "/" + date + "*").map(r => {
+        val rList = r.split("\t")
+        val logKey = rList(0)
+        val labelKey = rList(1)
+        val jsons = JSON.parseObject(rList(2))
+        val features = scala.collection.mutable.Map[String, Double]()
+        jsons.foreach(entry => {
+          features.put(entry._1, jsons.getDoubleValue(entry._1))
+        })
+        (logKey, labelKey, features)
+      })
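+      // apptype is the first comma-separated field of logKey; app types 12 and 13 are excluded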
+      val list = data
+        .filter {
+          case (logKey, labelKey, features) =>
+            val logKeyList = logKey.split(",")
+            val apptype = logKeyList(0)
+            !Set("12", "13").contains(apptype)
+        }
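+        // Keep features whose name matches none of filterNames and whose score is above 1e-8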
+        .map {
+          case (logKey, labelKey, features) =>
+            val label = JSON.parseObject(labelKey).getOrDefault(whatLabel, "0").toString
+            val bucketsMap = bucketsMap_br.value
+            var resultMap = features.collect {
+              case (name, score) if !filterNames.exists(name.contains) && score > 1E-8 =>
+                var key = name.replace("*", "_x_").replace("(view)", "_view")
+                if (key == "ad_is_click") {
+                  key = "has_click"
+                }
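+                // Discretize: bucketed value = (insertPosition + 1) / bucketCount, falling in (0, 1]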
+                val value = if (bucketsMap.contains(name)) {
+                  val (bucketsNum, buckets) = bucketsMap(name)
+                  1.0 / bucketsNum * (ExtractorUtils.findInsertPosition(buckets, score).toDouble + 1.0)
+                } else {
+                  score
+                }
+                key -> value.toString
+            }.toMap
+            resultMap += ("has_conversion" -> label)
+            resultMap += ("logkey" -> logKey)
+            resultMap
+        }
+
+      // 3 Write this date's rows to the ODPS (Hive-compatible) table
+      odpsOps.saveToTable(project, table, partition, list, write, defaultCreate = true, overwrite = true)
+    }
+  }
+
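+  // Writes one feature map into an ODPS Record, converting each value to the
+  // column's declared type (STRING / BIGINT / DOUBLE / BOOLEAN); lookup or
+  // conversion failures are logged and the field is skipped.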
+  def write(map: Map[String, String], record: Record, schema: TableSchema): Unit = {
+    for ((columnName, value) <- map) {
+      try {
+        // Look up the column's index in the table schema
+        val columnIndex = schema.getColumnIndex(columnName)
+        // Get the column's type
+        val columnType = schema.getColumn(columnIndex).getTypeInfo
+        try {
+          columnType.getTypeName match {
+            case "STRING" =>
+              record.setString(columnIndex, value)
+            case "BIGINT" =>
+              record.setBigint(columnIndex, value.toLong)
+            case "DOUBLE" =>
+              record.setDouble(columnIndex, value.toDouble)
+            case "BOOLEAN" =>
+              record.setBoolean(columnIndex, value.toBoolean)
+            case other =>
+              throw new IllegalArgumentException(s"Unsupported column type: $other")
+          }
+        } catch {
+          case e: NumberFormatException =>
+            println(s"Error converting value $value to type ${columnType.getTypeName} for column $columnName: ${e.getMessage}")
+          case e: Exception =>
+            println(s"Unexpected error writing value $value to column $columnName: ${e.getMessage}")
+        }
+      } catch {
+        case e: IllegalArgumentException =>
+          println(e.getMessage)
+      }
+    }
+  }
+}