@@ -0,0 +1,75 @@
+package com.tzld.piaoquan.recommend.model
+
+import org.apache.spark.sql.SparkSession
+
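+// Per-cid evaluation of XGBoost ad prediction output: aggregates the record
+// count, positive-label count and predicted-score sum for each cid, then prints
+// one line per cid with the empirical positive rate and the mean predicted score.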
+object ana_01_xgb_ad_20240809 {
+  def main(args: Array[String]): Unit = {
+    val spark = SparkSession
+      .builder()
+      .appName(this.getClass.getName)
+      .getOrCreate()
+    val sc = spark.sparkContext
+
+    val param = ParamUtils.parseArgs(args)
+    val savePath = param.getOrElse("savePath", "/dw/recommend/model/34_ad_predict_data/")
+
+    val hdfsPath = savePath
+
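+    // Each prediction line is tab-separated: the label in column 0, a bracketed
+    // score pair "[s0,s1]" in column 2 (the second value is used as the positive
+    // score) and the cid in column 3.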
+    sc.textFile(hdfsPath).map(r => {
+      val rList = r.split("\t")
+      val cid = rList(3)
+      val score = rList(2).replace("[", "").replace("]", "")
+        .split(",")(1).toDouble
+      val label = rList(0).toDouble
+      // (record count, label sum, score sum) keyed by cid
+      (cid, (1, label, score))
+    }).reduceByKey {
+      // Sum the per-record triples for each cid.
+      case (a, b) => (a._1 + b._1, a._2 + b._2, a._3 + b._3)
+    }.map {
+      case (cid, (all, zheng, scores)) =>
+        // cid, records, positives, score sum, positive rate, mean predicted score
+        (cid, all, zheng, scores, zheng / all, scores / all)
+    }.collect().sortBy(_._1).map(_.productIterator.mkString("\t")).foreach(println)
+
+  }
+}