@@ -0,0 +1,217 @@
+package com.tzld.piaoquan.recommend.model
+
+import com.alibaba.fastjson.JSON
+import com.tzld.piaoquan.recommend.utils.{MyHdfsUtils, ParamUtils}
+import ml.dmlc.xgboost4j.scala.spark.XGBoostClassifier
+import org.apache.commons.lang.math.NumberUtils
+import org.apache.commons.lang3.StringUtils
+import org.apache.hadoop.io.compress.GzipCodec
+import org.apache.spark.ml.evaluation.BinaryClassificationEvaluator
+import org.apache.spark.ml.feature.VectorAssembler
+import org.apache.spark.rdd.RDD
+import org.apache.spark.sql.types.DataTypes
+import org.apache.spark.sql.{Dataset, Row, SparkSession}
+
+import java.time.LocalDateTime
+import java.time.format.DateTimeFormatter
+import java.util
+import scala.io.Source
+
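+/**
+ * ROS binary-label XGBoost training job with per-sample weights.
+ * Loads bucketed feature data from HDFS, trains an XGBoostClassifier,
+ * writes gzip-compressed test predictions back to HDFS, and prints AUC
+ * plus per-vid score statistics.
+ */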
+object recsys_01_ros_binary_weight_xgb_train {
+  def main(args: Array[String]): Unit = {
+
+    val dt = DateTimeFormatter.ofPattern("yyyyMMddHHmm").format(LocalDateTime.now())
+
+    val spark = SparkSession
+      .builder()
+      .appName(this.getClass.getName + " : " + dt)
+      .getOrCreate()
+    val sc = spark.sparkContext
+
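+    // Job parameters (all overridable via ParamUtils.parseArgs): HDFS data paths,
+    // XGBoost hyper-parameters, and the logKey JSON field used as the sample weight.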
+    val param = ParamUtils.parseArgs(args)
+    val featureFile = param.getOrElse("featureFile", "20240703_ad_feature_name.txt")
+    val trainPath = param.getOrElse("trainPath", "/dw/recommend/model/43_recsys_ros_data_bucket/20250301")
+    val testPath = param.getOrElse("testPath", "/dw/recommend/model/43_recsys_ros_data_bucket/20250302")
+    val savePath = param.getOrElse("savePath", "/dw/recommend/model/44_recsys_ros_predict/")
+    val featureFilter = param.getOrElse("featureFilter", "XXXXXX").split(",")
+    val eta = param.getOrElse("eta", "0.01").toDouble
+    val gamma = param.getOrElse("gamma", "0.0").toDouble
+    val max_depth = param.getOrElse("max_depth", "5").toInt
+    val num_round = param.getOrElse("num_round", "100").toInt
+    val num_worker = param.getOrElse("num_worker", "20").toInt
+    val func_object = param.getOrElse("func_object", "binary:logistic")
+    val func_metric = param.getOrElse("func_metric", "auc")
+    val repartition = param.getOrElse("repartition", "20").toInt
+    val subsample = param.getOrElse("subsample", "0.8").toDouble
+    val modelPath = param.getOrElse("modelPath", "/dw/recommend/model/45_recommend_model/20250310_ros_binary_weight_1000")
+    val modelFile = param.getOrElse("modelFile", "model.tar.gz")
+    val weightField = param.getOrElse("weight_field", "return_n_uv")
+
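+    // Load the feature-name list from a classpath resource (one name per line).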
+    val loader = getClass.getClassLoader
+    val resourceUrl = loader.getResource(featureFile)
+    val content =
+      if (resourceUrl != null) {
+        val source = Source.fromURL(resourceUrl)
+        try source.getLines().mkString("\n") finally source.close()
+      } else {
+        ""
+      }
+    println(content)
+
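+    // Clean the feature list: strip whitespace, drop blanks and explicitly filtered names.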
+    val features = content.split("\n")
+      .map(r => r.replace(" ", ""))
+      .filter(r => r.nonEmpty && !featureFilter.contains(r))
+    println("features.size=" + features.length)
+
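+    // Each training row is [label, feature values..., logKey, weight]; see createData4Ad below.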
+    val trainData = createData4Ad(
+      sc.textFile(trainPath),
+      features,
+      weightField
+    )
+    println("zhangbo:train data size:" + trainData.count())
+
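+    // Schema: label first, one Double column per feature, then logKey and the sample weight.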
+    var fields = Array(
+      DataTypes.createStructField("label", DataTypes.IntegerType, true)
+    ) ++ features.map(f => DataTypes.createStructField(f, DataTypes.DoubleType, true))
+
+    fields = fields ++ Array(
+      DataTypes.createStructField("logKey", DataTypes.StringType, true),
+      DataTypes.createStructField("weight", DataTypes.DoubleType, true)
+    )
+    val schema = DataTypes.createStructType(fields)
+    val trainDataSet: Dataset[Row] = spark.createDataFrame(trainData, schema)
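+    // Assemble the per-feature columns into the single vector column XGBoost expects.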
+    val vectorAssembler = new VectorAssembler().setInputCols(features).setOutputCol("features")
+    val xgbInput = vectorAssembler.transform(trainDataSet).select("features", "label", "weight")
+    // val xgbParam = Map("eta" -> 0.01f,
+    //   "max_depth" -> 5,
+    //   "objective" -> "binary:logistic",
+    //   "num_class" -> 3)
+    val xgbClassifier = new XGBoostClassifier()
+      .setEta(eta)
+      .setGamma(gamma)
+      .setMissing(0.0f)
+      .setMaxDepth(max_depth)
+      .setNumRound(num_round)
+      .setSubsample(subsample)
+      .setColsampleBytree(0.8)
+      .setScalePosWeight(1)
+      .setObjective(func_object)
+      .setEvalMetric(func_metric)
+      .setFeaturesCol("features")
+      .setLabelCol("label")
+      .setWeightCol("weight")
+      .setNthread(1)
+      .setNumWorkers(num_worker)
+      .setSeed(2024)
+      .setMinChildWeight(1)
+    val model = xgbClassifier.fit(xgbInput)
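+    // Persist the Spark-ML model if both modelPath and modelFile are set;
+    // only modelPath is used as the save location.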
+
+    if (modelPath.nonEmpty && modelFile.nonEmpty) {
+      model.write.overwrite().save(modelPath)
+    }
+
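+    // Score the test day; keep label, raw score, probability and logKey for offline analysis.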
+    val testData = createData4Ad(
+      sc.textFile(testPath),
+      features,
+      weightField
+    )
+    val testDataSet = spark.createDataFrame(testData, schema)
+    val testDataSetTrans = vectorAssembler.transform(testDataSet).select("features", "label", "logKey")
+    val predictions = model.transform(testDataSetTrans)
+    // [label, features, probability, prediction, rawPrediction]
+    println("zhangbo:columns:" + predictions.columns.mkString(","))
+    val saveData = predictions.select("label", "rawPrediction", "probability", "logKey").rdd
+      .map(r => {
+        (r.get(0), r.get(1), r.get(2), r.get(3)).productIterator.mkString("\t")
+      })
+    val hdfsPath = savePath
+    if (hdfsPath.nonEmpty && hdfsPath.startsWith("/dw/recommend/model/")) {
+      println("Deleting existing path and writing data: " + hdfsPath)
+      MyHdfsUtils.delete_hdfs_path(hdfsPath)
+      saveData.repartition(repartition).saveAsTextFile(hdfsPath, classOf[GzipCodec])
+    } else {
+      println("Invalid path, skipping write: " + hdfsPath)
+    }
+
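+    // Offline AUC on the test predictions (the probability column is used as the ranking score).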
+    val evaluator = new BinaryClassificationEvaluator()
+      .setLabelCol("label")
+      .setRawPredictionCol("probability")
+      .setMetricName("areaUnderROC")
+    val auc = evaluator.evaluate(predictions.select("label", "probability"))
+    println("zhangbo:auc:" + auc)
+
+    // Per-vid score statistics: sample count, positive-label sum, score sum, and their means.
+    sc.textFile(hdfsPath).map(r => {
+      val rList = r.split("\t")
+      val vid = JSON.parseObject(rList(3)).getString("vid")
+      val score = rList(2).replace("[", "").replace("]", "")
+        .split(",")(1).toDouble
+      val label = rList(0).toDouble
+      (vid, (1, label, score))
+    }).reduceByKey {
+      case (a, b) => (a._1 + b._1, a._2 + b._2, a._3 + b._3)
+    }.map {
+      case (vid, (all, zheng, scores)) =>
+        (vid, all, zheng, scores, zheng / all, scores / all)
+    }.collect().sortBy(_._1).map(_.productIterator.mkString("\t")).foreach(println)
+
+  }
+
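+  /**
+   * Parses one TSV sample per line: logKey \t label \t f1:v1 \t f2:v2 ...
+   * logKey is a JSON string; weightField names the numeric field in it that is
+   * used as the sample weight (stored as value + 1).
+   * Output rows are shaped as [label, feature values..., logKey, weight].
+   */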
+  def createData4Ad(data: RDD[String], features: Array[String], weightField: String): RDD[Row] = {
+    data.map(r => {
+      val line: Array[String] = StringUtils.split(r, '\t')
+      val logKey = line(0)
+
+      val logJson = JSON.parseObject(logKey)
+      val weight = logJson.getDouble(weightField)
+
+      val label: Int = NumberUtils.toInt(line(1))
+      val map: util.Map[String, Double] = new util.HashMap[String, Double]
+      for (i <- 2 until line.length) {
+        val fv: Array[String] = StringUtils.split(line(i), ':')
+        map.put(fv(0), NumberUtils.toDouble(fv(1), 0.0))
+      }
+
+      val v: Array[Any] = new Array[Any](features.length + 3)
+      v(0) = label
+      for (i <- 0 until features.length) {
+        v(i + 1) = map.getOrDefault(features(i), 0.0d)
+      }
+      v(features.length + 1) = logKey
+      v(features.length + 2) = weight + 1
+      Row(v: _*)
+    })
+  }
+}
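+
+// Parameter dump, apparently logged from a previous training run; kept for reference: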
+//rabit_timeout -> -1
+//scale_pos_weight -> 1.0
+//seed -> 0
+//handle_invalid -> error
+//features_col -> features
+//label_col -> label
+//num_workers -> 1
+//subsample -> 0.8
+//max_depth -> 5
+//probability_col -> probability
+//raw_prediction_col -> rawPrediction
+//tree_limit -> 0
+//dmlc_worker_connect_retry -> 5
+//train_test_ratio -> 1.0
+//use_external_memory -> false
+//objective -> binary:logistic
+//eval_metric -> auc
+//num_round -> 1000
+//missing -> 0.0
+//rabit_ring_reduce_threshold -> 32768
+//tracker_conf -> TrackerConf(0,python,,)
+//eta -> 0.009999999776482582
+//colsample_bytree -> 0.8
+//allow_non_zero_for_missing -> false
+//nthread -> 8
+//prediction_col -> prediction
|