@@ -2,6 +2,7 @@ package com.tzld.piaoquan.recommend.feature.produce;
 
 import com.tzld.piaoquan.recommend.feature.produce.service.OSSService;
 import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.lang.math.NumberUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.JavaRDD;
@@ -17,7 +18,10 @@ import java.util.List;
 public class VideoCleanExecutor {
     public static void main(String[] args) {
         String file = args[0];
-        int repartition = Integer.valueOf(args[1]);
+        int repartition = NumberUtils.toInt(args[1], 160);
+        int sync = NumberUtils.toInt(args[2], 0);
+
+
         log.info("hdfs file {}", file);
         SparkConf sparkConf = new SparkConf()
                 //.setMaster("local")
@@ -34,7 +38,11 @@ public class VideoCleanExecutor {
                 String[] data = StringUtils.split(s.next(), "\t");
                 objectNames.add(data[2]);
             }
-            ossService.transToDeepColdArchive("art-pubbucket", objectNames);
+            if (sync == 1) {
+                ossService.transToDeepColdArchive("art-pubbucket", objectNames);
+            } else {
+                ossService.transToDeepColdArchive2("art-pubbucket", objectNames);
+            }
         });
     }
 }
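
Note on the parsing change (reviewer illustration, not part of the patch): NumberUtils.toInt(String, int) never throws on bad input; it returns the supplied default when the string is null or unparseable, so a malformed repartition or sync argument now falls back to 160 / 0 instead of aborting with NumberFormatException. A minimal sketch of the behavior:

import org.apache.commons.lang.math.NumberUtils;

public class ToIntDemo {
    public static void main(String[] args) {
        System.out.println(NumberUtils.toInt("200", 160));  // 200 (parsable)
        System.out.println(NumberUtils.toInt("oops", 160)); // 160 (unparsable -> default)
        System.out.println(NumberUtils.toInt(null, 160));   // 160 (null -> default)
        // Caveat: toInt cannot guard an absent argument; args[2] in the patched
        // main() still throws ArrayIndexOutOfBoundsException when fewer than
        // three arguments are passed to the job.
    }
}

One small nit: the new import is org.apache.commons.lang.math.NumberUtils (commons-lang 2.x), while StringUtils in the same file comes from lang3; org.apache.commons.lang3.math.NumberUtils offers an identical toInt(String, int), so the two could be unified on lang3.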
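
On the sync flag: sync == 1 keeps the original transToDeepColdArchive call, while the new default path (sync == 0) routes to transToDeepColdArchive2, whose implementation this diff does not show; judging by the flag name it is presumably a non-blocking variant. A hypothetical sketch of that split (the method bodies, thread pool, and naming below are my assumptions, not the actual OSSService code):

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class OSSServiceSketch {
    private final ExecutorService pool = Executors.newFixedThreadPool(8);

    // Blocking variant: the Spark task finishes only after every object
    // in the partition has been transitioned.
    public void transToDeepColdArchive(String bucket, List<String> objectNames) {
        objectNames.forEach(name -> transitionOne(bucket, name));
    }

    // Hypothetical non-blocking variant: submit and return immediately,
    // trading completion guarantees for task throughput.
    public void transToDeepColdArchive2(String bucket, List<String> objectNames) {
        objectNames.forEach(name ->
                CompletableFuture.runAsync(() -> transitionOne(bucket, name), pool));
    }

    private void transitionOne(String bucket, String objectName) {
        // OSS storage-class transition elided; the real call is not in this diff.
    }
}

If transToDeepColdArchive2 really is fire-and-forget, a Spark executor could exit before queued transitions complete; awaiting pool termination (or a shutdown hook) inside the service would close that gap.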