Temporarily recorded scripts

【New upstream samples】
nohup /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
--class com.aliyun.odps.spark.examples.makedata.makedata_10_originData_v3 \
--master yarn --driver-memory 1G --executor-memory 1G --executor-cores 1 --num-executors 64 \
./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
tablePart:64 savePath:/dw/recommend/model/10_sample_data_v3/ beginStr:20240227 endStr:20240227 > p10_.log 2>&1 &
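
A minimal backfill sketch for running the upstream-sample job over several days in sequence; the date list and log names below are illustrative and not from the original notes, the jar path and arguments are the ones above.

# Hypothetical backfill loop: one upstream-sample job per day, run synchronously so days do not compete for executors.
for dt in 20240225 20240226 20240227; do
  /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
    --class com.aliyun.odps.spark.examples.makedata.makedata_10_originData_v3 \
    --master yarn --driver-memory 1G --executor-memory 1G --executor-cores 1 --num-executors 64 \
    ./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
    tablePart:64 savePath:/dw/recommend/model/10_sample_data_v3/ beginStr:${dt} endStr:${dt} \
    > "p10_${dt}.log" 2>&1
done
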
[ROS sample production]
nohup /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
--class com.aliyun.odps.spark.examples.makedata.makedata_12_rosData_v3 \
--master yarn --driver-memory 1G --executor-memory 1G --executor-cores 1 --num-executors 32 \
./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
savePath:/dw/recommend/model/12_ros_data_v3/ beginStr:20240228 endStr:20240228 ifRepart:10 \
> p12_1.log 2>&1 &

nohup /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
--class com.aliyun.odps.spark.examples.makedata.makedata_12_rosData_v3_noweight \
--master yarn --driver-memory 1G --executor-memory 1G --executor-cores 1 --num-executors 32 \
./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
savePath:/dw/recommend/model/12_ros_data_v3_noweight/ beginStr:20240222 endStr:20240226 ifRepart:10 \
> p12_2.log 2>&1 &
[STR sample production]
nohup /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
--class com.aliyun.odps.spark.examples.makedata.makedata_11_strData_v3 \
--master yarn --driver-memory 1G --executor-memory 1G --executor-cores 1 --num-executors 64 \
./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
savePath:/dw/recommend/model/11_str_data_v3/ beginStr:20240227 endStr:20240227 ifRepart:100 \
> p11.log 2>&1 &
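
A quick sanity check on the ROS/STR outputs once the jobs finish, using standard HDFS commands; the paths are the savePath values above, and the per-date subdirectory layout is an assumption.

hadoop fs -du -h /dw/recommend/model/12_ros_data_v3/                 # size per date partition
hadoop fs -du -h /dw/recommend/model/12_ros_data_v3_noweight/
hadoop fs -du -h /dw/recommend/model/11_str_data_v3/
hadoop fs -ls /dw/recommend/model/11_str_data_v3/20240227 | wc -l    # roughly ifRepart (100) output files expected
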
[Write users to Redis]
nohup /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
--class com.aliyun.odps.spark.examples.makedata.makedata_09_user2redis_freq \
--name makedata_09_user2redis_freq \
--master yarn --driver-memory 1G --executor-memory 4G --executor-cores 1 --num-executors 32 \
--conf spark.yarn.executor.memoryOverhead=1024 \
/root/zhangbo/recommend-emr-dataprocess/target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
date:20240302 tablePart:96 expireDay:3 ifWriteRedisUser:True ifUser:True midDays:14 redisLimit:80000000 \
savePathUser:/dw/recommend/model/09_feature/user/ > p09.log 2>&1 &
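
A rough way to confirm the Redis write actually happened, assuming redis-cli is available; the host below is a placeholder, not taken from the notes.

tail -f p09.log                            # watch driver-side progress and errors
redis-cli -h <redis-host> -p 6379 dbsize   # total key count; should stay under redisLimit (80000000)
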
--------------
【Old STR: upstream samples】
nohup /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
--class com.aliyun.odps.spark.examples.makedata.makedata_06_originData \
--master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 32 \
--conf spark.yarn.executor.memoryOverhead=1024 \
--conf spark.shuffle.service.enabled=true \
--conf spark.shuffle.service.port=7337 \
--conf spark.shuffle.consolidateFiles=true \
--conf spark.shuffle.manager=sort \
--conf spark.storage.memoryFraction=0.4 \
--conf spark.shuffle.memoryFraction=0.5 \
--conf spark.default.parallelism=200 \
./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
tablePart:64 savePath:/dw/recommend/model/00_sample_data/ beginStr:20240311 endStr:20240312 > p6.log 2>&1 &
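
One caveat on the confs above: in Spark 2.x, spark.storage.memoryFraction and spark.shuffle.memoryFraction are legacy settings that only take effect when the legacy memory manager is enabled; with the default unified memory manager they are ignored. A hedged sketch of the two alternatives (not part of the original command):

# Option A (assumed intent): enable the legacy memory manager so the two fractions apply.
--conf spark.memory.useLegacyMode=true \
# Option B: drop the legacy fractions and tune the unified memory pool instead (0.6 is Spark's default).
--conf spark.memory.fraction=0.6 \
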
【Old STR: training data】
nohup /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
--class com.aliyun.odps.spark.examples.makedata.makedata_07_strData \
--master yarn --driver-memory 1G --executor-memory 1G --executor-cores 1 --num-executors 32 \
./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
savePath:/dw/recommend/model/04_str_data/ beginStr:20240311 endStr:20240312 featureVersion:v4 ifRepart:100 \
> p7.log 2>&1 &
---
nohup /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
--class com.aliyun.odps.spark.examples.makedata.makedata_13_originData_20240529 \
--master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
tablePart:64 repartition:32 \
beginStr:2024061600 endStr:2024061623 \
savePath:/dw/recommend/model/13_sample_data/ \
table:alg_recsys_sample_all \
> p13_2024061600.log 2>&1 &

nohup /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
--class com.aliyun.odps.spark.examples.makedata.makedata_14_valueData_20240608 \
--master yarn --driver-memory 1G --executor-memory 3G --executor-cores 1 --num-executors 32 \
./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
readPath:/dw/recommend/model/13_sample_data/ \
savePath:/dw/recommend/model/14_feature_data/ \
beginStr:20240615 endStr:20240615 repartition:1000 \
> p14_data_check.log 2>&1 &

nohup /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
--class com.aliyun.odps.spark.examples.makedata.makedata_15_bucket_20240608 \
--master yarn --driver-memory 16G --executor-memory 1G --executor-cores 1 --num-executors 16 \
--conf spark.driver.maxResultSize=16G \
./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
readPath:/dw/recommend/model/14_feature_data/20240606/ fileName:20240606_200_v3 \
bucketNum:200 sampleRate:0.1 \
> p15_data2.log 2>&1 &
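
The makedata_15 bucket job is the only one here with a large driver (16G) and spark.driver.maxResultSize=16G, which suggests bucket boundaries are computed by collecting sampled features on the driver; this is an inference, not stated in the notes. If it fails with a "Total size of serialized results ... is bigger than spark.driver.maxResultSize" error, a hypothetical adjustment is to lower sampleRate or raise both settings together:

# Keep spark.driver.maxResultSize comfortably below --driver-memory.
--driver-memory 24G \
--conf spark.driver.maxResultSize=20G \
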
nohup /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
--class com.aliyun.odps.spark.examples.makedata.makedata_16_bucketData_20240609 \
--master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
beginStr:20240615 endStr:20240615 repartition:1000 \
> p16_data.log 2>&1 &

/dw/recommend/model/13_sample_data/
/dw/recommend/model/14_feature_data/
/dw/recommend/model/16_train_data/
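
The three paths above are the hand-offs between jobs 13 → 14 → 16 (14 reads 13's savePath; the _check variant of 16 further down reads 14's output). A minimal sketch of running the chain for one day in order instead of fire-and-forget with nohup; DAY and the log names are illustrative, the arguments are the ones used above.

#!/usr/bin/env bash
# Hypothetical one-day pipeline: run jobs 13 -> 14 -> 16 in sequence and stop on the first failure.
set -euo pipefail
DAY=20240615
SPARK=/opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2
JAR=./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar

$SPARK org.apache.spark.deploy.SparkSubmit \
  --class com.aliyun.odps.spark.examples.makedata.makedata_13_originData_20240529 \
  --master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
  $JAR tablePart:64 repartition:32 beginStr:${DAY}00 endStr:${DAY}23 \
  savePath:/dw/recommend/model/13_sample_data/ table:alg_recsys_sample_all > "p13_${DAY}.log" 2>&1

$SPARK org.apache.spark.deploy.SparkSubmit \
  --class com.aliyun.odps.spark.examples.makedata.makedata_14_valueData_20240608 \
  --master yarn --driver-memory 1G --executor-memory 3G --executor-cores 1 --num-executors 32 \
  $JAR readPath:/dw/recommend/model/13_sample_data/ savePath:/dw/recommend/model/14_feature_data/ \
  beginStr:${DAY} endStr:${DAY} repartition:1000 > "p14_${DAY}.log" 2>&1

$SPARK org.apache.spark.deploy.SparkSubmit \
  --class com.aliyun.odps.spark.examples.makedata.makedata_16_bucketData_20240609 \
  --master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
  $JAR beginStr:${DAY} endStr:${DAY} repartition:1000 > "p16_${DAY}.log" 2>&1
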
-----
Run only one of these: execute it only when using the features printed online.
nohup /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
--class com.aliyun.odps.spark.examples.makedata.makedata_13_originData_20240529_check \
--master yarn --driver-memory 1G --executor-memory 2G --executor-cores 1 --num-executors 16 \
./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
tablePart:64 repartition:32 \
beginStr:2024061500 endStr:2024061523 \
savePath:/dw/recommend/model/13_sample_data_check_print/ \
table:alg_recsys_sample_all_new \
> p13_2024061500_check.log 2>&1 &
Run both of these: they filter out the samples that are not needed.
nohup /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
--class com.aliyun.odps.spark.examples.makedata.makedata_16_bucketData_20240609_check \
--master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
readPath:/dw/recommend/model/14_feature_data_check_print/ \
savePath:/dw/recommend/model/16_train_data_check_print/ \
beginStr:20240615 endStr:20240615 repartition:1000 \
> p16_data_check.log 2>&1 &

/dw/recommend/model/13_sample_data_check/
/dw/recommend/model/13_sample_data_check_print/
/dw/recommend/model/14_feature_data_check/
/dw/recommend/model/14_feature_data_check_print/
/dw/recommend/model/16_train_data_check/
/dw/recommend/model/16_train_data_check_print/
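
A rough way to compare the _check and _check_print outputs listed above; this is an illustrative check, not part of the original workflow, and the row count assumes uncompressed text output with Spark's default part-* file naming.

# Size comparison works regardless of output format:
hadoop fs -du -s -h /dw/recommend/model/16_train_data_check/20240615
hadoop fs -du -s -h /dw/recommend/model/16_train_data_check_print/20240615
# Row-count comparison, one sample per line assumed:
hadoop fs -cat /dw/recommend/model/16_train_data_check/20240615/part-* | wc -l
hadoop fs -cat /dw/recommend/model/16_train_data_check_print/20240615/part-* | wc -l
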
nohup /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
--class com.aliyun.odps.spark.examples.makedata.makedata_17_bucketDataPrint_20240617 \
--master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
> p17_data_check.log 2>&1 &

nohup /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
--class com.aliyun.odps.spark.examples.makedata.makedata_18_mergehour2day_20240617 \
--master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
> p18_data_check.log 2>&1 &
nohup /opt/apps/SPARK2/spark-2.4.8-hadoop3.2-1.0.8/bin/spark-class2 org.apache.spark.deploy.SparkSubmit \
--class com.aliyun.odps.spark.examples.makedata.makedata_17_bucketDataPrint_20240617 \
--master yarn --driver-memory 2G --executor-memory 4G --executor-cores 1 --num-executors 16 \
./target/spark-examples-1.0.0-SNAPSHOT-shaded.jar \
beginStr:2024061800 endStr:2024061814 \
readDate:20240618 \
> p17_data_check.log 2>&1 &
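
Note that this rerun reuses p17_data_check.log from the earlier makedata_17 submission, so the first log gets overwritten; a per-run suffix avoids that, for example:

# Give each rerun its own log instead of reusing p17_data_check.log:
> "p17_data_check_$(date +%Y%m%d%H%M%S).log" 2>&1 &
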