zhangbo 9 月之前
父節點
當前提交
5bf464c571

+ 14 - 0
write_redis-下线/a-crontab的任务

@@ -0,0 +1,14 @@
+06 * * * * cd /root/zhangbo/rov-offline/write_redis && /bin/sh alg_ad_feature_01_cid2action_redis_task.sh > my_logs_alg_ad_feature_01_cid2action_redis_task/$(date +\%Y-\%m-\%d_\%H_\%M).log 2>&1
+
+08 * * * * cd /root/zhangbo/rov-offline/write_redis && /bin/sh alg_ad_feature_02_vidcid2action_redis_task.sh > my_logs_alg_ad_feature_02_vidcid2action_redis_task/$(date +\%Y-\%m-\%d_\%H_\%M).log 2>&1
+
+16 * * * * cd /root/zhangbo/rov-offline/write_redis && /bin/sh alg_ad_feature_03_cid2actionv1_redis_task.sh > my_logs_alg_ad_feature_03_cid2actionv1_redis_task/$(date +\%Y-\%m-\%d_\%H_\%M).log 2>&1
+
+18 * * * * cd /root/zhangbo/rov-offline/write_redis && /bin/sh alg_ad_feature_04_vidcid2actionv1_redis_task.sh > my_logs_alg_ad_feature_04_vidcid2actionv1_redis_task/$(date +\%Y-\%m-\%d_\%H_\%M).log 2>&1
+
+#45 * * * * cd /root/zhangbo/rov-offline && /bin/sh alg_recsys_recall_undertake_task.sh > my_logs_undertask/undertask_$(date +\%Y-\%m-\%d_\%H).log 2>&1
+#20 * * * * cd /root/zhangbo/rov-offline/write_redis && /bin/sh alg_recsys_feature_01_vid2titletags_redis_task.sh > my_logs_recall_01_vid2titletags_redis/$(date +\%Y-\%m-\%d_\%H_\%M).log 2>&1
+#20 * * * * cd /root/zhangbo/rov-offline/write_redis && /bin/sh alg_recsys_recall_01_tag2vids_redis_task.sh > my_logs_recall_01_tag2vids_redis/$(date +\%Y-\%m-\%d_\%H_\%M).log 2>&1
+#10 * * * * cd /root/zhangbo/rov-offline/write_redis && /bin/sh alg_recsys_recall_02_cfrovn_redis_task.sh > my_logs_recall_02_cfrovn_redis/$(date +\%Y-\%m-\%d_\%H_\%M).log 2>&1
+#10 * * * * cd /root/zhangbo/rov-offline && /bin/sh alg_recsys_rank_item_realtime_1day_task.sh > my_logs_feature/rt_1day_$(date +\%Y-\%m-\%d_\%H).log 2>&1
+#25 * * * * cd /root/zhangbo/rov-offline && /bin/sh alg_recsys_rank_item_realtime_1hroot_task.sh > my_logs_feature/rt_1hroot_$(date +\%Y-\%m-\%d_\%H).log 2>&1

+ 0 - 0
write_redis/alg_ad_feature_01_cid2action_redis.py → write_redis-下线/alg_ad_feature_01_cid2action_redis.py


+ 0 - 0
write_redis/alg_ad_feature_01_cid2action_redis_task.sh → write_redis-下线/alg_ad_feature_01_cid2action_redis_task.sh


+ 0 - 0
write_redis/alg_ad_feature_02_vidcid2action_redis.py → write_redis-下线/alg_ad_feature_02_vidcid2action_redis.py


+ 0 - 0
write_redis/alg_ad_feature_02_vidcid2action_redis_task.sh → write_redis-下线/alg_ad_feature_02_vidcid2action_redis_task.sh


+ 0 - 0
write_redis/alg_ad_feature_03_cid2actionv1_redis.py → write_redis-下线/alg_ad_feature_03_cid2actionv1_redis.py


+ 0 - 0
write_redis/alg_ad_feature_03_cid2actionv1_redis_task.sh → write_redis-下线/alg_ad_feature_03_cid2actionv1_redis_task.sh


+ 0 - 0
write_redis/alg_ad_feature_04_vidcid2actionv1_redis.py → write_redis-下线/alg_ad_feature_04_vidcid2actionv1_redis.py


+ 0 - 0
write_redis/alg_ad_feature_04_vidcid2actionv1_redis_task.sh → write_redis-下线/alg_ad_feature_04_vidcid2actionv1_redis_task.sh


+ 0 - 0
write_redis/alg_recsys_feature_01_vid2titletags_redis.py → write_redis-下线/alg_recsys_feature_01_vid2titletags_redis.py


+ 0 - 0
write_redis/alg_recsys_feature_01_vid2titletags_redis_task.sh → write_redis-下线/alg_recsys_feature_01_vid2titletags_redis_task.sh


+ 0 - 0
write_redis/alg_recsys_recall_01_tag2vids_redis.py → write_redis-下线/alg_recsys_recall_01_tag2vids_redis.py


+ 0 - 0
write_redis/alg_recsys_recall_01_tag2vids_redis_task.sh → write_redis-下线/alg_recsys_recall_01_tag2vids_redis_task.sh


+ 0 - 0
write_redis/alg_recsys_recall_02_cfrovn_redis.py → write_redis-下线/alg_recsys_recall_02_cfrovn_redis.py


+ 0 - 0
write_redis/alg_recsys_recall_02_cfrovn_redis_task.sh → write_redis-下线/alg_recsys_recall_02_cfrovn_redis_task.sh


+ 0 - 0
write_redis/__init__.py


+ 7 - 0
write_redis/a-crontab的任务

@@ -0,0 +1,7 @@
+# zhangbo-recsys
+00 11 * * * cd /root/zhangbo/rov-offline && /bin/sh alg_recsys_delete_file.sh >p_delete_zhangbo.log 2>&1
+10 * * * * cd /root/zhangbo/rov-offline && /bin/sh alg_recsys_rank_item_realtime_1h_task.sh > my_logs_feature/rt_1h_$(date +\%Y-\%m-\%d_\%H).log 2>&1
+15 * * * * cd /root/zhangbo/rov-offline && /bin/sh alg_recsys_rank_item_realtime_1hrootall_task.sh > my_logs_feature/rt_1hrootall_$(date +\%Y-\%m-\%d_\%H).log 2>&1
+*/10 * * * * cd /root/zhangbo/rov-offline && /bin/sh alg_recsys_recall_shield_videos_task.sh > my_logs_shield/shield_videos_$(date +\%Y-\%m-\%d_\%H_\%M).log 2>&1
+10 * * * * cd /root/zhangbo/rov-offline && /bin/sh alg_recsys_recall_tags_videos_task.sh > my_logs_tags/tags_$(date +\%Y-\%m-\%d_\%H).log 2>&1
+16 * * * * cd /root/zhangbo/rov-offline/write_redis && /bin/sh alg_recsys_feature_02_vidhasreturnrov_redis_task.sh > my_logs_alg_recsys_feature_02_vidhasreturnrov_redis_task/$(date +\%Y-\%m-\%d_\%H_\%M).log 2>&1

+ 91 - 34
alg_recsys_rank_item_realtime_1h.py → write_redis/alg_recsys_rank_item_realtime_1h.py

@@ -1,16 +1,26 @@
 # -*- coding: utf-8 -*-
-import traceback
-import datetime
+import os
+import sys
+root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+if root_dir not in sys.path:
+    sys.path.append(root_dir)
+    print("******** sys.path ********")
+    print(sys.path)
+
+from multiprocessing import Process
 from odps import ODPS
 from threading import Timer
-from my_utils import RedisHelper, get_data_from_odps, send_msg_to_feishu
+import threading
+from my_utils import RedisHelper, execute_sql_from_odps
 from my_config import set_config
 from log import Log
-from alg_recsys_recall_4h_region_trend import records_process_for_list
 import json
-from datetime import datetime, timedelta
-import sys
-from my_utils import execute_sql_from_odps
+from datetime import datetime, timedelta
+from queue import Queue
+from tqdm import tqdm
+import time
+
+
 
 
 config_, _ = set_config()
@@ -18,13 +28,43 @@ log_ = Log()
 redis_helper = RedisHelper()
 
 REDIS_PREFIX = "item_rt_fea_1h_"
-
+EXPIRE_TIME = 24 * 3600
+
+def worker(queue, executor):
+    while True:
+        row = queue.get()
+        if row is None:  # 结束信号
+            queue.task_done()
+            break
+        executor(row)
+        queue.task_done()
+def records_process_for_list(records, executor, max_size=50, num_workers=10):
+    # 创建一个线程安全的队列
+    queue = Queue(maxsize=max_size)  # 可以调整 maxsize 以控制内存使用
+    # 设置线程池大小
+    num_workers = num_workers
+    # 启动工作线程
+    threads = []
+    for _ in range(num_workers):
+        t = threading.Thread(target=worker, args=(queue, executor))
+        t.start()
+        threads.append(t)
+    # 读取数据并放入队列
+    for row in tqdm(records):
+        queue.put(row)
+    # 发送结束信号
+    for _ in range(num_workers):
+        queue.put(None)
+    # 等待所有任务完成
+    queue.join()
+    # 等待所有工作线程结束
+    for t in threads:
+        t.join()
 def process_and_store(row):
-    video_id, json_str = row
-    key = REDIS_PREFIX + str(video_id)
-    expire_time = 24 * 3600
+    table_key, json_str = row
+    key = REDIS_PREFIX + str(table_key)
+    expire_time = EXPIRE_TIME
     redis_helper.set_data_to_redis(key, json_str, expire_time)
-    # log_.info("video写入数据key={},value={}".format(key, json_str))
 
 def check_data(project, table, partition) -> int:
     """检查数据是否准备好,输出数据条数"""
@@ -51,7 +91,7 @@ def check_data(project, table, partition) -> int:
             log_.info("表{}分区{}不存在".format(table, partition))
             data_count = 0
     except Exception as e:
-        log_.error("table:{},partition:{} no data. return data_count=0:{}".format(table, partition, e))
+        log_.error("table:{},partition:{} no data. return data_count=0,报错原因是:{}".format(table, partition, e))
         data_count = 0
     return data_count
 
@@ -129,44 +169,61 @@ def get_sql(date, previous_date_str, project):
     return video_list
 
 
-def h_timer_check():
+def main():
     try:
         date = sys.argv[1]
         hour = sys.argv[2]
     except Exception as e:
-        now_date = datetime.today()
-        date = datetime.strftime(now_date, '%Y%m%d')
+        date = datetime.now().strftime('%Y%m%d')
         hour = datetime.now().hour
-        log_.info("没有读取到参数,采用系统时间,报错info:{}".format(e))
+        log_.info("没有读取到参数,采用系统时间: {}".format(e))
+    log_.info("使用时间参数-日期:{},小时:{}".format(date, str(hour)))
+    if hour in []:
+        log_.info(f"hour={hour}不执行,直接返回。")
+        return
     # 1 判断上游数据表是否生产完成
     project = "loghubods"
     table = "video_each_hour_update_no_province_apptype"
     partition = str(date) + str(hour)
-    table_data_cnt = check_data(project, table, partition)
-    if table_data_cnt == 0:
-        log_.info("上游数据{}未就绪{},等待...".format(table, partition))
-        Timer(60, h_timer_check).start()
-    else:
-        log_.info("上游数据就绪,count={},开始读取数据表".format(table_data_cnt))
-        # 2 读取数据表 处理特征
-        previous_date_str = (datetime.strptime(date, "%Y%m%d") - timedelta(days=1)).strftime("%Y%m%d")
-        video_list = get_sql(date, previous_date_str, project)
-        # 3 写入redis
-        log_.info("video的数据量:{}".format(len(video_list)))
-        records_process_for_list(video_list, process_and_store, max_size=50, num_workers=8)
-
-        redis_helper.set_data_to_redis(REDIS_PREFIX + "partition", partition, 24 * 3600)
-
-
+    run_flag = True
+    begin_ts = int(time.time())
+    table_data_cnt = 0
+    while run_flag:
+        if int(time.time()) - begin_ts >= 60 * 40:
+            log_.info("等待上游数据超过40分钟了,认为失败退出:过了{}秒。".format(int(time.time()) - begin_ts))
+            sys.exit(1)
+        table_data_cnt = check_data(project, table, partition)
+        if table_data_cnt == 0:
+            log_.info("上游数据{}未就绪{}/{},等待...".format(table, date, hour))
+            log_.info("等待2分钟")
+            time.sleep(60 * 2)
+        else:
+            run_flag = False
 
+    log_.info("上游数据就绪,count={},开始读取数据表".format(table_data_cnt))
+    # 2 读取数据表 处理特征
+    previous_date_str = (datetime.strptime(date, "%Y%m%d") - timedelta(days=1)).strftime("%Y%m%d")
+    video_list = get_sql(date, previous_date_str, project)
+    # 3 写入redis
+    records_process_for_list(video_list, process_and_store, max_size=50, num_workers=8)
+    redis_helper.set_data_to_redis(REDIS_PREFIX + "partition", partition, 24 * 3600)
 
 if __name__ == '__main__':
     log_.info("开始执行:" + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
-    h_timer_check()
+    process = Process(target=main)
+    process.start()
+    # 等待子进程完成或超时
+    timeout = 3600
+    process.join(timeout=timeout)  # 设置超时为3600秒(1小时)
+    if process.is_alive():
+        print("脚本执行时间超过1小时,执行失败,经过了{}秒。".format(timeout))
+        process.terminate()  # 终止子进程
+        sys.exit(1)  # 直接退出主进程并返回状态码1
     log_.info("完成执行:" + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
 
 
 
 
+
 # cd /root/zhangbo/rov-offline
 # python alg_recsys_rank_item_realtime_1h.py 20240117 20

+ 10 - 5
alg_recsys_rank_item_realtime_1h_task.sh → write_redis/alg_recsys_rank_item_realtime_1h_task.sh

@@ -1,21 +1,26 @@
 source /etc/profile
 echo $ROV_OFFLINE_ENV
-if [ ! -d "my_logs_feature" ]; then
-    mkdir my_logs_feature
+
+log_dir="my_logs_alg_recsys_rank_item_realtime_1h_task"
+if [ ! -d ${log_dir} ]; then
+    mkdir ${log_dir}
 fi
+
 cur_time="`date +%Y%m%d`"
 cur_h="`date +%H`"
 echo "开始执行时间:{$(date "+%Y-%m-%d %H:%M:%S")}"
-
 if [[ $ROV_OFFLINE_ENV == 'test' ]]; then
   /root/anaconda3/bin/python alg_recsys_rank_item_realtime_1h.py $cur_time $cur_h
 elif [[ $ROV_OFFLINE_ENV == 'pro' ]]; then
   /root/anaconda3/bin/python alg_recsys_rank_item_realtime_1h.py $cur_time $cur_h
   if [ $? -ne 0 ]; then
     msg="写redis的任务,一层曝光/分享/回流到redis,用于排序,alg_recsys_rank_item_realtime_1h.py:${cur_time}-${cur_h}-something-is-wrong."
-    /root/anaconda3/bin/python write_redis/utils_monitor.py ${msg}
+    /root/anaconda3/bin/python utils_monitor.py "${msg}"
   fi
 fi
 echo "结束执行时间:{$(date "+%Y-%m-%d %H:%M:%S")}"
 echo "all done"
-#sh alg_recsys_rank_item_realtime_1h_task.sh
+
+
+
+# sh alg_recsys_rank_item_realtime_1h_task.sh