
Merge branch 'feature/zhangbo_flow_recall' of algorithm/rov-offline into master

zhangbo 10 months ago
Parent
Current commit
a2cf18bef0

+ 45 - 37
alg_recsys_coldstart_offlinecheck.py

@@ -1,13 +1,10 @@
 # -*- coding: utf-8 -*-
 import time
-import traceback
 from my_config import set_config
 from log import Log
-from my_utils import execute_sql_from_odps
 from db_helper import RedisHelper
-from datetime import datetime, timedelta
+from datetime import datetime
 
-from alg_recsys_recall_4h_region_trend import records_process_for_list
 config_, _ = set_config()
 log_ = Log()
 redis_helper = RedisHelper()
@@ -18,41 +15,52 @@ def main():
     date_write = ""
     while True:
         date_cur = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-        if "2024-05-10 20:3" in date_cur:
+        if "2024-06-01" in date_cur:
             print("退出")
             break
-        # if date_write == date_cur:
-        #     time.sleep(10)
-        #     continue
+        apptype_list = ["0", "4", "5", "21", "3", "6", "2", "13", "17", "18", "19", "22"]
+        level_list = ["1", "2", "3", "4", "5"]
+        for app in apptype_list:
+            for level in level_list:
+                key = "flow:pool:level:item:" + app + ":" + level
+                result = redis_helper.get_data_from_set(key)
+                if not result:
+                    result = []
+                size = len(result)
+                log_str = "\t".join([date_cur, app, level, str(size)])
+                log_.info(log_str)
+        time.sleep(40)
+
+
+
+        # for level in ["1", "2", "3", "4"]:
+        #     key = "flow:pool:level:item:4:" + level
+        #     result = redis_helper.get_data_from_set(key)
+        #     if not result:
+        #         result = []
+        #     size = len(result)
+        #     log_str = "\t".join([date_cur, "4", level, str(size)])
+        #     # print(log_str)
+        #     log_.info(log_str)
+        # for level in ["1", "2", "3", "4"]:
+        #     key = "flow:pool:level:item:0:" + level
+        #     result = redis_helper.get_data_from_set(key)
+        #     if not result:
+        #         result = []
+        #     size = len(result)
+        #     log_str = "\t".join([date_cur, "0", level, str(size)])
+        #     # print(log_str)
+        #     log_.info(log_str)
+        # for level in ["1", "2", "3", "4"]:
+        #     key = "flow:pool:level:item:21:" + level
+        #     result = redis_helper.get_data_from_set(key)
+        #     if not result:
+        #         result = []
+        #     size = len(result)
+        #     log_str = "\t".join([date_cur, "21", level, str(size)])
+        #     # print(log_str)
+        #     log_.info(log_str)
 
-        for level in ["1", "2", "3", "4"]:
-            key = "flow:pool:level:item:4:" + level
-            result = redis_helper.get_data_from_set(key)
-            if not result:
-                result = []
-            size = len(result)
-            log_str = "\t".join([date_cur, "4", level, str(size)])
-            # print(log_str)
-            log_.info(log_str)
-        for level in ["1", "2", "3", "4"]:
-            key = "flow:pool:level:item:0:" + level
-            result = redis_helper.get_data_from_set(key)
-            if not result:
-                result = []
-            size = len(result)
-            log_str = "\t".join([date_cur, "0", level, str(size)])
-            # print(log_str)
-            log_.info(log_str)
-        for level in ["1", "2", "3", "4"]:
-            key = "flow:pool:level:item:21:" + level
-            result = redis_helper.get_data_from_set(key)
-            if not result:
-                result = []
-            size = len(result)
-            log_str = "\t".join([date_cur, "21", level, str(size)])
-            # print(log_str)
-            log_.info(log_str)
-        time.sleep(30)
         # date_write = datetime.now().strftime("%Y-%m-%d %H:%M")
 
 
@@ -63,4 +71,4 @@ if __name__ == '__main__':
 
 
 # cd /root/zhangbo/rov-offline
-# nohup python alg_recsys_coldstart_offlinecheck.py > p.log 2>&1 &
+# nohup python alg_recsys_coldstart_offlinecheck.py > p2.log 2>&1 &
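Editor's note: `get_data_from_set` pulls every member of the set just so the loop can take `len()` of it. If the underlying client is reachable, Redis can return the cardinality server-side in O(1) via SCARD — a minimal sketch with redis-py (host/port are placeholders, not the project's real config):

```python
# Sketch: count a flow-pool set without transferring its members.
# Assumes redis-py; connection parameters are illustrative only.
import redis

r = redis.Redis(host="127.0.0.1", port=6379, db=0)
size = r.scard("flow:pool:level:item:4:1")  # O(1) server-side cardinality
print(size)
```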

+ 13 - 1
alg_recsys_delete_file.sh

@@ -1,8 +1,20 @@
 
-day="$(date -d '4 days ago' +%Y-%m-%d)"
+day="$(date -d '7 days ago' +%Y-%m-%d)"
 rm -rf /root/zhangbo/rov-offline/my_logs_tags/tags_${day}*
 rm -rf /root/zhangbo/rov-offline/my_logs/task_${day}*
 rm -rf /root/zhangbo/rov-offline/my_logs_feature/rt_1day_${day}*
 rm -rf /root/zhangbo/rov-offline/my_logs_feature/rt_1h_${day}*
+rm -rf /root/zhangbo/rov-offline/my_logs_feature/rt_1hroot_${day}*
+rm -rf /root/zhangbo/rov-offline/my_logs_feature/rt_1hrootall_${day}*
 rm -rf /root/zhangbo/rov-offline/my_logs_shield/shield_videos_${day}*
+rm -rf /root/zhangbo/rov-offline/logs/${day}*
+rm -rf /data2/zhangbo_logs_dir/20240425_flow_pool_task_log/${day}*
+rm -rf /data2/zhangbo_logs_dir/20240425_flow_pool_score_task_log/${day}*
+rm -rf /root/zhangbo/rov-offline/write_redis/my_logs_recall_01_vid2titletags_redis/${day}*
+rm -rf /root/zhangbo/rov-offline/write_redis/my_logs_recall_01_tag2vids_redis/${day}*
+rm -rf /root/zhangbo/rov-offline/write_redis/my_logs_recall_02_cfrovn_redis/${day}*
+rm -rf /root/zhangbo/rov-offline/write_redis/my_logs_alg_ad_feature_01_cid2action_redis_task/${day}*
+rm -rf /root/zhangbo/rov-offline/write_redis/my_logs_alg_ad_feature_02_vidcid2action_redis_task/${day}*
+
+day="$(date -d '10 days ago' +%Y%m%d)"
 rm -rf /root/zhangbo/rov-offline/logs/${day}*
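Editor's note: date-glob deletion only removes files stamped with exactly `day`; if the cron skips a day, that day's files are never reclaimed. An age-based sweep is more forgiving — a hypothetical sketch (directory and threshold are illustrative, not what the repo uses):

```python
# Sketch: delete files older than max_age_days, regardless of their name stamp.
import os
import time

def sweep(log_dir: str, max_age_days: int = 7) -> None:
    cutoff = time.time() - max_age_days * 86400
    for name in os.listdir(log_dir):
        path = os.path.join(log_dir, name)
        if os.path.isfile(path) and os.path.getmtime(path) < cutoff:
            os.remove(path)

sweep("/root/zhangbo/rov-offline/my_logs", max_age_days=7)
```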

+ 6 - 5
alg_recsys_rank_item_realtime_1h_task.sh

@@ -9,12 +9,13 @@ echo "开始执行时间:{$(date "+%Y-%m-%d %H:%M:%S")}"
 
 if [[ $ROV_OFFLINE_ENV == 'test' ]]; then
   /root/anaconda3/bin/python alg_recsys_rank_item_realtime_1h.py $cur_time $cur_h
-  echo "结束执行时间:{$(date "+%Y-%m-%d %H:%M:%S")}"
-  echo "all done"
 elif [[ $ROV_OFFLINE_ENV == 'pro' ]]; then
   /root/anaconda3/bin/python alg_recsys_rank_item_realtime_1h.py $cur_time $cur_h
-  echo "结束执行时间:{$(date "+%Y-%m-%d %H:%M:%S")}"
-  echo "all done"
+  if [ $? -ne 0 ]; then
+    msg="写redis的任务,一层曝光/分享/回流到redis,用于排序,alg_recsys_rank_item_realtime_1h.py:${cur_time}-${cur_h}-something-is-wrong."
+    /root/anaconda3/bin/python write_redis/utils_monitor.py ${msg}
+  fi
 fi
-
+echo "结束执行时间:{$(date "+%Y-%m-%d %H:%M:%S")}"
+echo "all done"
 #sh alg_recsys_rank_item_realtime_1h_task.sh
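Editor's note: the change above is this commit's recurring pattern — on the pro branch the script now inspects the interpreter's exit status and pages Feishu through write_redis/utils_monitor.py on failure. The same logic, sketched in Python for clarity (paths taken from the script; this wrapper is not part of the repo):

```python
# Sketch: run the hourly job and alert on a non-zero exit status.
import subprocess

rc = subprocess.run(
    ["/root/anaconda3/bin/python", "alg_recsys_rank_item_realtime_1h.py"]
).returncode
if rc != 0:
    subprocess.run(
        ["/root/anaconda3/bin/python", "write_redis/utils_monitor.py",
         "alg_recsys_rank_item_realtime_1h.py failed"]
    )
```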

+ 6 - 4
alg_recsys_rank_item_realtime_1hroot_task.sh

@@ -9,12 +9,14 @@ echo "开始执行时间:{$(date "+%Y-%m-%d %H:%M:%S")}"
 
 if [[ $ROV_OFFLINE_ENV == 'test' ]]; then
   /root/anaconda3/bin/python alg_recsys_rank_item_realtime_1hroot.py $cur_time $cur_h
-  echo "结束执行时间:{$(date "+%Y-%m-%d %H:%M:%S")}"
-  echo "all done"
 elif [[ $ROV_OFFLINE_ENV == 'pro' ]]; then
   /root/anaconda3/bin/python alg_recsys_rank_item_realtime_1hroot.py $cur_time $cur_h
-  echo "结束执行时间:{$(date "+%Y-%m-%d %H:%M:%S")}"
-  echo "all done"
+  if [ $? -ne 0 ]; then
+    msg="写redis的任务,累积曝光/分享/回流到redis,用于排序,alg_recsys_rank_item_realtime_1hroot.py:${cur_time}-${cur_h}-something-is-wrong."
+    /root/anaconda3/bin/python write_redis/utils_monitor.py ${msg}
+  fi
 fi
+echo "结束执行时间:{$(date "+%Y-%m-%d %H:%M:%S")}"
+echo "all done"
 
 #sh alg_recsys_rank_item_realtime_1hroot_task.sh

+ 188 - 0
write_redis/alg_ad_feature_01_cid2action_redis.py

@@ -0,0 +1,188 @@
+# -*- coding: utf-8 -*-
+import os
+import sys
+root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+if root_dir not in sys.path:
+    sys.path.append(root_dir)
+    print("******** sys.path ********")
+    print(sys.path)
+
+from odps import ODPS
+from threading import Timer
+import threading
+from my_utils import RedisHelper, execute_sql_from_odps
+from my_config import set_config
+from log import Log
+import json
+from datetime import datetime
+from queue import Queue
+from tqdm import tqdm
+
+
+
+
+config_, _ = set_config()
+log_ = Log()
+redis_helper = RedisHelper()
+
+REDIS_PREFIX = "redis:cid_action:"
+EXPIRE_TIME = 6 * 3600
+
+def worker(queue, executor):
+    while True:
+        row = queue.get()
+        if row is None:  # stop sentinel
+            queue.task_done()
+            break
+        executor(row)
+        queue.task_done()
+def records_process_for_list(records, executor, max_size=50, num_workers=10):
+    # create a thread-safe queue; maxsize bounds memory usage
+    queue = Queue(maxsize=max_size)
+    # start the worker threads
+    threads = []
+    for _ in range(num_workers):
+        t = threading.Thread(target=worker, args=(queue, executor))
+        t.start()
+        threads.append(t)
+    # feed the records into the queue
+    for row in tqdm(records):
+        queue.put(row)
+    # send one stop sentinel per worker
+    for _ in range(num_workers):
+        queue.put(None)
+    # block until every queued item has been processed
+    queue.join()
+    # wait for the worker threads to exit
+    for t in threads:
+        t.join()
+def process_and_store(row):
+    table_key, json_str = row
+    key = REDIS_PREFIX + str(table_key)
+    expire_time = EXPIRE_TIME
+    redis_helper.set_data_to_redis(key, json_str, expire_time)
+
+def check_data(project, table,  date, hour, mm) -> int:
+    """检查数据是否准备好,输出数据条数"""
+    odps = ODPS(
+        access_id=config_.ODPS_CONFIG['ACCESSID'],
+        secret_access_key=config_.ODPS_CONFIG['ACCESSKEY'],
+        project=project,
+        endpoint=config_.ODPS_CONFIG['ENDPOINT'],
+        connect_timeout=3000,
+        read_timeout=500000,
+        pool_maxsize=1000,
+        pool_connections=1000
+    )
+    try:
+        t = odps.get_table(name=table)
+        log_.info(f"检查分区是否存在-【 dt={date} hh={hour}】")
+        check_res = t.exist_partition(partition_spec=f'dt={date},hh={hour},mm={mm}')
+        if check_res:
+            sql = f'select * from {project}.{table} where dt = {date} and hh = {hour} and mm = {mm}'
+            log_.info(sql)
+            with odps.execute_sql(sql=sql).open_reader() as reader:
+                data_count = reader.count
+        else:
+            log_.info("表{}分区{}/{}不存在".format(table, date, hour))
+            data_count = 0
+    except Exception as e:
+        log_.error("table:{},date:{},hour:{} no data. return data_count=0,报错原因是:{}".format(table, date, hour, e))
+        data_count = 0
+    return data_count
+
+def get_sql(project, table, date, hour, mm):
+    sql = '''
+    SELECT  cid
+            ,exp
+            ,click
+            ,`order`  -- ORDER is reserved in MaxCompute SQL; backtick-escaped
+            ,cpa
+    FROM    {}.{}
+    WHERE   dt = '{}'
+    and     hh = '{}'
+    and     mm = '{}'
+    '''.format(
+        project, table, date, hour, mm
+    )
+    print("sql:" + sql)
+    records = execute_sql_from_odps(project=project, sql=sql)
+    video_list = []
+    with records.open_reader() as reader:
+        for record in reader:
+            key = record['cid']
+            m = dict()
+            try:
+                m["exp"] = record['exp']
+            except Exception as e:
+                log_.error(e)
+            try:
+                m["click"] = record['click']
+            except Exception as e:
+                log_.error(e)
+            try:
+                m["order"] = record['order']
+            except Exception as e:
+                log_.error(e)
+            try:
+                m["cpa"] = record['cpa']
+            except Exception as e:
+                log_.error(e)
+
+            json_str = json.dumps(m)
+            video_list.append([key, json_str])
+    return video_list
+
+
+def main():
+    try:
+        date = sys.argv[1]
+        hour = sys.argv[2]
+        # mm = sys.argv[3]
+        mm = "00"
+    except Exception as e:
+        date = datetime.now().strftime('%Y%m%d')
+        hour = datetime.now().strftime('%H')  # zero-padded string, so the skip check below matches
+        # mm = datetime.now().strftime('%M')
+        mm = "00"
+        log_.info("no CLI args, falling back to system time: {}".format(e))
+    log_.info("time params - date: {}, hour: {}".format(date, str(hour)))
+    if hour in ["00", "01"]:
+        log_.info(f"hour={hour} is in the skip list, returning.")
+        return
+    # 1. wait for the upstream table partition to be produced
+    project = "loghubods"
+    table = "alg_ad_feature_cid_action"
+    table_data_cnt = check_data(project, table, date, hour, mm)
+    if table_data_cnt == 0:
+        log_.info("upstream data {} not ready for {}/{}, waiting...".format(table, date, hour))
+        Timer(60, main).start()
+    else:
+        log_.info("upstream data ready, count={}, reading the table".format(table_data_cnt))
+        # 2. read the table and build the feature payloads
+        video_list = get_sql(project, table, date, hour, mm)
+        # 3. write to Redis
+        log_.info("number of records: {}".format(len(video_list)))
+        records_process_for_list(video_list, process_and_store, max_size=50, num_workers=8)
+
+if __name__ == '__main__':
+    log_.info("开始执行:" + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
+    main()
+    log_.info("完成执行:" + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
+
+
+
+# cd /root/zhangbo/rov-offline
+# python alg_ad_feature_01_cid2action_redis.py 20240523 19 00
+
+"""
+    !!!!!!!!!!!!!!
+    更改字段:table 表名
+            REDIS_PREFIX redis的key
+            EXPIRE_TIME redis的过期时间
+            sql 各种字段
+            record 各种字段
+            if hour in ["00"]: 哪些小时不执行
+"""

+ 26 - 0
write_redis/alg_ad_feature_01_cid2action_redis_task.sh

@@ -0,0 +1,26 @@
+source /etc/profile
+echo $ROV_OFFLINE_ENV
+
+log_dir="my_logs_alg_ad_feature_01_cid2action_redis_task"
+if [ ! -d ${log_dir} ]; then
+    mkdir ${log_dir}
+fi
+
+cur_time="`date +%Y%m%d`"
+cur_h="`date +%H`"
+echo "开始执行时间:{$(date "+%Y-%m-%d %H:%M:%S")}"
+if [[ $ROV_OFFLINE_ENV == 'test' ]]; then
+  /root/anaconda3/bin/python alg_ad_feature_01_cid2action_redis.py $cur_time $cur_h
+elif [[ $ROV_OFFLINE_ENV == 'pro' ]]; then
+  /root/anaconda3/bin/python alg_ad_feature_01_cid2action_redis.py $cur_time $cur_h
+  if [ $? -ne 0 ]; then
+    msg="写redis的任务,ad广告的cid行为,alg_ad_feature_01_cid2action_redis.py:${cur_time}-${cur_h}-something-is-wrong."
+    /root/anaconda3/bin/python utils_monitor.py ${msg}
+  fi
+fi
+echo "结束执行时间:{$(date "+%Y-%m-%d %H:%M:%S")}"
+echo "all done"
+
+
+
+#sh alg_ad_feature_01_cid2action_redis_task.sh

+ 190 - 0
write_redis/alg_ad_feature_02_vidcid2action_redis.py

@@ -0,0 +1,190 @@
+# -*- coding: utf-8 -*-
+import os
+import sys
+root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+if root_dir not in sys.path:
+    sys.path.append(root_dir)
+    print("******** sys.path ********")
+    print(sys.path)
+
+from odps import ODPS
+from threading import Timer
+import threading
+from my_utils import RedisHelper, execute_sql_from_odps
+from my_config import set_config
+from log import Log
+import json
+from datetime import datetime
+from queue import Queue
+from tqdm import tqdm
+
+
+
+
+config_, _ = set_config()
+log_ = Log()
+redis_helper = RedisHelper()
+
+REDIS_PREFIX = "redis:vid_cid_action:"
+EXPIRE_TIME = 6 * 3600
+
+def worker(queue, executor):
+    while True:
+        row = queue.get()
+        if row is None:  # stop sentinel
+            queue.task_done()
+            break
+        executor(row)
+        queue.task_done()
+def records_process_for_list(records, executor, max_size=50, num_workers=10):
+    # create a thread-safe queue; maxsize bounds memory usage
+    queue = Queue(maxsize=max_size)
+    # start the worker threads
+    threads = []
+    for _ in range(num_workers):
+        t = threading.Thread(target=worker, args=(queue, executor))
+        t.start()
+        threads.append(t)
+    # feed the records into the queue
+    for row in tqdm(records):
+        queue.put(row)
+    # send one stop sentinel per worker
+    for _ in range(num_workers):
+        queue.put(None)
+    # block until every queued item has been processed
+    queue.join()
+    # wait for the worker threads to exit
+    for t in threads:
+        t.join()
+def process_and_store(row):
+    table_key, json_str = row
+    key = REDIS_PREFIX + str(table_key)
+    expire_time = EXPIRE_TIME
+    redis_helper.set_data_to_redis(key, json_str, expire_time)
+
+def check_data(project, table,  date, hour, mm) -> int:
+    """检查数据是否准备好,输出数据条数"""
+    odps = ODPS(
+        access_id=config_.ODPS_CONFIG['ACCESSID'],
+        secret_access_key=config_.ODPS_CONFIG['ACCESSKEY'],
+        project=project,
+        endpoint=config_.ODPS_CONFIG['ENDPOINT'],
+        connect_timeout=3000,
+        read_timeout=500000,
+        pool_maxsize=1000,
+        pool_connections=1000
+    )
+    try:
+        t = odps.get_table(name=table)
+        log_.info(f"检查分区是否存在-【 dt={date} hh={hour}】")
+        check_res = t.exist_partition(partition_spec=f'dt={date},hh={hour},mm={mm}')
+        if check_res:
+            sql = f'select * from {project}.{table} where dt = {date} and hh = {hour} and mm = {mm}'
+            log_.info(sql)
+            with odps.execute_sql(sql=sql).open_reader() as reader:
+                data_count = reader.count
+        else:
+            log_.info("表{}分区{}/{}不存在".format(table, date, hour))
+            data_count = 0
+    except Exception as e:
+        log_.error("table:{},date:{},hour:{} no data. return data_count=0,报错原因是:{}".format(table, date, hour, e))
+        data_count = 0
+    return data_count
+
+def get_sql(project, table, date, hour, mm):
+    sql = '''
+    SELECT  vid, cid
+            ,exp
+            ,click
+            ,`order`  -- ORDER is reserved in MaxCompute SQL; backtick-escaped
+            ,cpa
+    FROM    {}.{}
+    WHERE   dt = '{}'
+    and     hh = '{}'
+    and     mm = '{}'
+    '''.format(
+        project, table, date, hour, mm
+    )
+    print("sql:" + sql)
+    records = execute_sql_from_odps(project=project, sql=sql)
+    video_list = []
+    with records.open_reader() as reader:
+        for record in reader:
+            key1 = record['vid']
+            key2 = record['cid']
+            key = key1+"_"+key2
+            m = dict()
+            try:
+                m["exp"] = record['exp']
+            except Exception as e:
+                log_.error(e)
+            try:
+                m["click"] = record['click']
+            except Exception as e:
+                log_.error(e)
+            try:
+                m["order"] = record['order']
+            except Exception as e:
+                log_.error(e)
+            try:
+                m["cpa"] = record['cpa']
+            except Exception as e:
+                log_.error(e)
+
+            json_str = json.dumps(m)
+            video_list.append([key, json_str])
+    return video_list
+
+
+def main():
+    try:
+        date = sys.argv[1]
+        hour = sys.argv[2]
+        # mm = sys.argv[3]
+        mm = "00"
+    except Exception as e:
+        date = datetime.now().strftime('%Y%m%d')
+        hour = datetime.now().strftime('%H')  # zero-padded string, so the skip check below matches
+        # mm = datetime.now().strftime('%M')
+        mm = "00"
+        log_.info("no CLI args, falling back to system time: {}".format(e))
+    log_.info("time params - date: {}, hour: {}".format(date, str(hour)))
+    if hour in ["00", "01"]:
+        log_.info(f"hour={hour} is in the skip list, returning.")
+        return
+    # 1. wait for the upstream table partition to be produced
+    project = "loghubods"
+    table = "alg_ad_feature_vidcid_action"
+    table_data_cnt = check_data(project, table, date, hour, mm)
+    if table_data_cnt == 0:
+        log_.info("upstream data {} not ready for {}/{}, waiting...".format(table, date, hour))
+        Timer(60, main).start()
+    else:
+        log_.info("upstream data ready, count={}, reading the table".format(table_data_cnt))
+        # 2. read the table and build the feature payloads
+        video_list = get_sql(project, table, date, hour, mm)
+        # 3. write to Redis
+        log_.info("number of records: {}".format(len(video_list)))
+        records_process_for_list(video_list, process_and_store, max_size=50, num_workers=8)
+
+if __name__ == '__main__':
+    log_.info("开始执行:" + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
+    main()
+    log_.info("完成执行:" + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
+
+
+
+# cd /root/zhangbo/rov-offline
+# python alg_ad_feature_02_vidcid2action_redis.py 20240523 19 00
+
+"""
+    !!!!!!!!!!!!!!
+    更改字段:table 表名
+            REDIS_PREFIX redis的key
+            EXPIRE_TIME redis的过期时间
+            sql 各种字段
+            record 各种字段
+            if hour in ["00"]: 哪些小时不执行
+"""

+ 26 - 0
write_redis/alg_ad_feature_02_vidcid2action_redis_task.sh

@@ -0,0 +1,26 @@
+source /etc/profile
+echo $ROV_OFFLINE_ENV
+
+log_dir="my_logs_alg_ad_feature_02_vidcid2action_redis_task"
+if [ ! -d ${log_dir} ]; then
+    mkdir ${log_dir}
+fi
+
+cur_time="`date +%Y%m%d`"
+cur_h="`date +%H`"
+echo "开始执行时间:{$(date "+%Y-%m-%d %H:%M:%S")}"
+if [[ $ROV_OFFLINE_ENV == 'test' ]]; then
+  /root/anaconda3/bin/python alg_ad_feature_02_vidcid2action_redis.py $cur_time $cur_h
+elif [[ $ROV_OFFLINE_ENV == 'pro' ]]; then
+  /root/anaconda3/bin/python alg_ad_feature_02_vidcid2action_redis.py $cur_time $cur_h
+  if [ $? -ne 0 ]; then
+    msg="写redis的任务,ad广告的vid-cid行为,alg_ad_feature_02_vidcid2action_redis.py:${cur_time}-${cur_h}-something-is-wrong."
+    /root/anaconda3/bin/python utils_monitor.py ${msg}
+  fi
+fi
+echo "结束执行时间:{$(date "+%Y-%m-%d %H:%M:%S")}"
+echo "all done"
+
+
+
+#sh alg_ad_feature_02_vidcid2action_redis_task.sh

+ 223 - 0
write_redis/alg_ad_feature_03_cid2actionv1_redis.py

@@ -0,0 +1,223 @@
+# -*- coding: utf-8 -*-
+import os
+import sys
+from multiprocessing import Process, cpu_count
+
+root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+if root_dir not in sys.path:
+    sys.path.append(root_dir)
+    print("******** sys.path ********")
+    print(sys.path)
+
+from odps import ODPS
+from threading import Timer
+import threading
+from my_utils import RedisHelper, execute_sql_from_odps
+from my_config import set_config
+from log import Log
+import json
+from datetime import datetime
+from queue import Queue
+from tqdm import tqdm
+import time
+
+
+
+
+config_, _ = set_config()
+log_ = Log()
+redis_helper = RedisHelper()
+
+REDIS_PREFIX = "redis:cid_action_v1:"
+EXPIRE_TIME = 6 * 3600
+
+def worker(queue, executor):
+    while True:
+        row = queue.get()
+        if row is None:  # stop sentinel
+            queue.task_done()
+            break
+        executor(row)
+        queue.task_done()
+def records_process_for_list(records, executor, max_size=50, num_workers=10):
+    # create a thread-safe queue; maxsize bounds memory usage
+    queue = Queue(maxsize=max_size)
+    # start the worker threads
+    threads = []
+    for _ in range(num_workers):
+        t = threading.Thread(target=worker, args=(queue, executor))
+        t.start()
+        threads.append(t)
+    # feed the records into the queue
+    for row in tqdm(records):
+        queue.put(row)
+    # send one stop sentinel per worker
+    for _ in range(num_workers):
+        queue.put(None)
+    # block until every queued item has been processed
+    queue.join()
+    # wait for the worker threads to exit
+    for t in threads:
+        t.join()
+def process_and_store(row):
+    table_key, json_str = row
+    key = REDIS_PREFIX + str(table_key)
+    expire_time = EXPIRE_TIME
+    redis_helper.set_data_to_redis(key, json_str, expire_time)
+
+def check_data(project, table,  date, hour, mm) -> int:
+    """检查数据是否准备好,输出数据条数"""
+    odps = ODPS(
+        access_id=config_.ODPS_CONFIG['ACCESSID'],
+        secret_access_key=config_.ODPS_CONFIG['ACCESSKEY'],
+        project=project,
+        endpoint=config_.ODPS_CONFIG['ENDPOINT'],
+        connect_timeout=3000,
+        read_timeout=500000,
+        pool_maxsize=1000,
+        pool_connections=1000
+    )
+    try:
+        t = odps.get_table(name=table)
+        log_.info(f"检查分区是否存在-【 dt={date} hh={hour}】")
+        check_res = t.exist_partition(partition_spec=f'dt={date},hh={hour},mm={mm}')
+        if check_res:
+            sql = f'select * from {project}.{table} where dt = {date} and hh = {hour} and mm = {mm}'
+            log_.info(sql)
+            with odps.execute_sql(sql=sql).open_reader() as reader:
+                data_count = reader.count
+        else:
+            log_.info("表{}分区{}/{}不存在".format(table, date, hour))
+            data_count = 0
+    except Exception as e:
+        log_.error("table:{},date:{},hour:{} no data. return data_count=0,报错原因是:{}".format(table, date, hour, e))
+        data_count = 0
+    return data_count
+
+def get_sql(project, table, date, hour, mm):
+    sql = '''
+    SELECT  cid
+            ,exp
+            ,click
+            ,`order`  -- ORDER is reserved in MaxCompute SQL; backtick-escaped
+            ,cpa
+    FROM    {}.{}
+    WHERE   dt = '{}'
+    and     hh = '{}'
+    and     mm = '{}'
+    '''.format(
+        project, table, date, hour, mm
+    )
+    print("sql:" + sql)
+    records = execute_sql_from_odps(project=project, sql=sql)
+    video_list = []
+    with records.open_reader() as reader:
+        for record in reader:
+            key = record['cid']
+            m = dict()
+            try:
+                m["exp"] = record['exp']
+            except Exception as e:
+                log_.error(e)
+            try:
+                m["click"] = record['click']
+            except Exception as e:
+                log_.error(e)
+            try:
+                m["order"] = record['order']
+            except Exception as e:
+                log_.error(e)
+            try:
+                m["cpa"] = record['cpa']
+            except Exception as e:
+                log_.error(e)
+
+            json_str = json.dumps(m)
+            video_list.append([key, json_str])
+    return video_list
+
+
+def main():
+    try:
+        date = sys.argv[1]
+        hour = sys.argv[2]
+        # mm = sys.argv[3]
+        mm = "00"
+    except Exception as e:
+        date = datetime.now().strftime('%Y%m%d')
+        hour = datetime.now().strftime('%H')  # zero-padded string, so the skip check below matches
+        # mm = datetime.now().strftime('%M')
+        mm = "00"
+        log_.info("no CLI args, falling back to system time: {}".format(e))
+    log_.info("time params - date: {}, hour: {}".format(date, str(hour)))
+    if hour in ["00", "01", "02", "03", "04", "05", "06"]:
+        log_.info(f"hour={hour} is in the skip list, returning.")
+        return
+    # 1. wait for the upstream table partition to be produced, with a hard deadline
+    project = "loghubods"
+    table = "alg_ad_feature_cid_action_v1"
+    run_flag = True
+    begin_ts = int(time.time())
+    table_data_cnt = 0
+    while run_flag:
+        if int(time.time()) - begin_ts >= 60 * 40:
+            log_.info("waited over 40 minutes for upstream data, giving up: {} seconds elapsed.".format(int(time.time()) - begin_ts))
+            exit(999)
+        table_data_cnt = check_data(project, table, date, hour, mm)
+        if table_data_cnt == 0:
+            log_.info("upstream data {} not ready for {}/{}, waiting...".format(table, date, hour))
+            log_.info("sleeping 2 minutes")
+            time.sleep(60 * 2)
+        else:
+            run_flag = False
+
+    log_.info("上游数据就绪,count={},开始读取数据表".format(table_data_cnt))
+    # 2 读取数据表 处理特征
+    video_list = get_sql(project, table, date, hour, mm)
+    # 3 写入redis
+    log_.info("video的数据量:{}".format(len(video_list)))
+    records_process_for_list(video_list, process_and_store, max_size=50, num_workers=8)
+
+    # if table_data_cnt == 0:
+    #     log_.info("upstream data {} not ready for {}/{}, waiting...".format(table, date, hour))
+    #     Timer(60, main).start()
+    # else:
+    #     log_.info("upstream data ready, count={}, reading the table".format(table_data_cnt))
+    #     # 2. read the table and build the feature payloads
+    #     video_list = get_sql(project, table, date, hour, mm)
+    #     # 3. write to Redis
+    #     log_.info("number of records: {}".format(len(video_list)))
+    #     records_process_for_list(video_list, process_and_store, max_size=50, num_workers=8)
+
+if __name__ == '__main__':
+    log_.info("开始执行:" + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
+    # main()
+    process = Process(target=main)
+    process.start()
+    # 等待子进程完成或超时
+    timeout = 3600
+    process.join(timeout=timeout)  # 设置超时为3600秒(1小时)
+    if process.is_alive():
+        print("脚本执行时间超过1小时,执行失败,经过了{}秒。".format(timeout))
+        process.terminate()  # 终止子进程
+        exit(999)  # 直接退出主进程并返回状态码999
+    log_.info("完成执行:" + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
+
+
+
+# cd /root/zhangbo/rov-offline
+# python alg_ad_feature_03_cid2actionv1_redis.py 20240605 19 00
+
+"""
+    !!!!!!!!!!!!!!
+    更改字段:table 表名
+            REDIS_PREFIX redis的key
+            EXPIRE_TIME redis的过期时间
+            sql 各种字段
+            record 各种字段
+            if hour in ["00"]: 哪些小时不执行
+            process.join(timeout=3600) 任务超时时间3600
+            int(time.time()) - begin_ts >= 60*50 任务超时时间3000
+"""

+ 26 - 0
write_redis/alg_ad_feature_03_cid2actionv1_redis_task.sh

@@ -0,0 +1,26 @@
+source /etc/profile
+echo $ROV_OFFLINE_ENV
+
+log_dir="my_logs_alg_ad_feature_03_cid2actionv1_redis_task"
+if [ ! -d ${log_dir} ]; then
+    mkdir ${log_dir}
+fi
+
+cur_time="`date +%Y%m%d`"
+cur_h="`date +%H`"
+echo "开始执行时间:{$(date "+%Y-%m-%d %H:%M:%S")}"
+if [[ $ROV_OFFLINE_ENV == 'test' ]]; then
+  /root/anaconda3/bin/python alg_ad_feature_03_cid2actionv1_redis.py $cur_time $cur_h
+elif [[ $ROV_OFFLINE_ENV == 'pro' ]]; then
+  /root/anaconda3/bin/python alg_ad_feature_03_cid2actionv1_redis.py $cur_time $cur_h
+  if [ $? -ne 0 ]; then
+    msg="写redis的任务,ad广告的cid行为,alg_ad_feature_03_cid2actionv1_redis.py:${cur_time}-${cur_h}-something-is-wrong."
+    /root/anaconda3/bin/python utils_monitor.py ${msg}
+  fi
+fi
+echo "结束执行时间:{$(date "+%Y-%m-%d %H:%M:%S")}"
+echo "all done"
+
+
+
+#sh alg_ad_feature_03_cid2actionv1_redis_task.sh

+ 212 - 0
write_redis/alg_ad_feature_04_vidcid2actionv1_redis.py

@@ -0,0 +1,212 @@
+# -*- coding: utf-8 -*-
+import os
+import sys
+root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+if root_dir not in sys.path:
+    sys.path.append(root_dir)
+    print("******** sys.path ********")
+    print(sys.path)
+
+from multiprocessing import Process
+from odps import ODPS
+from threading import Timer
+import threading
+from my_utils import RedisHelper, execute_sql_from_odps
+from my_config import set_config
+from log import Log
+import json
+from datetime import datetime
+from queue import Queue
+from tqdm import tqdm
+import time
+
+
+
+
+config_, _ = set_config()
+log_ = Log()
+redis_helper = RedisHelper()
+
+REDIS_PREFIX = "redis:vid_cid_action_v1:"
+EXPIRE_TIME = 6 * 3600
+
+def worker(queue, executor):
+    while True:
+        row = queue.get()
+        if row is None:  # stop sentinel
+            queue.task_done()
+            break
+        executor(row)
+        queue.task_done()
+def records_process_for_list(records, executor, max_size=50, num_workers=10):
+    # create a thread-safe queue; maxsize bounds memory usage
+    queue = Queue(maxsize=max_size)
+    # start the worker threads
+    threads = []
+    for _ in range(num_workers):
+        t = threading.Thread(target=worker, args=(queue, executor))
+        t.start()
+        threads.append(t)
+    # feed the records into the queue
+    for row in tqdm(records):
+        queue.put(row)
+    # send one stop sentinel per worker
+    for _ in range(num_workers):
+        queue.put(None)
+    # block until every queued item has been processed
+    queue.join()
+    # wait for the worker threads to exit
+    for t in threads:
+        t.join()
+def process_and_store(row):
+    table_key, json_str = row
+    key = REDIS_PREFIX + str(table_key)
+    expire_time = EXPIRE_TIME
+    redis_helper.set_data_to_redis(key, json_str, expire_time)
+
+def check_data(project, table,  date, hour, mm) -> int:
+    """检查数据是否准备好,输出数据条数"""
+    odps = ODPS(
+        access_id=config_.ODPS_CONFIG['ACCESSID'],
+        secret_access_key=config_.ODPS_CONFIG['ACCESSKEY'],
+        project=project,
+        endpoint=config_.ODPS_CONFIG['ENDPOINT'],
+        connect_timeout=3000,
+        read_timeout=500000,
+        pool_maxsize=1000,
+        pool_connections=1000
+    )
+    try:
+        t = odps.get_table(name=table)
+        log_.info(f"检查分区是否存在-【 dt={date} hh={hour}】")
+        check_res = t.exist_partition(partition_spec=f'dt={date},hh={hour},mm={mm}')
+        if check_res:
+            sql = f'select * from {project}.{table} where dt = {date} and hh = {hour} and mm = {mm}'
+            log_.info(sql)
+            with odps.execute_sql(sql=sql).open_reader() as reader:
+                data_count = reader.count
+        else:
+            log_.info("表{}分区{}/{}不存在".format(table, date, hour))
+            data_count = 0
+    except Exception as e:
+        log_.error("table:{},date:{},hour:{} no data. return data_count=0,报错原因是:{}".format(table, date, hour, e))
+        data_count = 0
+    return data_count
+
+def get_sql(project, table, date, hour, mm):
+    sql = '''
+    SELECT  vid, cid
+            ,exp
+            ,click
+            ,`order`  -- ORDER is reserved in MaxCompute SQL; backtick-escaped
+            ,cpa
+    FROM    {}.{}
+    WHERE   dt = '{}'
+    and     hh = '{}'
+    and     mm = '{}'
+    '''.format(
+        project, table, date, hour, mm
+    )
+    print("sql:" + sql)
+    records = execute_sql_from_odps(project=project, sql=sql)
+    video_list = []
+    with records.open_reader() as reader:
+        for record in reader:
+            key1 = record['vid']
+            key2 = record['cid']
+            key = key1+"_"+key2
+            m = dict()
+            try:
+                m["exp"] = record['exp']
+            except Exception as e:
+                log_.error(e)
+            try:
+                m["click"] = record['click']
+            except Exception as e:
+                log_.error(e)
+            try:
+                m["order"] = record['order']
+            except Exception as e:
+                log_.error(e)
+            try:
+                m["cpa"] = record['cpa']
+            except Exception as e:
+                log_.error(e)
+
+            json_str = json.dumps(m)
+            video_list.append([key, json_str])
+    return video_list
+
+
+def main():
+    try:
+        date = sys.argv[1]
+        hour = sys.argv[2]
+        # mm = sys.argv[3]
+        mm = "00"
+    except Exception as e:
+        date = datetime.now().strftime('%Y%m%d')
+        hour = datetime.now().strftime('%H')  # zero-padded string, so the skip check below matches
+        # mm = datetime.now().strftime('%M')
+        mm = "00"
+        log_.info("no CLI args, falling back to system time: {}".format(e))
+    log_.info("time params - date: {}, hour: {}".format(date, str(hour)))
+    if hour in ["00", "01", "02", "03", "04", "05", "06"]:
+        log_.info(f"hour={hour} is in the skip list, returning.")
+        return
+    # 1. wait for the upstream table partition to be produced, with a hard deadline
+    project = "loghubods"
+    table = "alg_ad_feature_vidcid_action_v1"
+    run_flag = True
+    begin_ts = int(time.time())
+    table_data_cnt = 0
+    while run_flag:
+        if int(time.time()) - begin_ts >= 60 * 40:
+            log_.info("waited over 40 minutes for upstream data, giving up: {} seconds elapsed.".format(int(time.time()) - begin_ts))
+            exit(999)
+        table_data_cnt = check_data(project, table, date, hour, mm)
+        if table_data_cnt == 0:
+            log_.info("upstream data {} not ready for {}/{}, waiting...".format(table, date, hour))
+            log_.info("sleeping 2 minutes")
+            time.sleep(60 * 2)
+        else:
+            run_flag = False
+
+    log_.info("上游数据就绪,count={},开始读取数据表".format(table_data_cnt))
+    # 2 读取数据表 处理特征
+    video_list = get_sql(project, table, date, hour, mm)
+    # 3 写入redis
+    log_.info("video的数据量:{}".format(len(video_list)))
+    records_process_for_list(video_list, process_and_store, max_size=50, num_workers=8)
+
+if __name__ == '__main__':
+    log_.info("开始执行:" + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
+    process = Process(target=main)
+    process.start()
+    # 等待子进程完成或超时
+    timeout = 3600
+    process.join(timeout=timeout)  # 设置超时为3600秒(1小时)
+    if process.is_alive():
+        print("脚本执行时间超过1小时,执行失败,经过了{}秒。".format(timeout))
+        process.terminate()  # 终止子进程
+        exit(999)  # 直接退出主进程并返回状态码999
+    log_.info("完成执行:" + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
+
+
+
+# cd /root/zhangbo/rov-offline
+# python alg_ad_feature_04_vidcid2actionv1_redis.py 20240605 19
+
+"""
+    !!!!!!!!!!!!!!
+    更改字段:table 表名
+            REDIS_PREFIX redis的key
+            EXPIRE_TIME redis的过期时间
+            sql 各种字段
+            record 各种字段
+            if hour in ["00"]: 哪些小时不执行
+            process.join(timeout=3600) 任务超时时间3600
+            int(time.time()) - begin_ts >= 60*50 任务超时时间3000
+"""

+ 26 - 0
write_redis/alg_ad_feature_04_vidcid2actionv1_redis_task.sh

@@ -0,0 +1,26 @@
+source /etc/profile
+echo $ROV_OFFLINE_ENV
+
+log_dir="my_logs_alg_ad_feature_04_vidcid2actionv1_redis_task"
+if [ ! -d ${log_dir} ]; then
+    mkdir ${log_dir}
+fi
+
+cur_time="`date +%Y%m%d`"
+cur_h="`date +%H`"
+echo "开始执行时间:{$(date "+%Y-%m-%d %H:%M:%S")}"
+if [[ $ROV_OFFLINE_ENV == 'test' ]]; then
+  /root/anaconda3/bin/python alg_ad_feature_04_vidcid2actionv1_redis.py $cur_time $cur_h
+elif [[ $ROV_OFFLINE_ENV == 'pro' ]]; then
+  /root/anaconda3/bin/python alg_ad_feature_04_vidcid2actionv1_redis.py $cur_time $cur_h
+  if [ $? -ne 0 ]; then
+    msg="写redis的任务,ad广告的vid-cid行为,alg_ad_feature_04_vidcid2actionv1_redis.py:${cur_time}-${cur_h}-something-is-wrong."
+    /root/anaconda3/bin/python utils_monitor.py ${msg}
+  fi
+fi
+echo "结束执行时间:{$(date "+%Y-%m-%d %H:%M:%S")}"
+echo "all done"
+
+
+
+#sh alg_ad_feature_04_vidcid2actionv1_redis_task.sh

+ 4 - 0
write_redis/alg_recsys_recall_01_tag2vids_redis_task.sh

@@ -13,6 +13,10 @@ if [[ $ROV_OFFLINE_ENV == 'test' ]]; then
   /root/anaconda3/bin/python alg_recsys_recall_01_tag2vids_redis.py $cur_time $cur_h
 elif [[ $ROV_OFFLINE_ENV == 'pro' ]]; then
   /root/anaconda3/bin/python alg_recsys_recall_01_tag2vids_redis.py $cur_time $cur_h
+  if [ $? -ne 0 ]; then
+    msg="写redis的任务,tag到vids的召回,alg_recsys_recall_01_tag2vids_redis.py:${cur_time}-${cur_h}-something-is-wrong."
+    /root/anaconda3/bin/python utils_monitor.py ${msg}
+  fi
 fi
 echo "结束执行时间:{$(date "+%Y-%m-%d %H:%M:%S")}"
 echo "all done"

+ 173 - 0
write_redis/alg_recsys_recall_02_cfrovn_redis.py

@@ -0,0 +1,173 @@
+# -*- coding: utf-8 -*-
+import os
+import sys
+root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+if root_dir not in sys.path:
+    sys.path.append(root_dir)
+    print("******** sys.path ********")
+    print(sys.path)
+
+from odps import ODPS
+from threading import Timer
+import threading
+from my_utils import RedisHelper, execute_sql_from_odps
+from my_config import set_config
+from log import Log
+import json
+from datetime import datetime
+from queue import Queue
+from tqdm import tqdm
+
+
+
+
+config_, _ = set_config()
+log_ = Log()
+redis_helper = RedisHelper()
+
+REDIS_PREFIX = "redis:cf_rovn_vid:"
+EXPIRE_TIME = 5 * 3600
+
+def worker(queue, executor):
+    while True:
+        row = queue.get()
+        if row is None:  # stop sentinel
+            queue.task_done()
+            break
+        executor(row)
+        queue.task_done()
+def records_process_for_list(records, executor, max_size=50, num_workers=10):
+    # create a thread-safe queue; maxsize bounds memory usage
+    queue = Queue(maxsize=max_size)
+    # start the worker threads
+    threads = []
+    for _ in range(num_workers):
+        t = threading.Thread(target=worker, args=(queue, executor))
+        t.start()
+        threads.append(t)
+    # feed the records into the queue
+    for row in tqdm(records):
+        queue.put(row)
+    # send one stop sentinel per worker
+    for _ in range(num_workers):
+        queue.put(None)
+    # block until every queued item has been processed
+    queue.join()
+    # wait for the worker threads to exit
+    for t in threads:
+        t.join()
+def process_and_store(row):
+    table_key, json_str = row
+    key = REDIS_PREFIX + str(table_key)
+    expire_time = EXPIRE_TIME
+    redis_helper.set_data_to_redis(key, json_str, expire_time)
+
+def check_data(project, table,  date, hour) -> int:
+    """检查数据是否准备好,输出数据条数"""
+    odps = ODPS(
+        access_id=config_.ODPS_CONFIG['ACCESSID'],
+        secret_access_key=config_.ODPS_CONFIG['ACCESSKEY'],
+        project=project,
+        endpoint=config_.ODPS_CONFIG['ENDPOINT'],
+        connect_timeout=3000,
+        read_timeout=500000,
+        pool_maxsize=1000,
+        pool_connections=1000
+    )
+    try:
+        t = odps.get_table(name=table)
+        log_.info(f"检查分区是否存在-【 dt={date} hh={hour}】")
+        check_res = t.exist_partition(partition_spec=f'dt={date},hh={hour}')
+        if check_res:
+            sql = f'select * from {project}.{table} where dt = {date} and hh = {hour}'
+            log_.info(sql)
+            with odps.execute_sql(sql=sql).open_reader() as reader:
+                data_count = reader.count
+        else:
+            log_.info("表{}分区{}/{}不存在".format(table, date, hour))
+            data_count = 0
+    except Exception as e:
+        log_.error("table:{},date:{},hour:{} no data. return data_count=0,报错原因是:{}".format(table, date, hour, e))
+        data_count = 0
+    return data_count
+
+def get_sql(project, table, date, hour):
+    sql = '''
+    SELECT  vid
+            ,videoid_arr
+            ,score_arr
+    FROM    {}.{}
+    WHERE   dt = '{}'
+    and     hh = '{}'
+    '''.format(
+        project, table, date, hour
+    )
+    print("sql:" + sql)
+    records = execute_sql_from_odps(project=project, sql=sql)
+    video_list = []
+    with records.open_reader() as reader:
+        for record in reader:
+            key = record['vid']
+            m = dict()
+            try:
+                m["videoid_arr"] = record['videoid_arr']
+            except Exception as e:
+                log_.error(e)
+            try:
+                m["score_arr"] = record['score_arr']
+            except Exception as e:
+                log_.error(e)
+
+            json_str = json.dumps(m)
+            video_list.append([key, json_str])
+    return video_list
+
+
+def main():
+    try:
+        date = sys.argv[1]
+        hour = sys.argv[2]
+    except Exception as e:
+        date = datetime.now().strftime('%Y%m%d')
+        hour = datetime.now().strftime('%H')  # zero-padded string, so the skip check below matches
+        log_.info("no CLI args, falling back to system time: {}".format(e))
+    log_.info("time params - date: {}, hour: {}".format(date, str(hour)))
+    if hour in ["00", "01", "02"]:
+        log_.info(f"hour={hour} is in the skip list, returning.")
+        return
+    # 1. wait for the upstream table partition to be produced
+    project = "loghubods"
+    table = "alg_recsys_recall_cf_rovn"
+    table_data_cnt = check_data(project, table, date, hour)
+    if table_data_cnt == 0:
+        log_.info("upstream data {} not ready for {}/{}, waiting...".format(table, date, hour))
+        Timer(60, main).start()
+    else:
+        log_.info("upstream data ready, count={}, reading the table".format(table_data_cnt))
+        # 2. read the table and build the feature payloads
+        video_list = get_sql(project, table, date, hour)
+        # 3. write to Redis
+        log_.info("number of records: {}".format(len(video_list)))
+        records_process_for_list(video_list, process_and_store, max_size=50, num_workers=8)
+
+if __name__ == '__main__':
+    log_.info("开始执行:" + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
+    main()
+    log_.info("完成执行:" + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
+
+
+
+# cd /root/zhangbo/rov-offline
+# python alg_recsys_recall_02_cfrovn_redis.py 20240517 17
+
+"""
+    !!!!!!!!!!!!!!
+    更改字段:table 表名
+            REDIS_PREFIX redis的key
+            EXPIRE_TIME redis的过期时间
+            sql 各种字段
+            record 各种字段
+            if hour in ["00"]: 哪些小时不执行
+"""

+ 26 - 0
write_redis/alg_recsys_recall_02_cfrovn_redis_task.sh

@@ -0,0 +1,26 @@
+source /etc/profile
+echo $ROV_OFFLINE_ENV
+
+log_dir="my_logs_recall_02_cfrovn_redis_task"
+if [ ! -d ${log_dir} ]; then
+    mkdir ${log_dir}
+fi
+
+cur_time="`date +%Y%m%d`"
+cur_h="`date +%H`"
+echo "开始执行时间:{$(date "+%Y-%m-%d %H:%M:%S")}"
+if [[ $ROV_OFFLINE_ENV == 'test' ]]; then
+  /root/anaconda3/bin/python alg_recsys_recall_02_cfrovn_redis.py $cur_time $cur_h
+elif [[ $ROV_OFFLINE_ENV == 'pro' ]]; then
+  /root/anaconda3/bin/python alg_recsys_recall_02_cfrovn_redis.py $cur_time $cur_h
+  if [ $? -ne 0 ]; then
+    msg="写redis的任务,cf的rovn用于召回,alg_recsys_recall_02_cfrovn_redis.py:${cur_time}-${cur_h}-something-is-wrong."
+    /root/anaconda3/bin/python utils_monitor.py ${msg}
+  fi
+fi
+echo "结束执行时间:{$(date "+%Y-%m-%d %H:%M:%S")}"
+echo "all done"
+
+
+
+#sh alg_recsys_recall_02_cfrovn_redis_task.sh

+ 43 - 0
write_redis/utils_monitor.py

@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+import os
+import sys
+
+root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+if root_dir not in sys.path:
+    sys.path.append(root_dir)
+    print("******** sys.path ********")
+    print(sys.path)
+
+from my_config import set_config
+from my_utils import send_msg_to_feishu
+from log import Log
+import datetime
+config_, _ = set_config()
+log_ = Log()
+
+server_robot = {
+    'webhook': 'https://open.feishu.cn/open-apis/bot/v2/hook/8de4de35-30ed-4692-8854-7a154e89b2f2',
+    'key_word': '服务报警'  # Feishu bot keyword filter ("service alert"); left untranslated because the bot matches on it
+}
+def _monitor(dt, hh, msg):
+    """rov模型预测列表"""
+    if hh > 6:
+        msg_text = f"\n- 所属项目: rov-offline" \
+                   f"\n- 告警名称: 离线更新数据不符合预期" \
+                   f"\n- 所属环境: {config_.ENV_TEXT}" \
+                   f"\n- now_date: {dt}" \
+                   f"\n- now_h: {hh}" \
+                   f"\n- 告警描述: {msg}"
+        log_.info(f"msg_text = {msg_text}")
+        send_msg_to_feishu(
+            webhook=server_robot.get('webhook'),
+            key_word=server_robot.get('key_word'),
+            msg_text=msg_text
+        )
+
+if __name__ == '__main__':
+    dt = datetime.datetime.today().strftime('%Y%m%d')
+    hh = datetime.datetime.now().hour
+    msg = sys.argv[1]
+    _monitor(dt, hh, msg)
+    log_.info("end")
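Editor's note: `send_msg_to_feishu` lives in my_utils and is not shown in this diff. If it wraps the standard Feishu custom-bot webhook (an assumption, not confirmed by the source), the POST body would look roughly like this — the bot's keyword filter is also why '服务报警' must appear in the delivered text:

```python
# Sketch of a Feishu custom-bot text message; assumes the standard bot API,
# not the actual implementation of my_utils.send_msg_to_feishu.
import requests

def post_feishu_text(webhook: str, key_word: str, text: str) -> None:
    # the bot only accepts messages containing its configured keyword
    payload = {"msg_type": "text", "content": {"text": key_word + text}}
    resp = requests.post(webhook, json=payload, timeout=5)
    resp.raise_for_status()
```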