# alg_recsys_feature_08_vidh24predv2_redis.py
  1. # -*- coding: utf-8 -*-
  2. import os
  3. import sys
  4. import traceback
  5. root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
  6. if root_dir not in sys.path:
  7. sys.path.append(root_dir)
  8. print("******** sys.path ********")
  9. print(sys.path)
  10. from multiprocessing import Process
  11. from odps import ODPS
  12. from threading import Timer
  13. import threading
  14. from my_utils import RedisHelper, execute_sql_from_odps
  15. from my_config import set_config
  16. from log import Log
  17. import json
  18. from datetime import datetime
  19. from queue import Queue
  20. from tqdm import tqdm
  21. import time
  22. config_, _ = set_config()
  23. log_ = Log()
  24. redis_helper = RedisHelper()
  25. REDIS_PREFIX = "redis:vid_vovh24pred_time:"
  26. EXPIRE_TIME = 3 * 3600
  27. global PARTITION
  28. def worker(queue, executor):
  29. while True:
  30. row = queue.get()
  31. if row is None: # 结束信号
  32. queue.task_done()
  33. break
  34. executor(row)
  35. queue.task_done()
  36. def records_process_for_list(records, executor, max_size=50, num_workers=10):
  37. # 创建一个线程安全的队列
  38. queue = Queue(maxsize=max_size) # 可以调整 maxsize 以控制内存使用
  39. # 设置线程池大小
  40. num_workers = num_workers
  41. # 启动工作线程
  42. threads = []
  43. for _ in range(num_workers):
  44. t = threading.Thread(target=worker, args=(queue, executor))
  45. t.start()
  46. threads.append(t)
  47. # 读取数据并放入队列
  48. for row in tqdm(records):
  49. queue.put(row)
  50. # 发送结束信号
  51. for _ in range(num_workers):
  52. queue.put(None)
  53. # 等待所有任务完成
  54. queue.join()
  55. # 等待所有工作线程结束
  56. for t in threads:
  57. t.join()
  58. def process_and_store(row):
  59. table_key, json_str = row
  60. key = REDIS_PREFIX + PARTITION + ":" + str(table_key)
  61. expire_time = EXPIRE_TIME
  62. # print(key + "\t" + json_str)
  63. redis_helper.set_data_to_redis(key, json_str, expire_time)
  64. def check_data(project, table, date, hour) -> int:
  65. """检查数据是否准备好,输出数据条数"""
  66. odps = ODPS(
  67. access_id=config_.ODPS_CONFIG['ACCESSID'],
  68. secret_access_key=config_.ODPS_CONFIG['ACCESSKEY'],
  69. project=project,
  70. endpoint=config_.ODPS_CONFIG['ENDPOINT'],
  71. connect_timeout=3000,
  72. read_timeout=500000,
  73. pool_maxsize=1000,
  74. pool_connections=1000
  75. )
  76. try:
  77. t = odps.get_table(name=table)
  78. log_.info(f"检查分区是否存在-【 dt={date} hh={hour}】")
  79. check_res = t.exist_partition(partition_spec=f'dt={date},hh={hour}')
  80. if check_res:
  81. sql = f'select * from {project}.{table} where dt = {date} and hh = {hour}'
  82. log_.info(sql)
  83. with odps.execute_sql(sql=sql).open_reader() as reader:
  84. data_count = reader.count
  85. else:
  86. log_.info("表{}分区{}/{}不存在".format(table, date, hour))
  87. data_count = 0
  88. except Exception as e:
  89. log_.error("table:{},date:{},hour:{} no data. return data_count=0,报错原因是:{}".format(table, date, hour, e))
  90. data_count = 0
  91. return data_count
  92. def get_sql(project, table, date, hour):
  93. sql = '''
  94. SELECT vid
  95. ,feature
  96. FROM {}.{}
  97. WHERE dt = '{}'
  98. and hh = '{}'
  99. '''.format(
  100. project, table, date, hour
  101. )
  102. print("sql:" + sql)
  103. records = execute_sql_from_odps(project=project, sql=sql)
  104. video_list = []
  105. with records.open_reader() as reader:
  106. for record in reader:
  107. key = record['vid']
  108. m = dict()
  109. try:
  110. json_str = record['feature']
  111. except Exception as e:
  112. json_str = json.dumps(m)
  113. log_.error(e)
  114. log_.error(traceback.format_exc())
  115. video_list.append([key, json_str])
  116. return video_list
  117. def main():
  118. try:
  119. date = sys.argv[1]
  120. hour = sys.argv[2]
  121. except Exception as e:
  122. date = datetime.now().strftime('%Y%m%d')
  123. hour = datetime.now().hour
  124. log_.info("没有读取到参数,采用系统时间:{}".format(e))
  125. log_.info("使用时间参数-日期:{},小时:{}".format(date, str(hour)))
  126. if hour in ["00", "01", "02", "03"]:
  127. log_.info(f"hour={hour}不执行,直接返回。")
  128. partition = str(date) + str(hour)
  129. redis_helper.set_data_to_redis(
  130. key_name=REDIS_PREFIX + "partition",
  131. value=partition,
  132. expire_time=EXPIRE_TIME * 3
  133. )
  134. return
  135. # 1 判断上游数据表是否生产完成
  136. project = "loghubods"
  137. table = "alg_vid_vov_hour"
  138. run_flag = True
  139. begin_ts = int(time.time())
  140. table_data_cnt = 0
  141. while run_flag:
  142. if int(time.time()) - begin_ts >= 60 * 20:
  143. log_.info("等待上游数据超过40分钟了,认为失败退出:过了{}秒。".format(int(time.time()) - begin_ts))
  144. sys.exit(1)
  145. table_data_cnt = check_data(project, table, date, hour)
  146. if table_data_cnt == 0:
  147. log_.info("上游数据{}未就绪{}/{},等待...".format(table, date, hour))
  148. log_.info("等待2分钟")
  149. time.sleep(60 * 2)
  150. else:
  151. run_flag = False
  152. log_.info("上游数据就绪,count={},开始读取数据表".format(table_data_cnt))
  153. # 2 读取数据表 处理特征
  154. video_list = get_sql(project, table, date, hour)
  155. # 3 写入redis
  156. log_.info("video的数据量:{}".format(len(video_list)))
  157. partition = str(date) + str(hour)
  158. global PARTITION
  159. PARTITION = partition
  160. records_process_for_list(video_list, process_and_store, max_size=50, num_workers=8)
  161. # 4 定义时间的key 独立写入redis
  162. redis_helper.set_data_to_redis(
  163. key_name=REDIS_PREFIX + "partition",
  164. value=partition,
  165. expire_time=EXPIRE_TIME*3
  166. )
  167. if __name__ == '__main__':
  168. log_.info("开始执行:" + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
  169. process = Process(target=main)
  170. process.start()
  171. # 等待子进程完成或超时
  172. timeout = 60 * 40
  173. process.join(timeout=timeout) # 设置超时为3600秒(1小时)
  174. if process.is_alive():
  175. print("脚本执行时间超过1小时,执行失败,经过了{}秒。".format(timeout))
  176. process.terminate() # 终止子进程
  177. sys.exit(1) # 直接退出主进程并返回状态码999
  178. else:
  179. # 检查子进程的返回值
  180. exit_code = process.exitcode
  181. if exit_code != 0:
  182. print(f"子进程以状态码 {exit_code} 退出,执行失败。")
  183. os._exit(exit_code) # 将子进程的状态码返回到主进程
  184. else:
  185. print("子进程正常结束。")
  186. os._exit(0)
  187. # cd /root/zhangbo/rov-offline
  188. # python alg_recsys_feature_08_vidh24predv2_redis.py 20241107 14
  189. """
  190. !!!!!!!!!!!!!! -----按小时更新-----
  191. 更改字段:table 表名
  192. 新增了一个partition作为redis的key拼接
  193. REDIS_PREFIX redis的key
  194. EXPIRE_TIME redis的过期时间
  195. 两段sql 各种字段 注意分区是否有“分钟”
  196. record 各种字段
  197. if hour in ["00"]: 哪些小时不执行
process.join(timeout=60 * 40) 任务超时时间2400
int(time.time()) - begin_ts >= 60*20 任务超时时间1200
  200. """