alg_recsys_recall_4h_region_trend.py

# -*- coding: utf-8 -*-
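# Reads the 4-hour region trend recall data from the ODPS (MaxCompute) table
# loghubods.alg_recsys_recall_strategy_trend, maps each Chinese region name to
# its configured region code, and writes the per-region "sum" and "avg"
# video-id lists into Redis.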
import traceback
import datetime
from odps import ODPS
from threading import Timer
from utils import RedisHelper, get_data_from_odps, send_msg_to_feishu
from config import set_config
from log import Log
from queue import Queue
from tqdm import tqdm
import threading

config_, _ = set_config()
log_ = Log()
region_name2code: dict = config_.REGION_CODE
redis_helper = RedisHelper()
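

# worker() and records_process_for_list() form a small producer/consumer pool:
# records are pushed onto a bounded Queue and consumed by worker threads, with
# one None sentinel per worker used as the end-of-stream signal.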
def worker(queue, executor):
    while True:
        row = queue.get()
        if row is None:  # end-of-stream signal
            queue.task_done()
            break
        executor(row)
        queue.task_done()


def records_process_for_list(records, executor, max_size=50, num_workers=10):
    # Thread-safe queue; maxsize can be tuned to bound memory usage
    queue = Queue(maxsize=max_size)
    # Start the worker threads
    threads = []
    for _ in range(num_workers):
        t = threading.Thread(target=worker, args=(queue, executor))
        t.start()
        threads.append(t)
    # Feed records into the queue
    for row in tqdm(records):
        queue.put(row)
    # Send the end-of-stream signal, one per worker
    for _ in range(num_workers):
        queue.put(None)
    # Wait until all queued items have been processed
    queue.join()
    # Wait for all worker threads to exit
    for t in threads:
        t.join()
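

# A minimal usage sketch (hypothetical, for illustration only): process a list
# of dicts with 5 worker threads, where the executor simply logs each row.
#
#     def _demo_executor(row):
#         log_.info("processing row: {}".format(row))
#
#     records_process_for_list([{"id": i} for i in range(100)], _demo_executor,
#                              max_size=10, num_workers=5)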


def check_data(project, table, partition) -> int:
    """Check whether the data is ready and return the row count."""
    odps = ODPS(
        access_id=config_.ODPS_CONFIG['ACCESSID'],
        secret_access_key=config_.ODPS_CONFIG['ACCESSKEY'],
        project=project,
        endpoint=config_.ODPS_CONFIG['ENDPOINT'],
        connect_timeout=3000,
        read_timeout=500000,
        pool_maxsize=1000,
        pool_connections=1000
    )
    try:
        t = odps.get_table(name=table)
        log_.info(f"Checking whether the partition exists - [ dt={partition} ]")
        check_res = t.exist_partition(partition_spec=f'dt={partition}')
        if check_res:
            sql = f'select * from {project}.{table} where dt = {partition}'
            log_.info(sql)
            with odps.execute_sql(sql=sql).open_reader() as reader:
                data_count = reader.count
        else:
            log_.info("Table {} partition {} does not exist".format(table, partition))
            data_count = 0
    except Exception as e:
        log_.error("table: {}, partition: {} no data, return data_count=0: {}".format(table, partition, e))
        data_count = 0
    return data_count
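

# get_table_data() pulls every record of the given partition and keeps only the
# columns used downstream: region, videoid_array_sum and videoid_array_avg.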
def get_table_data(project, table, partition):
    """Fetch all data of the partition."""
    records = get_data_from_odps(date=partition, project=project, table=table)
    data = []
    for record in records:
        tmp = {}
        for col_name in ["region", "videoid_array_sum", "videoid_array_avg"]:
            tmp[col_name] = record[col_name]
        data.append(tmp)
    return data
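

# region_match() maps a Chinese region name to its configured code: it accepts
# an exact match, a substring match, or a province name with the trailing "省"
# suffix stripped, and returns None when nothing matches.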
def region_match(region_cn: str, region_name2code: dict):
    for r in region_name2code:
        if region_cn == r or region_cn in r or (
            region_cn.endswith("省") and region_cn.split("省")[0] in r
        ):
            return region_name2code[r]
    return None
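

# process_and_store() writes one region's trend data to Redis under two keys,
# alg_recsys_recall_4h_region_trend_sum_<region_code> and
# alg_recsys_recall_4h_region_trend_avg_<region_code>, each with a 4-day TTL.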
def process_and_store(row):
    region_code = row["region_code"]
    videoid_array_sum = row["videoid_array_sum"]
    videoid_array_avg = row["videoid_array_avg"]
    key1 = "alg_recsys_recall_4h_region_trend_sum_" + region_code
    key2 = "alg_recsys_recall_4h_region_trend_avg_" + region_code
    expire_time = 24 * 3600 * 4
    redis_helper.set_data_to_redis(key1, videoid_array_sum, expire_time)
    redis_helper.set_data_to_redis(key2, videoid_array_avg, expire_time)
    log_.info("trend-sum written: key={}, value={}".format(key1, videoid_array_sum))
    log_.info("trend-avg written: key={}, value={}".format(key2, videoid_array_avg))


"""
Data table link: https://dmc-cn-hangzhou.data.aliyun.com/dm/odps-table/odps.loghubods.alg_recsys_recall_strategy_trend/
"""
def h_timer_check():
    try:
        log_.info(f"Start executing: {datetime.datetime.strftime(datetime.datetime.today(), '%Y%m%d%H')}")
        # 1. Check whether the upstream table partition is ready
        project = "loghubods"
        table = "alg_recsys_recall_strategy_trend"
        partition = "2023122019"
        table_data_cnt = check_data(project, table, partition)
        if table_data_cnt == 0:
            log_.info("Upstream data {} not ready for partition {}, waiting...".format(table, partition))
            Timer(60, h_timer_check).start()
        else:
            log_.info("Upstream data ready, count={}, reading the table".format(table_data_cnt))
            # 2. Read the table
            data = get_table_data(project, table, partition)
            data_new = []
            for one in data:
                region_code = region_match(one["region"], region_name2code)
                if region_code:
                    one["region_code"] = region_code
                    data_new.append(one)
                else:
                    log_.info("{} was filtered out".format(one["region"]))
            log_.info("Data processing done, count={}, all regions: {}".format(len(data_new), ",".join([
                one["region"] for one in data
            ])))
            log_.info("Start processing and writing to Redis")
            # 3. Write to Redis (only rows that received a region_code)
            records_process_for_list(data_new, process_and_store, max_size=10, num_workers=5)
    except Exception as e:
        log_.error(f"4-hour region trend statistics update failed, exception: {e}, traceback: {traceback.format_exc()}")
        send_msg_to_feishu(
            webhook=config_.FEISHU_ROBOT['server_robot'].get('webhook'),
            key_word=config_.FEISHU_ROBOT['server_robot'].get('key_word'),
            msg_text=f"rov-offline{config_.ENV_TEXT} - 4-hour region trend statistics update failed\n"
                     f"exception: {e}\n"
                     f"traceback: {traceback.format_exc()}"
        )


if __name__ == '__main__':
    log_.info("File alg_recsys_recall_4h_region_trend.py: '4-hour region trend statistics' starting")
    h_timer_check()