import datetime
import multiprocessing
import time
import traceback
import gevent
from threading import Timer
from utils import RedisHelper, data_check, get_feature_data, send_msg_to_feishu
from config import set_config
from log import Log

config_, _ = set_config()
log_ = Log()
redis_helper = RedisHelper()

features = [
    'apptype',
    'return1mids',
    'return2_3mids',
    'return4_8mids',
    'return9_24mids',
    'return25_nmids',
    'return0share1mids',
    'return0share2_nmids'
]


def to_redis(group, mid_list):
    """Write the mid -> group mapping to Redis, 100 mids per batch."""
    log_.info(f"group = {group} update redis start ...")
    start_time = time.time()
    log_.info(f"mid count = {len(mid_list)}")
    for i in range(0, len(mid_list), 100):
        mid_temp_list = mid_list[i:i + 100]
        # Write each batch concurrently via gevent; keys expire after 26h,
        # which covers the daily refresh cycle with a margin.
        task_list = [
            gevent.spawn(redis_helper.set_data_to_redis,
                         f"{config_.KEY_NAME_PREFIX_MID_GROUP}{mid}", group, 26 * 3600)
            for mid in mid_temp_list
        ]
        gevent.joinall(task_list)
    log_.info(f"group = {group}, mid count = {len(mid_list)}, update redis finished! "
              f"execute time = {(time.time() - start_time) / 60}min")


def update_user_group_to_redis(project, table, dt, app_type_list):
    """Update each mid's group assignment in Redis."""
    # Fetch the user group data and keep only the requested app types.
    feature_df = get_feature_data(project=project, table=table, features=features, dt=dt)
    feature_df['apptype'] = feature_df['apptype'].astype(int)
    feature_df = feature_df[feature_df['apptype'].isin(app_type_list)]
    log_.info(f"feature_df count = {len(feature_df)}")
    group_list = features[1:]
    # One worker process per group; each group's mid list is deduplicated
    # and cleaned of missing values before being written.
    pool = multiprocessing.Pool(processes=len(group_list))
    for group in group_list:
        mid_list = feature_df[group].tolist()
        mid_list = list(set(mid_list))
        mid_list = [mid for mid in mid_list if mid is not None]
        pool.apply_async(func=to_redis, args=(group, mid_list))
    pool.close()
    pool.join()


def timer_check():
    try:
        app_type_list = config_.AD_APP_TYPE_LIST
        project = config_.ad_model_data['user_group'].get('project')
        table = config_.ad_model_data['user_group'].get('table')
        now_date = datetime.datetime.today()
        dt = now_date.strftime('%Y%m%d')
        log_.info(f"now_date: {dt}")
        now_min = datetime.datetime.now().minute
        # Check whether today's partition is ready.
        data_count = data_check(project=project, table=table, dt=dt)
        if data_count > 0:
            log_.info(f"user group data count = {data_count}")
            # Data is ready; run the update.
            update_user_group_to_redis(project=project, table=table, dt=dt,
                                       app_type_list=app_type_list)
            log_.info("user group data update end!")
        elif now_min > 45:
            # Past minute 45 with no data: alert via Feishu and stop retrying.
            log_.info('user group data is None!')
            send_msg_to_feishu(
                webhook=config_.FEISHU_ROBOT['server_robot'].get('webhook'),
                key_word=config_.FEISHU_ROBOT['server_robot'].get('key_word'),
                msg_text=f"rov-offline{config_.ENV_TEXT} - user group data is not ready!"
            )
        else:
            # Data not ready yet; check again in 1 minute.
            Timer(60, timer_check).start()
    except Exception as e:
        log_.error(f"user group data update failed, exception: {e}, "
                   f"traceback: {traceback.format_exc()}")
        send_msg_to_feishu(
            webhook=config_.FEISHU_ROBOT['server_robot'].get('webhook'),
            key_word=config_.FEISHU_ROBOT['server_robot'].get('key_word'),
            msg_text=f"rov-offline{config_.ENV_TEXT} - user group data update failed\n"
                     f"exception: {e}\n"
                     f"traceback: {traceback.format_exc()}"
        )


if __name__ == '__main__':
    timer_check()
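# ---------------------------------------------------------------------------
# Consumer-side sketch (illustration only, not part of this job): how a
# serving process might read back the group written by to_redis() above.
# `get_data_from_redis` is a hypothetical getter -- substitute whichever
# read method utils.RedisHelper actually exposes; only the key layout is
# taken from the writes above.
#
# def get_user_group(mid):
#     key = f"{config_.KEY_NAME_PREFIX_MID_GROUP}{mid}"
#     group = redis_helper.get_data_from_redis(key)  # hypothetical getter
#     # e.g. 'return1mids', or None once the 26-hour TTL has expired
#     return group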