import datetime
import logging
import multiprocessing
import time
import traceback
import gevent
from threading import Timer
from utils import RedisHelper, data_check, get_feature_data, send_msg_to_feishu
from config import set_config
from log import Log
# Module-level singletons shared by every function below:
# project config, logger, and the redis client used for all writes.
config_, _ = set_config()
log_ = Log()
redis_helper = RedisHelper()

# features = [
#     'apptype',
#     'return1mids',
#     'return2_3mids',
#     'return4_8mids',
#     'return9_24mids',
#     'return25_nmids',
#     'return0share1mids',
#     'return0share2_nmids'
# ]


def to_redis(group, mid_list, class_key_list, batch_size=100, expire_seconds=28 * 3600):
    """Write the group label of each mid to redis.

    For every class_key, sets key ``{KEY_NAME_PREFIX_MID_GROUP}{class_key}:{mid}``
    to ``group`` with a TTL, fanning out ``batch_size`` writes at a time via gevent.

    :param group: group name stored as the redis value
    :param mid_list: mids belonging to this group
    :param class_key_list: class keys combined with each mid to form the key name
    :param batch_size: number of concurrent gevent writes per batch (default 100,
        matching the previous hard-coded chunk size)
    :param expire_seconds: TTL for each key (default 28h, matching previous behavior)
    """
    log_.info(f"group = {group} update redis start ...")
    start_time = time.time()
    log_.info(f"mid count = {len(mid_list)}")
    # hoist the invariant prefix lookup out of the loops
    key_prefix = config_.KEY_NAME_PREFIX_MID_GROUP
    for class_key in class_key_list:
        # Step through mid_list directly; the old `len // 100 + 1` loop spawned
        # an extra empty batch whenever len(mid_list) was a multiple of 100.
        for start in range(0, len(mid_list), batch_size):
            mid_temp_list = mid_list[start:start + batch_size]
            task_list = [
                gevent.spawn(redis_helper.set_data_to_redis,
                             f"{key_prefix}{class_key}:{mid}", group, expire_seconds)
                for mid in mid_temp_list
            ]
            gevent.joinall(task_list)
    log_.info(f"group = {group}, mid count = {len(mid_list)}, update redis finished! "
              f"execute time = {(time.time() - start_time) / 60}min")


def update_user_group_to_redis(project, table, dt, app_type_list, features, ad_mid_group_key_params):
    """Update the mid -> group mapping in redis (one worker process per group).

    :param project: project of the user-group source table
    :param table: user-group source table name
    :param dt: partition date string, e.g. '20240101'
    :param app_type_list: app types to keep; other rows are dropped
    :param features: columns to read ('apptype' plus one column per group)
    :param ad_mid_group_key_params: mapping of group column name -> list of class keys
    """
    # Fetch the user-group data and keep only the requested app types.
    feature_df = get_feature_data(project=project, table=table, features=features, dt=dt)
    feature_df['apptype'] = feature_df['apptype'].astype(int)
    feature_df = feature_df[feature_df['apptype'].isin(app_type_list)]
    pool = multiprocessing.Pool(processes=len(ad_mid_group_key_params))
    for group, class_key_list in ad_mid_group_key_params.items():
        # de-duplicate and drop missing mids before writing
        mid_list = [mid for mid in set(feature_df[group].tolist()) if mid is not None]
        pool.apply_async(
            func=to_redis,
            args=(group, mid_list, class_key_list),
            # apply_async swallows worker exceptions silently unless a callback
            # is given; bind `group` as a default arg to avoid late-binding.
            error_callback=lambda e, g=group: log_.error(
                f"to_redis failed, group = {g}, exception: {e}")
        )
    pool.close()
    pool.join()


def get_group_keys_mapping(ad_mid_group):
    """Build the feature column list and the group -> class-key mapping.

    :param ad_mid_group: mapping of class_key -> list of group column names
    :return: (features, ad_mid_group_key_params) where ``features`` is
        ``['apptype']`` followed by each distinct group in first-seen order,
        and ``ad_mid_group_key_params`` maps each group to the class keys
        that reference it, in encounter order.
    """
    ad_mid_group_key_params = {}
    features = ['apptype']
    for class_key, group_list in ad_mid_group.items():
        for group in group_list:
            if group not in features:
                features.append(group)
            # Keyed on the params dict rather than `features`: the old code
            # raised KeyError if a group was literally named 'apptype'
            # (present in `features` but absent from the params dict).
            ad_mid_group_key_params.setdefault(group, []).append(class_key)
    return features, ad_mid_group_key_params


def timer_check():
    """Check whether today's user-group partition is ready and push it to redis.

    Re-schedules itself via Timer: every 60s while the data is not ready, and
    after 5 minutes on any failure. Success and failure are both reported to
    the feishu robot.
    """
    try:
        app_type_list = config_.AD_APP_TYPE_LIST
        ad_mid_group = config_.AD_MID_GROUP
        project = config_.ad_model_data['user_group'].get('project')
        table = config_.ad_model_data['user_group'].get('table')
        now_date = datetime.datetime.today()
        dt = datetime.datetime.strftime(now_date, '%Y%m%d')
        log_.info(f"now_date: {dt}")
        # Check whether today's partition has data yet.
        data_count = data_check(project=project, table=table, dt=dt)
        if data_count > 0:
            log_.info(f"user group data count = {data_count}")
            # Build the feature columns and the group -> class-key mapping.
            features, ad_mid_group_key_params = get_group_keys_mapping(ad_mid_group=ad_mid_group)
            log_.info(f"features = {features}, \nad_mid_group_key_params = {ad_mid_group_key_params}")
            # Data is ready: run the update.
            update_user_group_to_redis(project=project, table=table, dt=dt, app_type_list=app_type_list,
                                       features=features, ad_mid_group_key_params=ad_mid_group_key_params)
            log_.info("user group data update end!")
            send_msg_to_feishu(
                webhook=config_.FEISHU_ROBOT['ad_user_group_update_robot'].get('webhook'),
                key_word=config_.FEISHU_ROBOT['ad_user_group_update_robot'].get('key_word'),
                msg_text=f"\nrov-offline{config_.ENV_TEXT} - 用户分组数据更新完成\n"
            )
        else:
            # Data not ready yet: re-check in 1 minute.
            Timer(60, timer_check).start()
    except Exception as e:
        log_.error(f"用户分组数据更新失败, exception: {e}, traceback: {traceback.format_exc()}")
        send_msg_to_feishu(
            webhook=config_.FEISHU_ROBOT['ad_user_group_update_robot'].get('webhook'),
            key_word=config_.FEISHU_ROBOT['ad_user_group_update_robot'].get('key_word'),
            msg_text=f"\nrov-offline{config_.ENV_TEXT} - 用户分组数据更新失败\n"
                     f"exception: {e}\n"
                     f"traceback: {traceback.format_exc()}"
        )
        # Retry in 5 minutes.
        Timer(5 * 60, timer_check).start()


if __name__ == '__main__':
    # Entry point: kick off the first check; subsequent runs are scheduled
    # by Timer inside timer_check itself.
    timer_check()