import asyncio
import sys
import time
import requests
import utils
import logging
import os
import docker
import rov_server_config
from concurrent.futures import ThreadPoolExecutor

health_instances = []
ess_instances = []

# Commented-out reference copy of the parameters this script reads from rov_server_config:
#
# slb_id = 'lb-bp1werfophtsjzfr76njm'
#
# # Credentials allowed to modify SLB weights
# slb_client_params = {
#     'access_key_id': 'LTAIuPbTPL3LDDKN',
#     'access_key_secret': 'ORcNedKwWuwVtcq4IRFtUDZgS0b1le',
#     'region_id': 'cn-hangzhou'
# }
# # Credentials allowed to purchase instances
# create_client_params = {
#     'access_key_id': 'LTAI4GBWbFvvXoXsSVBe1o9f',
#     'access_key_secret': 'kRAikWitb4kDxaAyBqNrmLmllMEDO3',
#     'region_id': 'cn-hangzhou'
# }
#
# # Instance configuration
# instance_config = {
#     # Image to use
#     'image_id': 'm-bp1e5jx8eqhq22l91xw7',
#     # Instance type
#     'instance_type': 'ecs.ic5.xlarge',
#     # vSwitch to attach
#     'vswitch_id': 'vsw-bp19lpjwtc6j0p0m9mdc2',
#     # Security group in the current VPC
#     'security_group_id': 'sg-bp1irhrkr4vfj272hk4y',
#     # Disk size in GB
#     'disk_size': '200',
#     # Instance name
#     'instance_name': 'ESS-rov-server-[1,2]',
#     # Zone the instances are created in
#     'zone_id': 'cn-hangzhou-h',
#     # Disk category: efficiency cloud disk
#     'disk_category': 'cloud_efficiency',
#     # SSH key pair
#     'key_pair_name': 'stuuudy'
# }
#
# # Service start script
# start_sh_dir = os.path.dirname(os.path.realpath(__file__))
# start_sh_filename = 'rov_server_start.sh'
# with open(file=os.path.join(start_sh_dir, start_sh_filename), mode='r', encoding='utf-8') as rf:
#     file_content = rf.read()
# start_sh = {
#     'target_dir': '/home/piaoquan_server_sh',
#     'name': start_sh_filename,
#     'content': file_content,
# }


def server_health_check(client, instance_id):
    """
    Poll the service health-check endpoint on one instance until it answers 200.
    :param client: client connection
    :param instance_id: instance ID
    :return: None; healthy instances are appended to the global health_instances list
    """
    global health_instances
    ip_address = utils.get_ip_address(client=client, instance_id=instance_id)
    while True:
        health_check_url = f"http://{ip_address}:5001/healthcheck"
        try:
            http_code = requests.get(health_check_url).status_code
        except Exception:
            # Service not reachable yet, e.g. the image is still being pulled
            logging.info("image is downloading")
            http_code = 0
        if http_code == 200:
            health_instances.append((instance_id, ip_address))
            break
        else:
            time.sleep(10)


def set_instance_weight_process(client, instance_id_list, weight_list):
    """
    Adjust the SLB weight of the given backend instances step by step.
    :param client: SLB client connection
    :param instance_id_list: instance ID list
    :param weight_list: weight schedule, list of (weight, sleep_time) tuples
    :return: None
    """
    for weight, sleep_time in weight_list:
        flag = True
        while flag:
            try:
                utils.set_weight_for_instances(client=client,
                                               slb_id=rov_server_config.slb_id,
                                               instance_id_list=instance_id_list,
                                               weight=weight)
                time.sleep(sleep_time)
                flag = False
            except Exception:
                time.sleep(10)
                continue
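# This script reports progress via logging.info but never configures the logging
# module, so nothing is emitted at the default WARNING level. A minimal setup
# (level and format here are assumptions, adjust as needed):
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(message)s",
)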
async def run_server(create_client, slb_client, instance_ids, max_workers):
    """
    Start the service on existing instances and attach traffic once they are healthy.
    :param create_client: ECS client connection
    :param slb_client: SLB client connection
    :param instance_ids: instance ID list
    :param max_workers: thread pool size for the health checks
    :return: None
    """
    # 1. Send the start script to the instances
    utils.send_file_to_ecs(client=create_client, instance_id_list=instance_ids, **rov_server_config.start_sh)
    logging.info(f"send start shell file finished, instances: {instance_ids}")

    # 2. Start the service
    server_start_sh = os.path.join(rov_server_config.start_sh['target_dir'], rov_server_config.start_sh['name'])
    server_start_command = f"sh {server_start_sh}"
    utils.run_command(client=create_client, instance_ids=instance_ids, command=server_start_command)

    # 3. Probe the health checks asynchronously
    global health_instances
    health_instances = []
    loop = asyncio.get_running_loop()
    executor = ThreadPoolExecutor(max_workers=max_workers)
    tasks = [
        loop.run_in_executor(executor, server_health_check, *args)
        for args in [(slb_client, instance_id) for instance_id in instance_ids]
    ]
    await asyncio.wait(tasks)
    logging.info(f"health instances count: {len(health_instances)}, {health_instances}")

    # 4. Attach traffic
    if len(health_instances) == len(instance_ids):
        # All instances passed the health check
        time.sleep(60)
        add_weight_list = [(10, 30), (20, 20), (40, 10), (60, 10), (80, 10), (100, 10)]
        set_instance_weight_process(client=slb_client, instance_id_list=instance_ids, weight_list=add_weight_list)
        logging.info(f"add weight instances count: {len(health_instances)}")
    else:
        logging.info(f"instances count: {len(instance_ids)},\nhealth instances count: {len(health_instances)}")
        sys.exit()


async def ess_instance(create_client, slb_client, ess_count, max_workers):
    """
    Scale out: create new instances and start the service on them.
    :param create_client: client used to purchase instances
    :param slb_client: client used to modify SLB weights
    :param ess_count: number of instances to add
    :param max_workers: thread pool size for the health checks
    :return: None
    """
    # 1. Purchase and start the instances
    ess_instance_ids = utils.create_multiple_instances(
        amount=ess_count,
        client=create_client,
        **rov_server_config.instance_config,
    )
    time.sleep(60)

    # 2. Send the start script to the instances
    utils.send_file_to_ecs(client=create_client, instance_id_list=ess_instance_ids, **rov_server_config.start_sh)
    logging.info(f"send start shell file finished, instances: {ess_instance_ids}")

    # 3. Start the service
    server_start_sh = os.path.join(rov_server_config.start_sh['target_dir'], rov_server_config.start_sh['name'])
    server_start_command = f"sh {server_start_sh}"
    utils.run_command(client=create_client, instance_ids=ess_instance_ids, command=server_start_command)

    # 4. Probe the health checks asynchronously
    global health_instances
    health_instances = []
    loop = asyncio.get_running_loop()
    executor = ThreadPoolExecutor(max_workers=max_workers)
    tasks = [
        loop.run_in_executor(executor, server_health_check, *args)
        for args in [(slb_client, instance_id) for instance_id in ess_instance_ids]
    ]
    await asyncio.wait(tasks)
    logging.info(f"health instances count: {len(health_instances)}, {health_instances}")

    # 5. Attach traffic
    if len(health_instances) == len(ess_instance_ids):
        # All instances passed the health check
        time.sleep(60)
        add_weight_list = [(10, 30), (20, 20), (40, 10), (60, 10), (80, 10), (100, 10)]
        # set_instance_weight_process(client=slb_client, instance_id_list=ess_instance_ids, weight_list=add_weight_list)
        global ess_instances
        ess_instances.extend(ess_instance_ids)
        logging.info(f"ess count: {ess_count}, "
                     f"create count: {len(ess_instance_ids)}, "
                     f"finished count: {len(health_instances)}")
    else:
        logging.info(f"ess count: {ess_count}, "
                     f"create count: {len(ess_instance_ids)}, "
                     f"health count: {len(health_instances)}")
        sys.exit()
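# Both deploy flows above re-attach traffic with the same gradual ramp: each
# (weight, hold_seconds) pair is applied to every backend in the group and held
# before moving to the next step. Illustrative, commented-out example (the
# instance IDs are hypothetical, not real backends):
#
#   ramp = [(10, 30), (20, 20), (40, 10), (60, 10), (80, 10), (100, 10)]
#   set_instance_weight_process(client=slb_client,
#                               instance_id_list=['i-bp1example01', 'i-bp1example02'],
#                               weight_list=ramp)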
def remove_container_image(client, instance_id, container_name):
    """
    Remove the old container and delete the old images on one instance.
    :param client: client connection
    :param instance_id: instance ID (str)
    :param container_name: container name (str)
    :return: None
    """
    ip_address = utils.get_ip_address(client=client, instance_id=instance_id)
    logging.info(f"instance info: {instance_id}/{ip_address}")
    docker_client = docker.DockerClient(base_url=f'tcp://{ip_address}:2375', timeout=60)

    # Remove the old container
    container_remove_retry = 3
    i = 0
    while i < container_remove_retry:
        try:
            container = docker_client.containers.get(container_name)
            container.remove(force=True)
            break
        except Exception:
            i += 1
            logging.warning("container does not exist or could not be removed")

    # Delete the old images
    images_remove_retry = 3
    j = 0
    while j < images_remove_retry:
        try:
            images = docker_client.images.list()
            for image in images:
                docker_client.images.remove(force=True, image=image.tags[0])
            time.sleep(2)
            break
        except Exception:
            j += 1
            logging.warning("image does not exist or its ID could not be resolved")


async def update_instance(create_client, slb_client, instance_ids, max_workers):
    """
    Roll an update over the online instances, half of the fleet at a time.
    :param create_client: ECS client connection
    :param slb_client: SLB client connection
    :param instance_ids: instance ID list
    :param max_workers: thread pool size for container removal and health checks
    :return: None
    """
    global health_instances
    mid_index = len(instance_ids) // 2
    instance_ids_group = [instance_ids[:mid_index], instance_ids[mid_index:]]
    for instance_id_list in instance_ids_group:
        # 1. Detach traffic
        set_instance_weight_process(client=slb_client, instance_id_list=instance_id_list, weight_list=[(0, 60)])
        logging.info(f"set weight with 0 finished, instances: {instance_id_list}")

        # 2. Remove old containers and images asynchronously
        container_name = 'rov-server'
        loop = asyncio.get_running_loop()
        executor = ThreadPoolExecutor(max_workers=max_workers)
        tasks = [
            loop.run_in_executor(executor, remove_container_image, *args)
            for args in [(slb_client, instance_id, container_name) for instance_id in instance_id_list]
        ]
        await asyncio.wait(tasks)
        logging.info(f"remove container & images finished, instances: {instance_id_list}")

        # 3. Send the start script to the instances
        utils.send_file_to_ecs(client=create_client, instance_id_list=instance_id_list, **rov_server_config.start_sh)
        logging.info(f"send start shell file finished, instances: {instance_id_list}")

        # 4. Start the service
        server_start_sh = os.path.join(rov_server_config.start_sh['target_dir'], rov_server_config.start_sh['name'])
        server_start_command = f"sh {server_start_sh}"
        utils.run_command(client=create_client, instance_ids=instance_id_list, command=server_start_command)

        # 5. Probe the health checks asynchronously
        health_instances = []
        loop = asyncio.get_running_loop()
        executor = ThreadPoolExecutor(max_workers=max_workers)
        tasks = [
            loop.run_in_executor(executor, server_health_check, *args)
            for args in [(slb_client, instance_id) for instance_id in instance_id_list]
        ]
        await asyncio.wait(tasks)
        logging.info(f"health instances count: {len(health_instances)}, {health_instances}")

        # 6. Attach traffic
        if len(health_instances) == len(instance_id_list):
            # All instances passed the health check
            time.sleep(60)
            add_weight_list = [(10, 30), (20, 20), (40, 10), (60, 10), (80, 10), (100, 10)]
            set_instance_weight_process(client=slb_client, instance_id_list=instance_id_list, weight_list=add_weight_list)
            logging.info(f"finished count: {len(health_instances)}")
        else:
            logging.info(f"health count: {len(health_instances)}")
            sys.exit()
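# Note: remove_container_image above talks to each instance's Docker daemon over
# the plain TCP remote API, so dockerd on the target machines must be listening
# on port 2375 (for example started with
# `dockerd -H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375`); this endpoint
# is unauthenticated and should only be reachable from the deploy host's network.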
def remove_instances(create_client, slb_client, instance_ids):
    """
    Stop and release instances.
    :param create_client: ECS client connection
    :param slb_client: SLB client connection
    :param instance_ids: instance ID list
    :return: None
    """
    # 1. Detach traffic
    set_instance_weight_process(client=slb_client, instance_id_list=instance_ids, weight_list=[(0, 60)])
    logging.info(f"set weight = 0 finished, instances: {instance_ids}")
    time.sleep(10)

    # 2. Stop the instances
    utils.stop_instances(client=create_client, instance_ids=instance_ids)
    logging.info(f"instances stop finished, instances: {instance_ids}")

    # 3. Check that every instance has reached the Stopped state
    response = utils.get_instances_status(client=create_client, instance_ids=instance_ids)
    if response.get('Code') is None:
        instances_list = response.get('InstanceStatuses').get('InstanceStatus')
        stopped_instances = [instance.get('InstanceId') for instance in instances_list
                             if instance.get('Status') == 'Stopped']
        if len(stopped_instances) == len(instance_ids):
            logging.info(f"instances stopped status set success, instances: {stopped_instances}")
        else:
            logging.info(f"stopped instances count = {len(stopped_instances)} of {len(instance_ids)}, "
                         f"instances: {stopped_instances}")
            sys.exit()
    else:
        logging.error(response)
        sys.exit()

    # 4. Release the instances
    response = utils.release_instances(client=create_client, instance_ids=stopped_instances)
    if response.get('Code') is None:
        logging.info(f"release instances finished, instances: {stopped_instances}")
    else:
        logging.error("release instances fail!!!")
        sys.exit()


def main():
    slb_client = utils.connect_client(access_key_id=rov_server_config.slb_client_params['access_key_id'],
                                      access_key_secret=rov_server_config.slb_client_params['access_key_secret'],
                                      region_id=rov_server_config.slb_client_params['region_id'])
    create_client = utils.connect_client(access_key_id=rov_server_config.create_client_params['access_key_id'],
                                         access_key_secret=rov_server_config.create_client_params['access_key_secret'],
                                         region_id=rov_server_config.create_client_params['region_id'])

    # 1. Get all instances behind the SLB
    online_instance_ids = utils.get_instance_ids(client=slb_client, slb_id=rov_server_config.slb_id)
    online_instance_count = len(online_instance_ids)
    logging.info(f"online instance count: {online_instance_count}.")
    logging.info(f"online instance ids: {online_instance_ids}")

    # 2. Scale out and start the new service; scale-out count = online count / 2
    logging.info(f"ess instances start ...")
    ess_instance_count = online_instance_count // 2
    logging.info(f"ess instance count: {ess_instance_count}")
    asyncio.run(ess_instance(create_client=create_client, slb_client=slb_client,
                             ess_count=ess_instance_count, max_workers=2))
    logging.info(f"ess instances end!")

    # # 3. Update the existing online instances
    # logging.info(f"update online instances start ...")
    # asyncio.run(update_instance(create_client=create_client, slb_client=slb_client,
    #                             instance_ids=online_instance_ids, max_workers=2))
    # logging.info(f"update online instances end!")

    # 4. Stop and release the scale-out instances
    logging.info(f"stop & release instances start ...")
    remove_instances(create_client=create_client, slb_client=slb_client, instance_ids=ess_instances)
    logging.info(f"stop & release instances end!")


if __name__ == '__main__':
    main()