# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/2/2
"""
Create virtual on-platform (站内) UIDs
https://w42nne6hzg.feishu.cn/docx/PhbhdXScYo9CxpxTT3gcOle4nIs
"""
import os
import sys
import uuid
import requests
sys.path.append(os.getcwd())
from common.common import Common
from common.db import MysqlHelper


class getUser:
    @classmethod
    def get_default_user(cls):
        """Fetch a default user profile from the user-center service."""
        url = "https://api-internal.piaoquantv.com/user-center/info/getDefaultUserInfo"
        payload = {"params": {"mid": str(uuid.uuid1())}}
        headers = {'Content-Type': 'application/json'}
        response = requests.post(url, headers=headers, json=payload).json()
        return response['data']

    @classmethod
    def create_uid(cls, log_type, crawler, user_dict, env):
        """
        Create an on-platform virtual UID
        :param log_type: log type
        :param crawler: which crawler
        :param user_dict: dict {'nickName': user name, 'avatarUrl': avatar, 'tagName': on-platform user tag}
        :param env: environment
        :return: on-platform UID
        """
        try:
            if env == 'dev':
                # public endpoint
                url = 'https://videotest.yishihui.com/longvideoapi/user/virtual/crawler/registerVirtualUser'
                # internal endpoint
                # url = 'http://videotest-internal.yishihui.com/longvideoapi/user/virtual/crawler/registerVirtualUser'
            else:  # 'prod' and any other value fall back to the production endpoint
                # public endpoint
                url = 'https://longvideoapi.piaoquantv.com/longvideoapi/user/virtual/crawler/registerVirtualUser'
                # internal endpoint
                # url = 'http://longvideoapi-internal.piaoquantv.com/longvideoapi/user/virtual/crawler/registerVirtualUser'
            params = {
                # 'count': 1,        # (required) number of accounts: pass 1
                # 'accountType': 4,  # (required) account type: pass 4 = app virtual account
                'recommendStatus': user_dict.get('recommendStatus', -6),
                'appRecommendStatus': user_dict.get('appRecommendStatus', -6),
                'pwd': '',  # password, default 12346
                'nickName': user_dict['nickName'],  # nickname, defaults to vuser......
                'avatarUrl': user_dict['avatarUrl'],  # avatar URL, defaults to http://weapppiccdn.yishihui.com/resources/images/pic_normal.png
                'tagName': user_dict['tagName'],  # multiple tags separated by commas
            }
            response = requests.post(url=url, params=params)
            # print(response.text)
            user_id = response.json()['data']
            return user_id
        except Exception as e:
            Common.logger(log_type, crawler).error(f"create_uid error: {e}\n")

    @classmethod
    def create_user(cls, log_type, crawler, out_user_dict, env, machine):
        """
        Complete the user info in the Feishu user sheet and return it
        :param log_type: log type
        :param crawler: which crawler
        :param out_user_dict: off-platform (站外) user info dict
        :param env: production: prod, test: dev
        :param machine: deployment machine; on Alibaba Cloud use aliyun or aliyun_hk; offline use macpro, macair, or local
        :return: user_dict
        """
        try:
            # Off-platform account info
            out_uid = out_user_dict['out_uid']  # off-platform uid
            user_name = out_user_dict['user_name']  # off-platform user name
            out_avatar_url = out_user_dict['out_avatar_url']  # off-platform avatar
            out_create_time = out_user_dict['out_create_time']  # off-platform registration time, format: YYYY-MM-DD HH:MM:SS
            out_tag = out_user_dict['out_tag']  # off-platform tag, e.g. 搞笑博主 (comedy blogger)
            out_play_cnt = out_user_dict['out_play_cnt']  # off-platform total play count
            out_fans = out_user_dict['out_fans']  # off-platform follower count
            out_follow = out_user_dict['out_follow']  # off-platform following count
            out_friend = out_user_dict['out_friend']  # off-platform friend count
            out_like = out_user_dict['out_like']  # off-platform like count
            platform = out_user_dict['platform']  # source platform, e.g. 小年糕, 西瓜视频
            tag = out_user_dict['tag']  # on-platform identity tag, e.g. 小年糕爬虫,小时榜爬虫策略 / 好看爬虫,频道榜爬虫策略 / youtube爬虫,定向爬虫策略

            sql = f""" select * from crawler_user where platform="{platform}" and out_user_id="{out_uid}" """
            our_user_info = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
            # If (platform + out_user_id) is not yet in the database, create an
            # on-platform UID, write it to the targeted-accounts Feishu sheet, and
            # insert it together with the off-platform user info into the crawler database
            if our_user_info is None or len(our_user_info) == 0:
                # Create the on-platform account
                create_user_dict = {
                    'nickName': user_name,
                    'avatarUrl': out_avatar_url,
                    'tagName': tag,  # e.g. 'youtube爬虫,定向爬虫策略'
                }
                our_uid = cls.create_uid(log_type, crawler, create_user_dict, env)
                Common.logger(log_type, crawler).info(f'newly created on-platform UID: {our_uid}')
                if env == 'prod':
                    our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
                else:
                    our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
                Common.logger(log_type, crawler).info(f'on-platform user profile link: {our_user_link}')

                # Write the user info to the database
                sql = f""" insert into crawler_user(user_id, out_user_id, out_user_name, out_avatar_url,
                                                    out_create_time, out_tag, out_play_cnt, out_fans,
                                                    out_follow, out_friend, out_like, platform, tag)
                           values({our_uid}, "{out_uid}", "{user_name}", "{out_avatar_url}",
                                  "{out_create_time}", "{out_tag}", {out_play_cnt}, {out_fans},
                                  {out_follow}, {out_friend}, {out_like}, "{platform}", "{tag}") """
                Common.logger(log_type, crawler).info(f'sql: {sql}')
                MysqlHelper.update_values(log_type, crawler, sql, env, machine)
                Common.logger(log_type, crawler).info('user info inserted into the database')
            # If (platform + out_user_id) already exists, reuse the on-platform
            # UID stored in the database
            else:
                our_uid = our_user_info[0][1]
                if env == 'prod':
                    our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
                else:
                    our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
                Common.logger(log_type, crawler).info(f'on-platform user profile link: {our_user_link}')
            user_dict = {
                'out_uid': out_uid,
                'user_name': user_name,
                'our_uid': our_uid,
                'our_user_link': our_user_link,
            }
            return user_dict
        except Exception as e:
            Common.logger(log_type, crawler).error(f"create_user error: {e}\n")


if __name__ == "__main__":
    # uid = getUser.create_uid('log', 'kanyikan', 'youtube爬虫,定向爬虫策略', 'dev')
    # print(uid)
    pass
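
    # Usage sketch (hypothetical values throughout -- the dict keys mirror the
    # ones read in create_user above; assumes a reachable dev environment and a
    # working MysqlHelper connection for the given machine). Uncomment to try:
    #
    # out_user_dict = {
    #     'out_uid': 'UCxxxxxxxxxxxxxxxxxxxxxx',  # hypothetical off-platform uid
    #     'user_name': 'demo_user',               # hypothetical user name
    #     'out_avatar_url': 'http://weapppiccdn.yishihui.com/resources/images/pic_normal.png',
    #     'out_create_time': '2023-01-01 00:00:00',
    #     'out_tag': '搞笑博主',
    #     'out_play_cnt': 0,
    #     'out_fans': 0,
    #     'out_follow': 0,
    #     'out_friend': 0,
    #     'out_like': 0,
    #     'platform': 'youtube',
    #     'tag': 'youtube爬虫,定向爬虫策略',
    # }
    # user = getUser.create_user('follow', 'youtube', out_user_dict, 'dev', 'local')
    # print(user)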