# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2022/12/22
import json
import requests
import urllib3
from main.common import Common
proxies = {"http": None, "https": None}


class Feishu:
    """
    编辑飞书云文档
    """
    # Kanyikan crawler data sheet
    kanyikan_url = "https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?"
    # Kuaishou crawler data sheet
    kuaishou_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf?"
    # Weishi crawler data sheet
    weishi_url = "https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?"
    # Xiaoniangao crawler data sheet
    xiaoniangao_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?"
    # Data monitoring sheet
    crawler_monitor = "https://w42nne6hzg.feishu.cn/sheets/shtcnlZWYazInhf7Z60jkbLRJyd?"
    # Xigua Video sheet
    crawler_xigua = "https://w42nne6hzg.feishu.cn/sheets/shtcnvOpx2P8vBXiV91Ot1MKIw8?"
    # Kandaojiushifuqi (kdjsfq) sheet
    crawler_kdjsfq = "https://w42nne6hzg.feishu.cn/sheets/shtcnEokBkIjOUPAk8vbbPKnXgb"

    # Phone numbers, used to look up Feishu open_id
    wangkun = "13426262515"
    gaonannan = "18501180073"
    xinxin = "15546206651"
    huxinxue = "18832292015"

    # Spreadsheet token for each crawler's cloud document
    @classmethod
    def spreadsheettoken(cls, crawler):
        """
        :param crawler: 哪个爬虫
        """
        if crawler == "kanyikan":
            return "shtcngRPoDYAi24x52j2nDuHMih"
        elif crawler == "kuaishou":
            # return "shtcnp4SaJt37q6OOOrYzPMjQkg"
            return "shtcnICEfaw9llDNQkKgdymM1xf"
        elif crawler == "weishi":
            return "shtcn5YSWg91JfVGzj0SFZIRRPh"
        elif crawler == "xiaoniangao":
            return "shtcnYxiyQ1wLklo1W5Kdqc9cGh"
        elif crawler == "monitor":
            return "shtcnlZWYazInhf7Z60jkbLRJyd"
        elif crawler == "xigua":
            return "shtcnvOpx2P8vBXiV91Ot1MKIw8"
        elif crawler == "kdjsfq":
            return "shtcnEokBkIjOUPAk8vbbPKnXgb"

    # Get a Feishu API token
    @classmethod
    def get_token(cls, log_type):
        """
        获取飞书api token
        :return:
        """
        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
        post_data = {"app_id": "cli_a13ad2afa438d00b",  # 这里账号密码是发布应用的后台账号及密码
                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}

        try:
            urllib3.disable_warnings()
            response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
            tenant_access_token = response.json()["tenant_access_token"]
            return tenant_access_token
        except Exception as e:
            Common.logger(log_type).error("获取飞书 api token 异常:{}", e)

    # Get spreadsheet metadata
    @classmethod
    def get_metainfo(cls, log_type, crawler):
        """
        获取表格元数据
        :return:
        """
        get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                           + cls.spreadsheettoken(crawler) + "/metainfo"

        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            "extFields": "protectedRange",  # 额外返回的字段,extFields=protectedRange时返回保护行列信息
            "user_id_type": "open_id"  # 返回的用户id类型,可选open_id,union_id
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
            response = json.loads(r.content.decode("utf8"))
            return response
        except Exception as e:
            Common.logger(log_type).error("获取表格元数据异常:{}", e)

    # Read all data from a worksheet
    @classmethod
    def get_values_batch(cls, log_type, crawler, sheetid):
        """
        读取工作表中所有数据
        :param log_type: 启用哪个 log
        :param crawler: 哪个爬虫
        :param sheetid: 哪张表
        :return: 所有数据
        """
        get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                               + cls.spreadsheettoken(crawler) + "/values_batch_get"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            # Multiple query ranges, e.g. url?ranges=range1,range2; each range
            # combines a sheetId with an optional cell range
            "ranges": sheetid,

            # valueRenderOption=ToString returns plain-text values (except numeric types);
            # valueRenderOption=FormattedValue computes and formats the cell;
            # valueRenderOption=Formula returns the formula itself when the cell contains one;
            # valueRenderOption=UnformattedValue computes but does not format the cell
            "valueRenderOption": "ToString",

            # dateTimeRenderOption=FormattedString computes and formats date/time cells
            # (but not plain numbers) and returns the formatted string
            "dateTimeRenderOption": "",

            # Type of user id to return: open_id or union_id
            "user_id_type": "open_id"
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
            response = json.loads(r.content.decode("utf8"))
            values = response["data"]["valueRanges"][0]["values"]
            return values
        except Exception as e:
            Common.logger(log_type).error("读取工作表所有数据异常:{}", e)

    # Insert rows or columns into a worksheet
    @classmethod
    def insert_columns(cls, log_type, crawler, sheetid, majordimension, startindex, endindex):
        """
        工作表插入行或列
        :param log_type: 日志路径
        :param crawler: 哪个爬虫的云文档
        :param sheetid:哪张工作表
        :param majordimension:行或者列, ROWS、COLUMNS
        :param startindex:开始位置
        :param endindex:结束位置
        """
        insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                             + cls.spreadsheettoken(crawler) + "/insert_dimension_range"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "dimension": {
                "sheetId": sheetid,
                "majorDimension": majordimension,  # 默认 ROWS ,可选 ROWS、COLUMNS
                "startIndex": startindex,  # 开始的位置
                "endIndex": endindex  # 结束的位置
            },
            "inheritStyle": "AFTER"  # BEFORE 或 AFTER,不填为不继承 style
        }
        try:
            urllib3.disable_warnings()
            r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("插入行或列:{}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("插入行或列异常:{}", e)

    # Write data
    @classmethod
    def update_values(cls, log_type, crawler, sheetid, ranges, values):
        """
        写入数据
        :param log_type: 日志路径
        :param crawler: 哪个爬虫的云文档
        :param sheetid:哪张工作表
        :param ranges:单元格范围
        :param values:写入的具体数据,list
        """
        update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                            + cls.spreadsheettoken(crawler) + "/values_batch_update"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "valueRanges": [
                {
                    "range": sheetid + "!" + ranges,
                    "values": values
                },
            ],
        }

        try:
            urllib3.disable_warnings()
            r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("写入数据:{}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("写入数据异常:{}", e)

    # Merge cells
    @classmethod
    def merge_cells(cls, log_type, crawler, sheetid, ranges):
        """
        合并单元格
        :param log_type: 日志路径
        :param crawler: 哪个爬虫
        :param sheetid:哪张工作表
        :param ranges:需要合并的单元格范围
        """
        merge_cells_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                          + cls.spreadsheettoken(crawler) + "/merge_cells"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }

        body = {
            "range": sheetid + "!" + ranges,
            "mergeType": "MERGE_ROWS"
        }

        try:
            urllib3.disable_warnings()
            r = requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("合并单元格:{}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("合并单元格异常:{}", e)

    # Read a single cell's data
    @classmethod
    def get_range_value(cls, log_type, crawler, sheetid, cell):
        """
        读取单元格内容
        :param log_type: 日志路径
        :param crawler: 哪个爬虫
        :param sheetid: 哪张工作表
        :param cell: 哪个单元格
        :return: 单元格内容
        """
        get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            # valueRenderOption=ToString returns plain-text values (except numeric types);
            # valueRenderOption=FormattedValue computes and formats the cell;
            # valueRenderOption=Formula returns the formula itself when the cell contains one;
            # valueRenderOption=UnformattedValue computes but does not format the cell
            "valueRenderOption": "FormattedValue",

            # dateTimeRenderOption=FormattedString computes and formats date/time cells
            # (but not plain numbers) and returns the formatted string
            "dateTimeRenderOption": "",

            # Type of user id to return: open_id or union_id
            "user_id_type": "open_id"
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
            return r.json()["data"]["valueRange"]["values"][0]
        except Exception as e:
            Common.logger(log_type).error("读取单元格数据异常:{}", e)

    # Delete rows or columns; ROWS or COLUMNS
    @classmethod
    def dimension_range(cls, log_type, crawler, sheetid, major_dimension, startindex, endindex):
        """
        删除行或列
        :param log_type: 日志路径
        :param crawler: 哪个爬虫
        :param sheetid:工作表
        :param major_dimension:默认 ROWS ,可选 ROWS、COLUMNS
        :param startindex:开始的位置
        :param endindex:结束的位置
        :return:
        """
        dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/dimension_range"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "dimension": {
                "sheetId": sheetid,
                "majorDimension": major_dimension,
                "startIndex": startindex,
                "endIndex": endindex
            }
        }
        try:
            urllib3.disable_warnings()
            r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("删除视频数据:{}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("删除视频数据异常:{}", e)

    # Get a user's open_id
    @classmethod
    def get_userid(cls, log_type, username):
        """
        Look up a user's Feishu open_id by nickname (mapped to a mobile number)
        :param log_type: log path
        :param username: a nickname defined on the class, e.g. "wangkun"
        :return: the user's open_id
        """
        try:
            url = "https://open.feishu.cn/open-apis/user/v1/batch_get_id?"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type),
                "Content-Type": "application/json; charset=utf-8"
            }
            if username == "wangkun":
                username = cls.wangkun
            elif username == "gaonannan":
                username = cls.gaonannan
            elif username == "xinxin":
                username = cls.xinxin
            elif username == "huxinxue":
                username = cls.huxinxue
            data = {"mobiles": [username]}
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=data, verify=False, proxies=proxies)
            open_id = r.json()["data"]["mobile_users"][username][0]["open_id"]
            Common.logger(log_type).info("{}:{}", username, open_id)
            # print(f"{username}:{open_id}")
            return open_id
        except Exception as e:
            Common.logger(log_type).error("get_userid异常:{}", e)

    # Feishu bot notification
    @classmethod
    def bot(cls, log_type, content):
        """
        Send an alert card to the Feishu group bot webhook
        :param log_type: log path
        :param content: alert text to send
        """
        try:
            url = "https://open.feishu.cn/open-apis/bot/v2/hook/96989577-50e7-4653-9ec2-308fe3f2c5fe"
            headers = {
                'Content-Type': 'application/json'
            }
            data = json.dumps({
                "msg_type": "interactive",
                "card": {
                    "config": {
                        "wide_screen_mode": True,
                        "enable_forward": True
                    },
                    "elements": [{
                        "tag": "div",
                        "text": {
                            "content": "\n<at id=" + str(cls.get_userid(log_type, "wangkun")) + "></at>\n" + content,
                            "tag": "lark_md"
                        }
                    }, {
                        "actions": [{
                            "tag": "button",
                            "text": {
                                "content": "快手爬虫表",
                                "tag": "lark_md"
                            },
                            "url": "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf",
                            "type": "default",
                            "value": {}
                        },
                            {
                                "tag": "button",
                                "text": {
                                    "content": "快手Jenkins",
                                    "tag": "lark_md"
                                },
                                "url": "https://jenkins-on.yishihui.com/view/%E7%88%AC%E8%99%AB-Spider/job/%E5%BF%"
                                       "AB%E6%89%8B%E5%B0%8F%E7%A8%8B%E5%BA%8F-%E8%A7%86%E9%A2%91%E7%88%AC%E5%8F%96/",
                                "type": "default",
                                "value": {}
                            }

                        ],
                        "tag": "action"
                    }],
                    "header": {
                        "title": {
                            "content": "📣有新的报警,请注意查处",
                            "tag": "plain_text"
                        }
                    }
                }
            })
            urllib3.disable_warnings()
            r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
            Common.logger(log_type).info("触发机器人消息:{}, {}", r, r.json()["StatusMessage"])
        except Exception as e:
            Common.logger(log_type).error("bot异常:{}", e)


if __name__ == "__main__":
    Feishu.bot("kuaishou", "我是快手测试内容,请忽略")
    pass