# @Author: wangkun
# @Time: Mar 08, 2022
import json
import requests
import urllib3
# from crawler_shipinhao.main.common import Common
from main.common import Common
proxies = {"http": None, "https": None}


class Feishu:
    """
    Helpers for editing Feishu (Lark) spreadsheet documents via the open API.
    """
    # Kanyikan crawler data sheet
    kanyikan_url = "https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?"
    # Kuaishou crawler data sheet
    kuaishou_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnp4SaJt37q6OOOrYzPMjQkg?"
    # Weishi crawler data sheet
    weishi_url = "https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?"
    # Xiaoniangao crawler data sheet
    xiaoniangao_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?"
    # Data monitoring sheet
    crawler_monitor = "https://w42nne6hzg.feishu.cn/sheets/shtcnlZWYazInhf7Z60jkbLRJyd?"
    # Benshanzhufu data sheet
    crawler_benshanzhufu = "https://w42nne6hzg.feishu.cn/sheets/shtcnGh2rrsPYM4iVNEBO7OqWrb?"
    # Official accounts (gzh) crawler sheet
    gzh_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnexNXnpDLHhARw0QdiwbYuA?"
    # Shipinhao (WeChat Channels) sheet
    shipinhao_url = "https://w42nne6hzg.feishu.cn/sheets/shtcn9rOdZRAGFbRkWpn7hqEHGc?"

    # Spreadsheet token for each crawler's Feishu document
    @classmethod
    def spreadsheettoken(cls, crawler):
        """
        :param crawler: which crawler
        :return: the spreadsheet token of that crawler's document
        """
        if crawler == "kanyikan":
            return "shtcngRPoDYAi24x52j2nDuHMih"
        elif crawler == "kuaishou":
            return "shtcnp4SaJt37q6OOOrYzPMjQkg"
        elif crawler == "weishi":
            return "shtcn5YSWg91JfVGzj0SFZIRRPh"
        elif crawler == "xiaoniangao":
            return "shtcnYxiyQ1wLklo1W5Kdqc9cGh"
        elif crawler == "monitor":
            return "shtcnlZWYazInhf7Z60jkbLRJyd"
        elif crawler == "bszf":
            return "shtcnGh2rrsPYM4iVNEBO7OqWrb"
        elif crawler == "gzh":
            return "shtcnexNXnpDLHhARw0QdiwbYuA"
        elif crawler == "shipinhao":
            return "shtcn9rOdZRAGFbRkWpn7hqEHGc"

    # Get a Feishu API token
    @classmethod
    def get_token(cls, log_type):
        """
        Fetch a Feishu API tenant_access_token.
        :param log_type: which logger to use
        :return: tenant_access_token string
        """
        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
        post_data = {"app_id": "cli_a13ad2afa438d00b",  # app_id / app_secret of the published app's backend
                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}

        try:
            urllib3.disable_warnings()
            response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
            tenant_access_token = response.json()["tenant_access_token"]
            return tenant_access_token
        except Exception as e:
            Common.logger(log_type).error("Failed to get Feishu API token: {}", e)

    # Get spreadsheet metadata
    @classmethod
    def get_metainfo(cls, log_type, crawler):
        """
        Get the spreadsheet's metadata.
        :param log_type: which logger to use
        :param crawler: which crawler's spreadsheet
        :return: metadata response as a dict
        """
        get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                           + cls.spreadsheettoken(crawler) + "/metainfo"

        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            "extFields": "protectedRange",  # 额外返回的字段,extFields=protectedRange时返回保护行列信息
            "user_id_type": "open_id"  # 返回的用户id类型,可选open_id,union_id
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
            response = json.loads(r.content.decode("utf8"))
            return response
        except Exception as e:
            Common.logger(log_type).error("Failed to get spreadsheet metadata: {}", e)

    # Read all data from a worksheet
    @classmethod
    def get_values_batch(cls, log_type, crawler, sheetid):
        """
        Read all values from a worksheet.
        :param log_type: which logger to use
        :param crawler: which crawler's spreadsheet
        :param sheetid: which worksheet
        :return: all values in the sheet
        """
        get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                               + cls.spreadsheettoken(crawler) + "/values_batch_get"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            # Multiple query ranges, e.g. url?ranges=range1,range2 ; each range consists of a sheetId and a cell range
            "ranges": sheetid,

            # valueRenderOption=ToString returns plain-text values (except numeric types);
            # valueRenderOption=FormattedValue computes and formats the cell;
            # valueRenderOption=Formula returns the formula itself when a cell contains one;
            # valueRenderOption=UnformattedValue computes but does not format the cell
            "valueRenderOption": "ToString",

            # dateTimeRenderOption=FormattedString computes and formats date/time values according to their format,
            # without formatting numbers, and returns the formatted string
            "dateTimeRenderOption": "",

            # type of user id to return: open_id or union_id
            "user_id_type": "open_id"
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
            # print(r.text)
            response = json.loads(r.content.decode("utf8"))
            values = response["data"]["valueRanges"][0]["values"]
            return values
        except Exception as e:
            Common.logger(log_type).error("Failed to read all worksheet data: {}", e)
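    # Hedged usage sketch ("xxxxxx" stands in for a real worksheet/tab id):
    #   rows = Feishu.get_values_batch("shipinhao", "shipinhao", "xxxxxx")
    #   for row in rows[1:]:  # rows[0] is typically the header row
    #       print(row)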

    # Insert rows or columns into a worksheet
    @classmethod
    def insert_columns(cls, log_type, crawler, sheetid, majordimension, startindex, endindex):
        """
        Insert rows or columns into a worksheet.
        :param log_type: which logger to use
        :param crawler: which crawler's cloud document
        :param sheetid: which worksheet
        :param majordimension: ROWS or COLUMNS
        :param startindex: start position
        :param endindex: end position
        """
        insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                             + cls.spreadsheettoken(crawler) + "/insert_dimension_range"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "dimension": {
                "sheetId": sheetid,
                "majorDimension": majordimension,  # 默认 ROWS ,可选 ROWS、COLUMNS
                "startIndex": startindex,  # 开始的位置
                "endIndex": endindex  # 结束的位置
            },
            "inheritStyle": "AFTER"  # BEFORE 或 AFTER,不填为不继承 style
        }
        try:
            urllib3.disable_warnings()
            r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Insert rows/columns: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to insert rows/columns: {}", e)

    # Write data
    @classmethod
    def update_values(cls, log_type, crawler, sheetid, ranges, values):
        """
        Write values into a worksheet.
        :param log_type: which logger to use
        :param crawler: which crawler's cloud document
        :param sheetid: which worksheet
        :param ranges: cell range to write to
        :param values: the data to write, as a list of rows
        """
        update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                            + cls.spreadsheettoken(crawler) + "/values_batch_update"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "valueRanges": [
                {
                    "range": sheetid + "!" + ranges,
                    "values": values
                },
            ],
        }

        try:
            urllib3.disable_warnings()
            r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Write data: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to write data: {}", e)
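    # Hedged sketch of a typical write flow (worksheet id "xxxxxx" and the cell
    # range are placeholders): open a new row at the top, then fill it in.
    #   Feishu.insert_columns("shipinhao", "shipinhao", "xxxxxx", "ROWS", 1, 2)
    #   Feishu.update_values("shipinhao", "shipinhao", "xxxxxx", "A2:D2",
    #                        [["2022-03-08", "title", "url", "author"]])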

    # Merge cells
    @classmethod
    def merge_cells(cls, log_type, crawler, sheetid, ranges):
        """
        Merge cells.
        :param log_type: which logger to use
        :param crawler: which crawler's spreadsheet
        :param sheetid: which worksheet
        :param ranges: cell range to merge
        """
        merge_cells_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                          + cls.spreadsheettoken(crawler) + "/merge_cells"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }

        body = {
            "range": sheetid + "!" + ranges,
            "mergeType": "MERGE_ROWS"
        }

        try:
            urllib3.disable_warnings()
            r = requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Merge cells: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to merge cells: {}", e)

    # Read cell data
    @classmethod
    def get_range_value(cls, log_type, crawler, sheetid, cell):
        """
        Read the content of a cell range.
        :param log_type: which logger to use
        :param crawler: which crawler's spreadsheet
        :param sheetid: which worksheet
        :param cell: which cell (range)
        :return: cell contents
        """
        get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            # valueRenderOption=ToString returns plain-text values (except numeric types);
            # valueRenderOption=FormattedValue computes and formats the cell;
            # valueRenderOption=Formula returns the formula itself when a cell contains one;
            # valueRenderOption=UnformattedValue computes but does not format the cell.
            "valueRenderOption": "FormattedValue",

            # dateTimeRenderOption=FormattedString computes and formats date/time values according to their format,
            # without formatting numbers, and returns the formatted string.
            "dateTimeRenderOption": "",

            # type of user id to return: open_id or union_id
            "user_id_type": "open_id"
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
            # print(r.text)
            return r.json()["data"]["valueRange"]["values"][0]
        except Exception as e:
            Common.logger(log_type).error("Failed to read cell data: {}", e)
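    # Hedged usage sketch (worksheet id and cell range are placeholders);
    # returns the first row of values in the range as a list:
    #   Feishu.get_range_value("shipinhao", "shipinhao", "xxxxxx", "B2:B2")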

    # Delete rows or columns (ROWS or COLUMNS)
    @classmethod
    def dimension_range(cls, log_type, crawler, sheetid, major_dimension, startindex, endindex):
        """
        Delete rows or columns.
        :param log_type: which logger to use
        :param crawler: which crawler's spreadsheet
        :param sheetid: which worksheet
        :param major_dimension: defaults to ROWS; ROWS or COLUMNS
        :param startindex: start position
        :param endindex: end position
        :return:
        """
        dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/dimension_range"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "dimension": {
                "sheetId": sheetid,
                "majorDimension": major_dimension,
                "startIndex": startindex,
                "endIndex": endindex
            }
        }
        try:
            urllib3.disable_warnings()
            r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Delete video data: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to delete video data: {}", e)
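    # Hedged usage sketch (worksheet id is a placeholder): delete rows 2..3
    #   Feishu.dimension_range("shipinhao", "shipinhao", "xxxxxx", "ROWS", 2, 3)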


if __name__ == "__main__":
    print(Feishu.get_token("shipinhao"))
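
    # Additional hedged smoke tests, kept commented out (worksheet ids are placeholders):
    # print(Feishu.get_metainfo("shipinhao", "shipinhao"))
    # print(Feishu.get_values_batch("shipinhao", "shipinhao", "xxxxxx"))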