# -*- coding: utf-8 -*-
# @Time: 2023/12/26
"""
飞书表配置: token 鉴权 / 增删改查 / 机器人报警
"""
import json
import os
import sys
import requests
import urllib3
from loguru import logger
sys.path.append(os.getcwd())
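# Explicitly disable any system HTTP/HTTPS proxy so requests go straight to the Feishu API.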
proxies = {"http": None, "https": None}
class Feishu:
"""
编辑飞书云文档
"""
succinct_url = "https://w42nne6hzg.feishu.cn/sheets/"
    # Spreadsheet token for a given crawler
    @classmethod
    def spreadsheettoken(cls, crawler):
        """Map a crawler name to its spreadsheet token; "summary" uses the shared summary sheet."""
        if crawler == "summary":
            return "KsoMsyP2ghleM9tzBfmcEEXBnXg"
        else:
            return crawler
    # Fetch a Feishu API token
@classmethod
def get_token(cls):
"""
获取飞书api token
:return:
"""
url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
post_data = {"app_id": "cli_a13ad2afa438d00b", # 这里账号密码是发布应用的后台账号及密码
"app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
try:
urllib3.disable_warnings()
response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
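            # Response body sketch on success (per the tenant_access_token API): {"code": 0, "msg": "ok", "tenant_access_token": "t-...", "expire": 7200}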
tenant_access_token = response.json()["tenant_access_token"]
return tenant_access_token
except Exception as e:
logger.error(f"[+] 飞书获取飞书 api token 异常:{e}")
    # Fetch spreadsheet metadata
@classmethod
def get_metainfo(cls, crawler):
"""
获取表格元数据
:return:
"""
try:
get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+ cls.spreadsheettoken(crawler) + "/metainfo"
headers = {
"Authorization": "Bearer " + cls.get_token(),
"Content-Type": "application/json; charset=utf-8"
}
params = {
"extFields": "protectedRange", # 额外返回的字段,extFields=protectedRange时返回保护行列信息
"user_id_type": "open_id" # 返回的用户id类型,可选open_id,union_id
}
urllib3.disable_warnings()
r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
response = json.loads(r.content.decode("utf8"))
return response
except Exception as e:
logger.error(f"[+] 飞书获取表格元数据异常:{e}")
    # Read all values from a worksheet
@classmethod
def get_values_batch(cls, crawler, sheetid):
"""
读取工作表中所有数据
:param crawler: 哪个爬虫
:param sheetid: 哪张表
:return: 所有数据
"""
try:
get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+ cls.spreadsheettoken(crawler) + "/values_batch_get"
headers = {
"Authorization": "Bearer " + cls.get_token(),
"Content-Type": "application/json; charset=utf-8"
}
params = {
"ranges": sheetid,
"valueRenderOption": "ToString",
"dateTimeRenderOption": "",
"user_id_type": "open_id"
}
urllib3.disable_warnings()
r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
response = json.loads(r.content.decode("utf8"))
values = response["data"]["valueRanges"][0]["values"]
return values
except Exception as e:
logger.error(f"[+] 飞书读取工作表所有数据异常:{e}")
    # Insert rows or columns into a worksheet
@classmethod
def insert_columns(cls, crawler, sheetid, majordimension, startindex, endindex):
"""
工作表插入行或列
:param log_type: 日志路径
:param crawler: 哪个爬虫的云文档
:param sheetid:哪张工作表
:param majordimension:行或者列, ROWS、COLUMNS
:param startindex:开始位置
:param endindex:结束位置
"""
try:
insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+ cls.spreadsheettoken(crawler) + "/insert_dimension_range"
headers = {
"Authorization": "Bearer " + cls.get_token(),
"Content-Type": "application/json; charset=utf-8"
}
body = {
"dimension": {
"sheetId": sheetid,
"majorDimension": majordimension, # 默认 ROWS ,可选 ROWS、COLUMNS
"startIndex": startindex, # 开始的位置
"endIndex": endindex # 结束的位置
},
"inheritStyle": "AFTER" # BEFORE 或 AFTER,不填为不继承 style
}
urllib3.disable_warnings()
r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False, timeout=10)
except Exception as e:
logger.error(f"[+] 飞书插入行或列异常:{e}")
    # Write values into cells
@classmethod
def update_values(cls, crawler, sheetid, ranges, values):
"""
写入数据
:param log_type: 日志路径
:param crawler: 哪个爬虫的云文档
:param sheetid:哪张工作表
:param ranges:单元格范围
:param values:写入的具体数据,list
"""
try:
update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+ cls.spreadsheettoken(crawler) + "/values_batch_update"
headers = {
"Authorization": "Bearer " + cls.get_token(),
"Content-Type": "application/json; charset=utf-8"
}
body = {
"valueRanges": [
{
"range": sheetid + "!" + ranges,
"values": values
},
],
}
urllib3.disable_warnings()
r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False, timeout=10)
except Exception as e:
logger.error(f"[+] 飞书写入数据异常:{e}")
    # Read cell values
@classmethod
def get_range_value(cls, crawler, sheetid, cell):
"""
读取单元格内容
:param log_type: 日志路径
:param crawler: 哪个爬虫
:param sheetid: 哪张工作表
:param cell: 哪个单元格
:return: 单元格内容
"""
try:
get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+ cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
headers = {
"Authorization": "Bearer " + cls.get_token(),
"Content-Type": "application/json; charset=utf-8"
}
params = {
"valueRenderOption": "FormattedValue",
                # dateTimeRenderOption=FormattedString formats dates/times according to their cell format (numbers are not formatted) and returns the formatted string
                "dateTimeRenderOption": "",
                # user id type to return; open_id or union_id
                "user_id_type": "open_id"
}
urllib3.disable_warnings()
r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False, timeout=10)
return r.json()["data"]["valueRange"]["values"][0]
except Exception as e:
logger.error(f"[+] 飞书读取单元格数据异常:{e}")
    # Get all non-empty contents of a worksheet
@classmethod
def get_sheet_content(cls, crawler, sheet_id):
        """Flatten a worksheet into a list of all non-empty cell values."""
        try:
            sheet = cls.get_values_batch(crawler, sheet_id)
            content_list = []
            for row in sheet:
                for cell in row:
                    if cell is not None:
                        content_list.append(cell)
return content_list
except Exception as e:
logger.error(f"[+] 飞书get_sheet_content:{e}")
    # Delete rows or columns (ROWS or COLUMNS)
@classmethod
def dimension_range(cls, crawler, sheetid, major_dimension, startindex, endindex):
"""
删除行或列
:param log_type: 日志路径
:param crawler: 哪个爬虫
:param sheetid:工作表
:param major_dimension:默认 ROWS ,可选 ROWS、COLUMNS
:param startindex:开始的位置
:param endindex:结束的位置
:return:
"""
try:
dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+ cls.spreadsheettoken(crawler) + "/dimension_range"
headers = {
"Authorization": "Bearer " + cls.get_token(),
"Content-Type": "application/json; charset=utf-8"
}
body = {
"dimension": {
"sheetId": sheetid,
"majorDimension": major_dimension,
"startIndex": startindex,
"endIndex": endindex
}
}
urllib3.disable_warnings()
r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
except Exception as e:
logger.error(f"[+] 飞书删除视频数据异常:{e}")
    # Look up a user's open_id
@classmethod
    def get_userid(cls, username):
        """Look up a user's open_id; the username is mapped to a mobile number via name_phone_dict."""
        try:
url = "https://open.feishu.cn/open-apis/user/v1/batch_get_id?"
headers = {
"Authorization": "Bearer " + cls.get_token(),
"Content-Type": "application/json; charset=utf-8"
}
name_phone_dict = {
"xinxin": "15546206651",
"muxinyi": "13699208058",
"wangxueke": "13513479926",
"yuzhuoyi": "18624010360",
"luojunhui": "18801281360",
"fanjun": "15200827642",
"zhangyong": "17600025055",
                "liukunyu": "18810931977"
}
            mobile = name_phone_dict.get(username)
            data = {"mobiles": [mobile]}
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=data, verify=False, proxies=proxies)
            open_id = r.json()["data"]["mobile_users"][mobile][0]["open_id"]
return open_id
except Exception as e:
logger.error(f"[+] 飞书get_userid异常:{e}")
    # Feishu bot alert
@classmethod
def bot(cls, log_type, crawler, text, mark_name):
try:
headers = {'Content-Type': 'application/json'}
if crawler == "机器自动改造消息通知":
url = "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703"
sheet_url = "https://w42nne6hzg.feishu.cn/sheets/KsoMsyP2ghleM9tzBfmcEEXBnXg?sheet=bc154d"
users = f"{mark_name}"
elif crawler == "快手关键词搜索":
url = "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703"
sheet_url = "https://w42nne6hzg.feishu.cn/sheets/KsoMsyP2ghleM9tzBfmcEEXBnXg?sheet=U1gySe"
users = "".join([f'{name}' for type, name in
zip(log_type, mark_name)])
# users = f"{mark_name}"
else:
url = "https://open.feishu.cn/open-apis/bot/v2/hook/7928f182-08c1-4c4d-b2f7-82e10c93ca80"
sheet_url = "https://w42nne6hzg.feishu.cn/sheets/KsoMsyP2ghleM9tzBfmcEEXBnXg?sheet=bc154d"
users = f"{mark_name}"
data = json.dumps({
"msg_type": "interactive",
"card": {
"config": {
"wide_screen_mode": True,
"enable_forward": True
},
"elements": [{
"tag": "div",
"text": {
"content": users + text,
"tag": "lark_md"
}
}, {
"actions": [{
"tag": "button",
"text": {
"content": "详情,点击~~~~~",
"tag": "lark_md"
},
"url": sheet_url,
"type": "default",
"value": {}
}],
"tag": "action"
}],
"header": {
"title": {
"content": "📣消息提醒",
"tag": "plain_text"
}
}
}
})
urllib3.disable_warnings()
r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
except Exception as e:
logger.error(f"[+] 飞书bot异常:{e}")
    # Feishu bot: notification that a transformation plan has finished
@classmethod
def finish_bot(cls, text, url, content):
try:
headers = {'Content-Type': 'application/json'}
data = json.dumps({
"msg_type": "interactive",
"card": {
"config": {
"wide_screen_mode": True,
"enable_forward": True
},
"elements": [{
"tag": "div",
"text": {
"content": text,
"tag": "lark_md"
}
}],
"header": {
"title": {
"content": content,
"tag": "plain_text"
}
}
}
})
urllib3.disable_warnings()
r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
except Exception as e:
logger.error(f"[+] 飞书bot异常:{e}")
if __name__ == "__main__":
    Feishu.bot('recommend', '抖音', '测试: 抖音cookie失效,请及时更换', '')  # pass an empty mark_name for this smoke test
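    # Additional usage sketch: finish_bot() posts to whatever webhook URL is passed in; the URL below is a placeholder, not a real endpoint.
    # Feishu.finish_bot("改造计划已完成", "https://open.feishu.cn/open-apis/bot/v2/hook/<webhook-id>", "📣改造计划完成通知")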