# -*- coding: utf-8 -*-
# @Time: 2023/12/26
"""
Feishu sheet configuration: token auth / CRUD / bot alerts
"""
import json
import os
import sys

import requests
import urllib3

sys.path.append(os.getcwd())
from common.common import Common

proxies = {"http": None, "https": None}


class Feishu:
    """
    Edit Feishu cloud documents (spreadsheets).
    """
    succinct_url = "https://w42nne6hzg.feishu.cn/sheets/CPDNs06R2hux6SthZ1wcQmkAnYg?"

    # Feishu spreadsheet token per crawler
    @classmethod
    def spreadsheettoken(cls, crawler):
        if crawler == "succinct":
            return "CPDNs06R2hux6SthZ1wcQmkAnYg"

    # Get the Feishu API token
    @classmethod
    def get_token(cls, log_type, crawler, action=""):
        """
        Get the Feishu API token
        :return:
        """
        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
        post_data = {"app_id": "cli_a13ad2afa438d00b",  # app_id / app_secret of the app published in the admin backend
                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
        try:
            urllib3.disable_warnings()
            response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
            tenant_access_token = response.json()["tenant_access_token"]
            return tenant_access_token
        except Exception as e:
            Common.logger("feishu").error("Failed to get Feishu API token: {}", e)
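
    # Hedged sketch (not in the original): the tenant_access_token is
    # short-lived (per the Feishu docs it expires after roughly two hours),
    # yet every method below requests a fresh one. A minimal in-process
    # cache could look like this; the 7200 s lifetime is an assumption, and
    # the response's "expire" field could be used instead.
    _token_cache = {"token": None, "expires_at": 0.0}

    @classmethod
    def get_token_cached(cls, log_type, crawler):
        import time  # local import keeps the sketch self-contained
        # Reuse the cached token while it still has at least a minute left
        if cls._token_cache["token"] and time.time() < cls._token_cache["expires_at"] - 60:
            return cls._token_cache["token"]
        token = cls.get_token(log_type, crawler)
        cls._token_cache["token"] = token
        cls._token_cache["expires_at"] = time.time() + 7200  # assumed lifetime
        return token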

    # Get spreadsheet metadata
    @classmethod
    def get_metainfo(cls, log_type, crawler):
        """
        Get spreadsheet metadata
        :return:
        """
        try:
            get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                               + cls.spreadsheettoken(crawler) + "/metainfo"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            params = {
                "extFields": "protectedRange",  # extra fields; extFields=protectedRange returns protected row/column info
                "user_id_type": "open_id"  # type of user IDs returned: open_id or union_id
            }
            urllib3.disable_warnings()
            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
            response = json.loads(r.content.decode("utf8"))
            return response
        except Exception as e:
            Common.logger("feishu").error("Failed to get spreadsheet metadata: {}", e)

    # Read all data from a worksheet
    @classmethod
    def get_values_batch(cls, log_type, crawler, sheetid):
        """
        Read all data from a worksheet
        :param log_type: which log to use
        :param crawler: which crawler
        :param sheetid: which worksheet
        :return: all values
        """
        try:
            get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                   + cls.spreadsheettoken(crawler) + "/values_batch_get"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            params = {
                # Multiple query ranges, e.g. url?ranges=range1,range2; each range
                # consists of a sheetId plus an optional cell range.
                "ranges": sheetid,
                # valueRenderOption=ToString returns plain-text values (except numeric types);
                # valueRenderOption=FormattedValue computes and formats the cell;
                # valueRenderOption=Formula returns the formula itself if the cell contains one;
                # valueRenderOption=UnformattedValue computes but does not format the cell.
                "valueRenderOption": "ToString",
                # dateTimeRenderOption=FormattedString computes and formats dates/times
                # according to their format (numbers are not formatted) and returns the string.
                "dateTimeRenderOption": "",
                # Type of user IDs returned: open_id or union_id
                "user_id_type": "open_id"
            }
            urllib3.disable_warnings()
            r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
            response = json.loads(r.content.decode("utf8"))
            values = response["data"]["valueRanges"][0]["values"]
            return values
        except Exception as e:
            Common.logger("feishu").error("Failed to read worksheet values: {}", e)
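
    # Illustrative usage (the sheet ID "OpE35G" appears in the sheet URLs
    # below; the return value is a list of rows, each row a list of cells):
    #     rows = Feishu.get_values_batch("recommend", "succinct", "OpE35G")
    #     for row in rows[1:]:  # skip the header row
    #         print(row[0])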

    # Insert rows or columns into a worksheet
    @classmethod
    def insert_columns(cls, log_type, crawler, sheetid, majordimension, startindex, endindex):
        """
        Insert rows or columns into a worksheet
        :param log_type: log path
        :param crawler: which crawler's cloud document
        :param sheetid: which worksheet
        :param majordimension: rows or columns, ROWS / COLUMNS
        :param startindex: start position
        :param endindex: end position
        """
        try:
            insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                 + cls.spreadsheettoken(crawler) + "/insert_dimension_range"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "dimension": {
                    "sheetId": sheetid,
                    "majorDimension": majordimension,  # default ROWS; ROWS or COLUMNS
                    "startIndex": startindex,  # start position
                    "endIndex": endindex  # end position
                },
                "inheritStyle": "AFTER"  # BEFORE or AFTER; omit to not inherit style
            }
            urllib3.disable_warnings()
            r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger("feishu").info("Inserted rows/columns: {}", r.json()["msg"])
        except Exception as e:
            Common.logger("feishu").error("Failed to insert rows/columns: {}", e)
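
    # Illustrative usage: insert one blank row after the first row (assuming
    # the v2 insert_dimension_range semantics, where endIndex - startIndex
    # rows are inserted after row startIndex):
    #     Feishu.insert_columns("recommend", "succinct", "OpE35G", "ROWS", 1, 2)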

    # Write data
    @classmethod
    def update_values(cls, log_type, crawler, sheetid, ranges, values):
        """
        Write data
        :param log_type: log path
        :param crawler: which crawler's cloud document
        :param sheetid: which worksheet
        :param ranges: cell range
        :param values: the data to write, a list
        """
        try:
            update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                + cls.spreadsheettoken(crawler) + "/values_batch_update"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "valueRanges": [
                    {
                        "range": sheetid + "!" + ranges,
                        "values": values
                    },
                ],
            }
            urllib3.disable_warnings()
            r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger("feishu").info("Wrote data: {}", r.json()["msg"])
        except Exception as e:
            Common.logger("feishu").error("Failed to write data: {}", e)
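
    # Illustrative usage: values is a list of rows, so writing a single row
    # to A2:D2 looks like this (the column values are placeholders; the row
    # width should match the range width):
    #     Feishu.update_values("recommend", "succinct", "OpE35G", "A2:D2",
    #                          [["title", "url", "duration", "status"]])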

    # Merge cells
    @classmethod
    def merge_cells(cls, log_type, crawler, sheetid, ranges):
        """
        Merge cells
        :param log_type: log path
        :param crawler: which crawler
        :param sheetid: which worksheet
        :param ranges: the cell range to merge
        """
        try:
            merge_cells_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/merge_cells"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "range": sheetid + "!" + ranges,
                "mergeType": "MERGE_ROWS"
            }
            urllib3.disable_warnings()
            r = requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger("feishu").info("Merged cells: {}", r.json()["msg"])
        except Exception as e:
            Common.logger("feishu").error("Failed to merge cells: {}", e)

    # Read cell data
    @classmethod
    def get_range_value(cls, log_type, crawler, sheetid, cell):
        """
        Read the contents of a cell range
        :param log_type: log path
        :param crawler: which crawler
        :param sheetid: which worksheet
        :param cell: which cell
        :return: cell contents
        """
        try:
            get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                  + cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            params = {
                "valueRenderOption": "FormattedValue",
                # dateTimeRenderOption=FormattedString computes and formats dates/times
                # according to their format (numbers are not formatted) and returns the string.
                "dateTimeRenderOption": "",
                # Type of user IDs returned: open_id or union_id
                "user_id_type": "open_id"
            }
            urllib3.disable_warnings()
            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
            return r.json()["data"]["valueRange"]["values"][0]
        except Exception as e:
            Common.logger("feishu").error("Failed to read cell values: {}", e)
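
    # Illustrative usage: the method returns the first row of the requested
    # range, so even a single-cell read comes back as a one-row list:
    #     value = Feishu.get_range_value("recommend", "succinct", "OpE35G", "B2:B2")[0]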

    # Get the sheet content as a flat list
    @classmethod
    def get_sheet_content(cls, log_type, crawler, sheet_id):
        try:
            sheet = Feishu.get_values_batch(log_type, crawler, sheet_id)
            content_list = []
            for row in sheet:
                for cell in row:
                    if cell is not None:
                        content_list.append(cell)
            return content_list
        except Exception as e:
            Common.logger("feishu").error(f'get_sheet_content:{e}\n')

    # Delete rows or columns; ROWS or COLUMNS
    @classmethod
    def dimension_range(cls, log_type, crawler, sheetid, major_dimension, startindex, endindex):
        """
        Delete rows or columns
        :param log_type: log path
        :param crawler: which crawler
        :param sheetid: worksheet
        :param major_dimension: default ROWS; ROWS or COLUMNS
        :param startindex: start position
        :param endindex: end position
        :return:
        """
        try:
            dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                  + cls.spreadsheettoken(crawler) + "/dimension_range"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "dimension": {
                    "sheetId": sheetid,
                    "majorDimension": major_dimension,
                    "startIndex": startindex,
                    "endIndex": endindex
                }
            }
            urllib3.disable_warnings()
            r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger("feishu").info("Deleted video data: {}", r.json()["msg"])
        except Exception as e:
            Common.logger("feishu").error("Failed to delete video data: {}", e)
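
    # Illustrative usage: delete rows 2 through 3 of a sheet; the exact index
    # semantics (base and inclusivity) should be confirmed against the Feishu
    # v2 dimension_range docs before relying on this:
    #     Feishu.dimension_range("recommend", "succinct", "OpE35G", "ROWS", 2, 3)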

    # Get a user's open_id
    @classmethod
    def get_userid(cls, log_type, crawler, username):
        try:
            url = "https://open.feishu.cn/open-apis/user/v1/batch_get_id?"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            name_phone_dict = {
                "xinxin": "15546206651",
                "muxinyi": "13699208058",
                "wangxueke": "13513479926",
                "yuzhuoyi": "18624010360",
                "luojunhui": "18801281360",
                "fanjun": "15200827642",
                "zhangyong": "17600025055"
            }
            username = name_phone_dict.get(username)
            data = {"mobiles": [username]}
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=data, verify=False, proxies=proxies)
            open_id = r.json()["data"]["mobile_users"][username][0]["open_id"]
            return open_id
        except Exception as e:
            Common.logger("feishu").error(f"get_userid failed: {e}\n")
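
    # Note (assumption): user/v1/batch_get_id is a legacy endpoint; newer
    # Feishu deployments expose the same phone-to-open_id lookup via
    # POST /open-apis/contact/v3/users/batch_get_id. Also note the dict above
    # only covers the listed usernames, so an unknown name maps to None and
    # the lookup fails inside the try block.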

    # Feishu bot alert
    @classmethod
    def bot(cls, log_type, crawler, text):
        try:
            url = "https://open.feishu.cn/open-apis/bot/v2/hook/96989577-50e7-4653-9ec2-308fe3f2c5fe"
            headers = {'Content-Type': 'application/json'}
            # The crawler keys below are the Chinese crawler names passed in by callers:
            # 抖音 (Douyin), 管理后台 (admin backend), 快手 (Kuaishou), 拼接视频 (video stitching).
            if crawler == "抖音":
                content = "Douyin cookie expired"
                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/CPDNs06R2hux6SthZ1wcQmkAnYg?sheet=OpE35G"
                users = "\n<at id=" + str(cls.get_userid(log_type, crawler, "wangxueke")) + "></at> <at id=" + str(
                    cls.get_userid(log_type, crawler, "muxinyi")) + "></at>\n"
            elif crawler == "管理后台":
                content = "Admin backend cookie expired"
                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/CPDNs06R2hux6SthZ1wcQmkAnYg?sheet=OpE35G"
                users = "\n<at id=" + str(cls.get_userid(log_type, crawler, "wangxueke")) + "></at> <at id=" + str(
                    cls.get_userid(log_type, crawler, "muxinyi")) + "></at>\n"
            elif crawler == "快手":
                content = "Kuaishou cookie expired"
                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/CPDNs06R2hux6SthZ1wcQmkAnYg?sheet=OpE35G"
                users = "\n<at id=" + str(cls.get_userid(log_type, crawler, "wangxueke")) + "></at> <at id=" + str(
                    cls.get_userid(log_type, crawler, "muxinyi")) + "></at>\n"
            elif crawler == "拼接视频":
                content = text
                sheet_url = ""
                users = "\n<at id=" + str(cls.get_userid(log_type, crawler, "wangxueke")) + "></at> <at id=" + str(
                    cls.get_userid(log_type, crawler, "muxinyi")) + "></at> <at id=" + str(
                    cls.get_userid(log_type, crawler, "zhangyong")) + "></at>\n"
            else:
                # Fallback so an unknown crawler no longer raises NameError below
                content = text
                sheet_url = ""
                users = ""
            data = json.dumps({
                "msg_type": "interactive",
                "card": {
                    "config": {
                        "wide_screen_mode": True,
                        "enable_forward": True
                    },
                    "elements": [{
                        "tag": "div",
                        "text": {
                            "content": users + text,
                            "tag": "lark_md"
                        }
                    }, {
                        "actions": [{
                            "tag": "button",
                            "text": {
                                "content": content,
                                "tag": "lark_md"
                            },
                            "url": sheet_url,
                            "type": "default",
                            "value": {}
                        }],
                        "tag": "action"
                    }],
                    "header": {
                        "title": {
                            "content": "📣You have a new message, please check it",
                            "tag": "plain_text"
                        }
                    }
                }
            })
            urllib3.disable_warnings()
            r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
            Common.logger("feishu").info(f'Triggered bot message: {r.status_code}, {text}')
        except Exception as e:
            Common.logger("feishu").error(f"bot failed: {e}\n")


if __name__ == "__main__":
    Feishu.bot('recommend', '抖音', 'Test: Douyin cookie expired, please replace it promptly')
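
    # The webhook in bot() posts to a fixed group chat; a hedged example of a
    # plain alert through the fallback branch added above (the crawler name
    # 'other' is illustrative and matches no @-mention branch):
    #     Feishu.bot('recommend', 'other', 'Daily crawl finished')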