# -*- coding: utf-8 -*-
"""
Feishu sheet helpers: token auth / create-read-update-delete / bot alerts
"""
import json
import os
import sys

import requests
import urllib3

sys.path.append(os.getcwd())
from common import Common

proxies = {"http": None, "https": None}
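# Mapping both schemes to None is a common `requests` idiom for bypassing any
# proxies configured in the environment (HTTP_PROXY / HTTPS_PROXY) on these calls.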


class Feishu:
    """
    Edit Feishu cloud spreadsheets
    """
    succinct_url = "https://w42nne6hzg.feishu.cn/sheets/"

    # Feishu spreadsheet token for a given crawler
    @classmethod
    def spreadsheettoken(cls, crawler):
        if crawler == "summary":
            return "IbVVsKCpbhxhSJtwYOUc8S1jnWb"
        else:
            return crawler

    # Get a Feishu API token
    @classmethod
    def get_token(cls):
        """
        Get a Feishu API tenant_access_token
        :return: the token string, or None on failure
        """
        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
        post_data = {"app_id": "cli_a13ad2afa438d00b",  # backend credentials of the published app
                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
        try:
            urllib3.disable_warnings()
            response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
            tenant_access_token = response.json()["tenant_access_token"]
            return tenant_access_token
        except Exception as e:
            Common.logger("feishu").error("Failed to get Feishu API token: {}", e)
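    # Note: per Feishu's docs a tenant_access_token is short-lived (on the
    # order of two hours), so the helpers below simply fetch a fresh token on
    # every call instead of caching one; each request costs an extra auth
    # round-trip, and a failed fetch makes get_token() return None.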

    # Get spreadsheet metadata
    @classmethod
    def get_metainfo(cls, crawler):
        """
        Get spreadsheet metadata
        :param crawler: which crawler's spreadsheet
        :return: the metadata response as a dict
        """
        try:
            get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                               + cls.spreadsheettoken(crawler) + "/metainfo"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            params = {
                "extFields": "protectedRange",  # extra fields; extFields=protectedRange also returns protected row/column info
                "user_id_type": "open_id"  # type of user IDs to return: open_id or union_id
            }
            urllib3.disable_warnings()
            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
            response = json.loads(r.content.decode("utf8"))
            return response
        except Exception as e:
            Common.logger("feishu").error("Failed to get spreadsheet metadata: {}", e)

    # Read all data from a worksheet
    @classmethod
    def get_values_batch(cls, crawler, sheetid):
        """
        Read all data from a worksheet
        :param crawler: which crawler's spreadsheet
        :param sheetid: which worksheet
        :return: all values in the sheet
        """
        try:
            get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                   + cls.spreadsheettoken(crawler) + "/values_batch_get"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            params = {
                "ranges": sheetid,
                "valueRenderOption": "ToString",
                "dateTimeRenderOption": "",
                "user_id_type": "open_id"
            }
            urllib3.disable_warnings()
            r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
            response = json.loads(r.content.decode("utf8"))
            values = response["data"]["valueRanges"][0]["values"]
            return values
        except Exception as e:
            Common.logger("feishu").error("Failed to read worksheet data: {}", e)
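    # The returned values form a 2-D list (one inner list per row); empty cells
    # come back as None, which get_sheet_content() below relies on when it
    # flattens a sheet into a plain list.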

    # Insert rows or columns into a worksheet
    @classmethod
    def insert_columns(cls, crawler, sheetid, majordimension, startindex, endindex):
        """
        Insert rows or columns into a worksheet
        :param crawler: which crawler's spreadsheet
        :param sheetid: which worksheet
        :param majordimension: ROWS or COLUMNS
        :param startindex: start position
        :param endindex: end position
        """
        try:
            insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                 + cls.spreadsheettoken(crawler) + "/insert_dimension_range"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "dimension": {
                    "sheetId": sheetid,
                    "majorDimension": majordimension,  # defaults to ROWS; ROWS or COLUMNS
                    "startIndex": startindex,  # start position
                    "endIndex": endindex  # end position
                },
                "inheritStyle": "AFTER"  # BEFORE or AFTER; omit to not inherit style
            }
            urllib3.disable_warnings()
            r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger("feishu").info("Inserted rows/columns: {}", r.json()["msg"])
        except Exception as e:
            Common.logger("feishu").error("Failed to insert rows/columns: {}", e)

    # Write data
    @classmethod
    def update_values(cls, crawler, sheetid, ranges, values):
        """
        Write data into a range
        :param crawler: which crawler's spreadsheet
        :param sheetid: which worksheet
        :param ranges: cell range
        :param values: the data to write, as a list of rows
        """
        try:
            update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                + cls.spreadsheettoken(crawler) + "/values_batch_update"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "valueRanges": [
                    {
                        "range": sheetid + "!" + ranges,
                        "values": values
                    },
                ],
            }
            urllib3.disable_warnings()
            r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger("feishu").info("Wrote data: {}", r.json()["msg"])
        except Exception as e:
            Common.logger("feishu").error("Failed to write data: {}", e)
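    # Usage sketch (the worksheet ID and range are hypothetical placeholders):
    #   Feishu.update_values("summary", "n9xlLF", "A2:B2", [["foo", "bar"]])
    # writes one row spanning cells A2 and B2 of worksheet n9xlLF.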

    # Merge cells
    @classmethod
    def merge_cells(cls, log_type, crawler, sheetid, ranges):
        """
        Merge cells
        :param log_type: log path (currently unused)
        :param crawler: which crawler's spreadsheet
        :param sheetid: which worksheet
        :param ranges: cell range to merge
        """
        try:
            merge_cells_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/merge_cells"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "range": sheetid + "!" + ranges,
                "mergeType": "MERGE_ROWS"
            }
            urllib3.disable_warnings()
            r = requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger("feishu").info("Merged cells: {}", r.json()["msg"])
        except Exception as e:
            Common.logger("feishu").error("Failed to merge cells: {}", e)

    # Read a single cell
    @classmethod
    def get_range_value(cls, crawler, sheetid, cell):
        """
        Read the content of a cell
        :param crawler: which crawler's spreadsheet
        :param sheetid: which worksheet
        :param cell: which cell
        :return: the cell content
        """
        try:
            get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                  + cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            params = {
                "valueRenderOption": "FormattedValue",
                # dateTimeRenderOption=FormattedString formats dates/times per their cell format
                # (numbers are left unformatted) and returns the formatted string
                "dateTimeRenderOption": "",
                # type of user IDs to return: open_id or union_id
                "user_id_type": "open_id"
            }
            urllib3.disable_warnings()
            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
            return r.json()["data"]["valueRange"]["values"][0]
        except Exception as e:
            Common.logger("feishu").error("Failed to read cell data: {}", e)

    # Get all non-empty cell contents of a sheet
    @classmethod
    def get_sheet_content(cls, crawler, sheet_id):
        try:
            sheet = Feishu.get_values_batch(crawler, sheet_id)
            content_list = []
            for row in sheet:
                for cell in row:
                    if cell is not None:
                        content_list.append(cell)
            return content_list
        except Exception as e:
            Common.logger("feishu").error(f'get_sheet_content: {e}\n')

    # Delete rows or columns (ROWS or COLUMNS)
    @classmethod
    def dimension_range(cls, log_type, crawler, sheetid, major_dimension, startindex, endindex):
        """
        Delete rows or columns
        :param log_type: log path (currently unused)
        :param crawler: which crawler's spreadsheet
        :param sheetid: which worksheet
        :param major_dimension: defaults to ROWS; ROWS or COLUMNS
        :param startindex: start position
        :param endindex: end position
        :return:
        """
        try:
            dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                  + cls.spreadsheettoken(crawler) + "/dimension_range"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "dimension": {
                    "sheetId": sheetid,
                    "majorDimension": major_dimension,
                    "startIndex": startindex,
                    "endIndex": endindex
                }
            }
            urllib3.disable_warnings()
            r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger("feishu").info("Deleted rows/columns: {}", r.json()["msg"])
        except Exception as e:
            Common.logger("feishu").error("Failed to delete rows/columns: {}", e)

    # Get a user's open_id
    @classmethod
    def get_userid(cls, log_type, crawler, username):
        try:
            url = "https://open.feishu.cn/open-apis/user/v1/batch_get_id?"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            # Hardcoded roster mapping usernames to mobile numbers
            name_phone_dict = {
                "xinxin": "15546206651",
                "muxinyi": "13699208058",
                "wangxueke": "13513479926",
                "yuzhuoyi": "18624010360",
                "luojunhui": "18801281360",
                "fanjun": "15200827642",
                "zhangyong": "17600025055"
            }
            mobile = name_phone_dict.get(username)
            data = {"mobiles": [mobile]}
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=data, verify=False, proxies=proxies)
            open_id = r.json()["data"]["mobile_users"][mobile][0]["open_id"]
            return open_id
        except Exception as e:
            Common.logger("feishu").error(f"get_userid exception: {e}\n")
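    # The lookup resolves a username to a mobile number via the hardcoded
    # roster, then asks Feishu for the matching open_id. A username missing
    # from the roster ends up in the except branch and the method returns
    # None; adding a user means extending name_phone_dict.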

    # Feishu bot alert
    @classmethod
    def bot(cls, log_type, crawler, text, mark, mark_name):
        try:
            # url = "https://open.feishu.cn/open-apis/bot/v2/hook/5a6ce4ca-32fa-44fe-bbe4-69ae369bb3cf"
            # url = "https://open.feishu.cn/open-apis/bot/v2/hook/2b317db6-93ed-43b4-bf01-03c35cfa1d59"
            url = "https://open.feishu.cn/open-apis/bot/v2/hook/af368a84-545f-4106-9c4c-af64678ad7af"
            headers = {'Content-Type': 'application/json'}
            # All alerts link back to the same summary sheet and @-mention the
            # user resolved from `mark`; only the button label differs by crawler.
            sheet_url = "https://w42nne6hzg.feishu.cn/sheets/IbVVsKCpbhxhSJtwYOUc8S1jnWb?sheet=n9xlLF"
            users = f"<at id={cls.get_userid(log_type, crawler, mark)}>{mark_name}</at>"
            if crawler == "抖音":
                content = "抖音cookie过期"
            elif crawler == "管理后台":
                content = "管理后台cookie过期"
            elif crawler == "快手":
                content = "快手cookie过期"
            elif crawler == "AGC视频":
                content = "AGC视频生成条数详情"
                users += " \n"
            else:  # includes "AGC完成通知"
                content = "今日所有AGC视频完成啦~~~"
            data = json.dumps({
                "msg_type": "interactive",
                "card": {
                    "config": {
                        "wide_screen_mode": True,
                        "enable_forward": True
                    },
                    "elements": [{
                        "tag": "div",
                        "text": {
                            "content": users + text,
                            "tag": "lark_md"
                        }
                    }, {
                        "actions": [{
                            "tag": "button",
                            "text": {
                                "content": content,
                                "tag": "lark_md"
                            },
                            "url": sheet_url,
                            "type": "default",
                            "value": {}
                        }],
                        "tag": "action"
                    }],
                    "header": {
                        "title": {
                            "content": "📣您有新的信息,请注意查收",
                            "tag": "plain_text"
                        }
                    }
                }
            })
            urllib3.disable_warnings()
            r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
            Common.logger("feishu").info(f'Bot message sent: {r.status_code}, {text}')
        except Exception as e:
            Common.logger("feishu").error(f"bot exception: {e}\n")


if __name__ == "__main__":
    # bot() takes five arguments; the original call omitted `mark` and
    # `mark_name`. The two values below are hypothetical placeholders:
    # `mark` must be a key of name_phone_dict, `mark_name` a display name.
    Feishu.bot('recommend', '抖音', '测试: 抖音cookie失效,请及时更换', 'xinxin', 'xinxin')
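    # Further usage sketches (commented out; the worksheet ID and indices are
    # hypothetical placeholders):
    #   print(Feishu.get_values_batch("summary", "n9xlLF"))
    #   Feishu.insert_columns("summary", "n9xlLF", "ROWS", 1, 2)
    #   Feishu.dimension_range("recommend", "summary", "n9xlLF", "ROWS", 2, 2)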