# -*- coding: utf-8 -*-
# @Time: 2023/12/26
"""
Feishu spreadsheet helpers: token auth / create, read, update, delete / bot alerts
"""
import json
import os
import sys

import requests
import urllib3
from loguru import logger

sys.path.append(os.getcwd())

proxies = {"http": None, "https": None}


class Feishu:
    """
    Edit Feishu cloud spreadsheets.
    """
    succinct_url = "https://w42nne6hzg.feishu.cn/sheets/"

    # Spreadsheet token for each crawler's document
    @classmethod
    def spreadsheettoken(cls, crawler):
        if crawler == "summary":
            return "KsoMsyP2ghleM9tzBfmcEEXBnXg"
        else:
            return crawler

    # Get a Feishu API token
    @classmethod
    def get_token(cls):
        """
        Get a Feishu tenant access token.
        :return: tenant_access_token
        """
        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
        post_data = {"app_id": "cli_a13ad2afa438d00b",  # credentials of the account that published the app
                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
        urllib3.disable_warnings()
        response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
        tenant_access_token = response.json()["tenant_access_token"]
        return tenant_access_token

    # Get spreadsheet metadata
    @classmethod
    def get_metainfo(cls, crawler):
        """
        Get spreadsheet metadata.
        :param crawler: which crawler's spreadsheet
        :return: metadata response
        """
        try:
            get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                               + cls.spreadsheettoken(crawler) + "/metainfo"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            params = {
                "extFields": "protectedRange",  # extra fields to return; protectedRange adds protected row/column info
                "user_id_type": "open_id"  # type of user id to return: open_id or union_id
            }
            urllib3.disable_warnings()
            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
            response = json.loads(r.content.decode("utf8"))
            return response
        except Exception as e:
            logger.error("Failed to get spreadsheet metadata: {}", e)
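
    # Usage sketch (the "summary" alias maps to the spreadsheet token hard-coded above;
    # the field layout assumed here follows the Sheets v2 metainfo response):
    #   meta = Feishu.get_metainfo("summary")
    #   sheet_ids = [s["sheetId"] for s in meta["data"]["sheets"]]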

    # Read all values from a worksheet
    @classmethod
    def get_values_batch(cls, crawler, sheetid):
        """
        Read all values from a worksheet.
        :param crawler: which crawler's spreadsheet
        :param sheetid: which worksheet
        :return: all values
        """
        get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                               + cls.spreadsheettoken(crawler) + "/values_batch_get"
        headers = {
            "Authorization": "Bearer " + cls.get_token(),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            "ranges": sheetid,
            "valueRenderOption": "ToString",
            "dateTimeRenderOption": "",
            "user_id_type": "open_id"
        }
        urllib3.disable_warnings()
        r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
        response = json.loads(r.content.decode("utf8"))
        values = response["data"]["valueRanges"][0]["values"]
        return values
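
    # Usage sketch ("bc154d" is one of the worksheet ids referenced in bot() below):
    #   for row in Feishu.get_values_batch("summary", "bc154d"):
    #       print(row)  # each row is a list of cell values rendered as strings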

    # Insert rows or columns into a worksheet
    @classmethod
    def insert_columns(cls, crawler, sheetid, majordimension, startindex, endindex):
        """
        Insert rows or columns into a worksheet.
        :param crawler: which crawler's spreadsheet
        :param sheetid: which worksheet
        :param majordimension: rows or columns: ROWS / COLUMNS
        :param startindex: start position
        :param endindex: end position
        """
        try:
            insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                 + cls.spreadsheettoken(crawler) + "/insert_dimension_range"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "dimension": {
                    "sheetId": sheetid,
                    "majorDimension": majordimension,  # defaults to ROWS; ROWS or COLUMNS
                    "startIndex": startindex,  # start position
                    "endIndex": endindex  # end position
                },
                "inheritStyle": "AFTER"  # BEFORE or AFTER; omit to not inherit style
            }
            urllib3.disable_warnings()
            requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
        except Exception as e:
            logger.error("Failed to insert rows or columns: {}", e)
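
    # Usage sketch: insert one empty row below the header of a hypothetical worksheet:
    #   Feishu.insert_columns("summary", "bc154d", "ROWS", 1, 2)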

    # Write values
    @classmethod
    def update_values(cls, crawler, sheetid, ranges, values):
        """
        Write values into a cell range.
        :param crawler: which crawler's spreadsheet
        :param sheetid: which worksheet
        :param ranges: cell range
        :param values: values to write, as a list of rows
        """
        try:
            update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                + cls.spreadsheettoken(crawler) + "/values_batch_update"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "valueRanges": [
                    {
                        "range": sheetid + "!" + ranges,
                        "values": values
                    },
                ],
            }
            urllib3.disable_warnings()
            requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
        except Exception as e:
            logger.error("Failed to write values: {}", e)
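
    # Usage sketch (hypothetical range and cell values); values is a list of rows,
    # one inner list per row of the range:
    #   Feishu.update_values("summary", "bc154d", "A2:B2", [["video_title", "https://example.com"]])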

    # Merge cells
    @classmethod
    def merge_cells(cls, crawler, sheetid, ranges):
        """
        Merge cells.
        :param crawler: which crawler's spreadsheet
        :param sheetid: which worksheet
        :param ranges: cell range to merge
        """
        try:
            merge_cells_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/merge_cells"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "range": sheetid + "!" + ranges,
                "mergeType": "MERGE_ROWS"
            }
            urllib3.disable_warnings()
            requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
        except Exception as e:
            logger.error("Failed to merge cells: {}", e)
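
    # Usage sketch (hypothetical range; note the method always sends mergeType=MERGE_ROWS):
    #   Feishu.merge_cells("summary", "bc154d", "A2:C2")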

    # Read a single cell
    @classmethod
    def get_range_value(cls, crawler, sheetid, cell):
        """
        Read the content of a cell.
        :param crawler: which crawler's spreadsheet
        :param sheetid: which worksheet
        :param cell: which cell
        :return: cell content
        """
        try:
            get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                  + cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            params = {
                "valueRenderOption": "FormattedValue",
                # dateTimeRenderOption=FormattedString formats dates and times according to their
                # cell format (numbers are left unformatted) and returns the formatted string
                "dateTimeRenderOption": "",
                # type of user id to return: open_id or union_id
                "user_id_type": "open_id"
            }
            urllib3.disable_warnings()
            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
            # logger.error(r.text)
            return r.json()["data"]["valueRange"]["values"][0]
        except Exception as e:
            logger.error("Failed to read cell values: {}", e)
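
    # Usage sketch (hypothetical cell; returns the first row of the range as a list):
    #   b2 = Feishu.get_range_value("summary", "bc154d", "B2:B2")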

    # Get all non-empty cell contents of a worksheet
    @classmethod
    def get_sheet_content(cls, crawler, sheet_id):
        try:
            sheet = Feishu.get_values_batch(crawler, sheet_id)
            content_list = []
            for row in sheet:
                for cell in row:
                    if cell is not None:
                        content_list.append(cell)
            return content_list
        except Exception as e:
            logger.error(f"get_sheet_content:{e}\n")

    # Delete rows or columns; ROWS or COLUMNS
    @classmethod
    def dimension_range(cls, log_type, crawler, sheetid, major_dimension, startindex, endindex):
        """
        Delete rows or columns.
        :param log_type: log path (unused; kept for caller compatibility)
        :param crawler: which crawler's spreadsheet
        :param sheetid: which worksheet
        :param major_dimension: defaults to ROWS; ROWS or COLUMNS
        :param startindex: start position
        :param endindex: end position
        :return:
        """
        try:
            dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                  + cls.spreadsheettoken(crawler) + "/dimension_range"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "dimension": {
                    "sheetId": sheetid,
                    "majorDimension": major_dimension,
                    "startIndex": startindex,
                    "endIndex": endindex
                }
            }
            urllib3.disable_warnings()
            requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
        except Exception as e:
            logger.error("Failed to delete rows or columns: {}", e)
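
    # Usage sketch: delete a single row (row 2) from a hypothetical worksheet;
    # index semantics follow the Sheets v2 dimension_range API.
    #   Feishu.dimension_range("recommend", "summary", "bc154d", "ROWS", 2, 2)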

    # Get a user's open_id
    @classmethod
    def get_userid(cls, username):
        try:
            url = "https://open.feishu.cn/open-apis/user/v1/batch_get_id?"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            name_phone_dict = {
                "xinxin": "15546206651",
                "muxinyi": "13699208058",
                "wangxueke": "13513479926",
                "yuzhuoyi": "18624010360",
                "luojunhui": "18801281360",
                "fanjun": "15200827642",
                "zhangyong": "17600025055",
                "liukunyu": "18810931977"
            }
            mobile = name_phone_dict.get(username)
            data = {"mobiles": [mobile]}
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=data, verify=False, proxies=proxies)
            open_id = r.json()["data"]["mobile_users"][mobile][0]["open_id"]
            return open_id
        except Exception as e:
            # Unknown names fail the lookup; swallow the error and return None.
            # logger.error(f"get_userid failed: {e}\n")
            pass
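
    # Usage sketch: resolve an open_id for the @-mentions built in bot() below.
    #   open_id = Feishu.get_userid("xinxin")  # None if the name is not in name_phone_dict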

    # Feishu alert bot
    @classmethod
    def bot(cls, log_type, crawler, text, mark_name):
        try:
            headers = {'Content-Type': 'application/json'}
            if crawler == "机器自动改造消息通知":  # auto-rework notifications
                url = "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703"
                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/KsoMsyP2ghleM9tzBfmcEEXBnXg?sheet=bc154d"
                users = f'<at id="{cls.get_userid(log_type)}">{mark_name}</at>'
            elif crawler == "快手关键词搜索":  # Kuaishou keyword search
                url = "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703"
                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/KsoMsyP2ghleM9tzBfmcEEXBnXg?sheet=U1gySe"
                # log_type and mark_name are parallel sequences here: @-mention every user
                users = "".join([f'<at id="{cls.get_userid(uid)}">{name}</at>'
                                 for uid, name in zip(log_type, mark_name)])
            else:
                url = "https://open.feishu.cn/open-apis/bot/v2/hook/7928f182-08c1-4c4d-b2f7-82e10c93ca80"
                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/KsoMsyP2ghleM9tzBfmcEEXBnXg?sheet=bc154d"
                users = f'<at id="{cls.get_userid(log_type)}">{mark_name}</at>'
            data = json.dumps({
                "msg_type": "interactive",
                "card": {
                    "config": {
                        "wide_screen_mode": True,
                        "enable_forward": True
                    },
                    "elements": [{
                        "tag": "div",
                        "text": {
                            "content": users + text,
                            "tag": "lark_md"
                        }
                    }, {
                        "actions": [{
                            "tag": "button",
                            "text": {
                                "content": "详情,点击~~~~~",
                                "tag": "lark_md"
                            },
                            "url": sheet_url,
                            "type": "default",
                            "value": {}
                        }],
                        "tag": "action"
                    }],
                    "header": {
                        "title": {
                            "content": "📣消息提醒",
                            "tag": "plain_text"
                        }
                    }
                }
            })
            urllib3.disable_warnings()
            requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
        except Exception as e:
            logger.error(f"bot failed: {e}\n")
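
    # Usage sketch (message text is arbitrary; "xinxin" must exist in name_phone_dict):
    #   Feishu.bot("xinxin", "抖音", "测试: 抖音cookie失效,请及时更换", "xinxin")
    # For the "快手关键词搜索" branch, pass parallel lists instead:
    #   Feishu.bot(["xinxin", "fanjun"], "快手关键词搜索", "告警文本", ["xinxin", "fanjun"])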

    # Feishu bot: rework-plan completion notification
    @classmethod
    def finish_bot(cls, text, url, content):
        try:
            headers = {'Content-Type': 'application/json'}
            data = json.dumps({
                "msg_type": "interactive",
                "card": {
                    "config": {
                        "wide_screen_mode": True,
                        "enable_forward": True
                    },
                    "elements": [{
                        "tag": "div",
                        "text": {
                            "content": text,
                            "tag": "lark_md"
                        }
                    }],
                    "header": {
                        "title": {
                            "content": content,
                            "tag": "plain_text"
                        }
                    }
                }
            })
            urllib3.disable_warnings()
            requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
        except Exception as e:
            logger.error(f"finish_bot failed: {e}\n")

if __name__ == "__main__":
    # bot() requires mark_name as a fourth argument (missing in the original call);
    # "xinxin" is a hypothetical choice from name_phone_dict above.
    Feishu.bot("recommend", "抖音", "测试: 抖音cookie失效,请及时更换", "xinxin")