feishu_utils.py

# -*- coding: utf-8 -*-
# @Time: 2023/12/26
"""
Feishu spreadsheet utilities: token auth / CRUD / bot alerts.
"""
import json

import requests
import urllib3
from loguru import logger

proxies = {"http": None, "https": None}


class Feishu:
    """
    Edit Feishu cloud documents (spreadsheets).
    """
    succinct_url = "https://w42nne6hzg.feishu.cn/sheets/"

    # Feishu spreadsheet path token
    @classmethod
    def spreadsheettoken(cls, crawler):
        if crawler == "summary":
            return "KsoMsyP2ghleM9tzBfmcEEXBnXg"
        else:
            return crawler

    # Get a Feishu API token
    @classmethod
    def get_token(cls):
        """
        Get a Feishu tenant access token.
        :return:
        """
        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
        post_data = {"app_id": "cli_a13ad2afa438d00b",  # credentials of the backend app this bot was published under
                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
        urllib3.disable_warnings()
        response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
        tenant_access_token = response.json()["tenant_access_token"]
        return tenant_access_token
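
    # The endpoint above responds with JSON shaped roughly like
    #   {"code": 0, "msg": "ok", "tenant_access_token": "t-...", "expire": 7200}
    # get_token() is re-invoked on every request below; if rate limits become an
    # issue, the token could be cached for up to `expire` seconds.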

    # Get spreadsheet metadata
    @classmethod
    def get_metainfo(cls, crawler):
        """
        Get spreadsheet metadata.
        :return:
        """
        try:
            get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                               + cls.spreadsheettoken(crawler) + "/metainfo"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            params = {
                "extFields": "protectedRange",  # extra fields to return; protectedRange adds protected row/column info
                "user_id_type": "open_id"  # user id type to return: open_id or union_id
            }
            urllib3.disable_warnings()
            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
            response = json.loads(r.content.decode("utf8"))
            return response
        except Exception as e:
            logger.error("Failed to get spreadsheet metadata: {}", e)

    # Read all data from a worksheet
    @classmethod
    def get_values_batch(cls, crawler, sheetid):
        """
        Read all data from a worksheet.
        :param crawler: which crawler
        :param sheetid: which worksheet
        :return: all values
        """
        get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                               + cls.spreadsheettoken(crawler) + "/values_batch_get"
        headers = {
            "Authorization": "Bearer " + cls.get_token(),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            "ranges": sheetid,
            "valueRenderOption": "ToString",
            "dateTimeRenderOption": "",
            "user_id_type": "open_id"
        }
        urllib3.disable_warnings()
        r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
        response = json.loads(r.content.decode("utf8"))
        values = response["data"]["valueRanges"][0]["values"]
        return values
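
    # The returned `values` is a list of rows, each row a list of cell values,
    # e.g. [["title", "url"], ["foo", "https://..."]]; empty cells come back as
    # None, which get_sheet_content() below filters out.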

    # Insert rows or columns into a worksheet
    @classmethod
    def insert_columns(cls, crawler, sheetid, majordimension, startindex, endindex):
        """
        Insert rows or columns into a worksheet.
        :param crawler: which crawler's cloud document
        :param sheetid: which worksheet
        :param majordimension: rows or columns, ROWS / COLUMNS
        :param startindex: start position
        :param endindex: end position
        """
        try:
            insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                 + cls.spreadsheettoken(crawler) + "/insert_dimension_range"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "dimension": {
                    "sheetId": sheetid,
                    "majorDimension": majordimension,  # defaults to ROWS; ROWS or COLUMNS
                    "startIndex": startindex,  # start position
                    "endIndex": endindex  # end position
                },
                "inheritStyle": "AFTER"  # BEFORE or AFTER; omit to not inherit style
            }
            urllib3.disable_warnings()
            r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
        except Exception as e:
            logger.error("Failed to insert rows/columns: {}", e)
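
    # Hypothetical call: insert one blank row after the header row of a sheet
    # (index semantics follow the Feishu insert_dimension_range API):
    #   Feishu.insert_columns("summary", "bc154d", "ROWS", 1, 2)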

    # Write data
    @classmethod
    def update_values(cls, crawler, sheetid, ranges, values):
        """
        Write data into a cell range.
        :param crawler: which crawler's cloud document
        :param sheetid: which worksheet
        :param ranges: cell range
        :param values: the data to write, a list of rows
        """
        try:
            update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                + cls.spreadsheettoken(crawler) + "/values_batch_update"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "valueRanges": [
                    {
                        "range": sheetid + "!" + ranges,
                        "values": values
                    },
                ],
            }
            urllib3.disable_warnings()
            r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
        except Exception as e:
            logger.error("Failed to write data: {}", e)
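
    # Hypothetical call: write one row into A2:B2 of sheet "bc154d"
    # (the sheet id comes from the sheet_url values used in bot() below):
    #   Feishu.update_values("summary", "bc154d", "A2:B2", [["2023-12-26", "done"]])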

    # Merge cells
    @classmethod
    def merge_cells(cls, crawler, sheetid, ranges):
        """
        Merge cells.
        :param crawler: which crawler
        :param sheetid: which worksheet
        :param ranges: the cell range to merge
        """
        try:
            merge_cells_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/merge_cells"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "range": sheetid + "!" + ranges,
                "mergeType": "MERGE_ROWS"
            }
            urllib3.disable_warnings()
            r = requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
        except Exception as e:
            logger.error("Failed to merge cells: {}", e)

    # Read data from a single cell range
    @classmethod
    def get_range_value(cls, crawler, sheetid, cell):
        """
        Read the content of a cell range.
        :param crawler: which crawler
        :param sheetid: which worksheet
        :param cell: which cell
        :return: cell content
        """
        try:
            get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                  + cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            params = {
                "valueRenderOption": "FormattedValue",
                # dateTimeRenderOption=FormattedString formats dates/times according to their
                # format (numbers are not formatted) and returns the formatted string
                "dateTimeRenderOption": "",
                # user id type to return: open_id or union_id
                "user_id_type": "open_id"
            }
            urllib3.disable_warnings()
            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
            # logger.error(r.text)
            return r.json()["data"]["valueRange"]["values"][0]
        except Exception as e:
            logger.error("Failed to read cell data: {}", e)
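
    # Note: this returns only the first row of the requested range, e.g.
    # cell="B2:B2" yields a one-element list like ["cell text"] (None for an
    # empty cell).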

    # Get the flattened content of a sheet
    @classmethod
    def get_sheet_content(cls, crawler, sheet_id):
        try:
            sheet = Feishu.get_values_batch(crawler, sheet_id)
            content_list = []
            for row in sheet:
                for cell in row:
                    if cell is not None:
                        content_list.append(cell)
            return content_list
        except Exception as e:
            logger.error(f'get_sheet_content:{e}\n')

    # Delete rows or columns; ROWS or COLUMNS
    @classmethod
    def dimension_range(cls, log_type, crawler, sheetid, major_dimension, startindex, endindex):
        """
        Delete rows or columns.
        :param log_type: log path
        :param crawler: which crawler
        :param sheetid: worksheet
        :param major_dimension: defaults to ROWS; ROWS or COLUMNS
        :param startindex: start position
        :param endindex: end position
        :return:
        """
        try:
            dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                  + cls.spreadsheettoken(crawler) + "/dimension_range"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "dimension": {
                    "sheetId": sheetid,
                    "majorDimension": major_dimension,
                    "startIndex": startindex,
                    "endIndex": endindex
                }
            }
            urllib3.disable_warnings()
            r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
        except Exception as e:
            logger.error("Failed to delete rows/columns: {}", e)

    # Get a user's open_id
    @classmethod
    def get_userid(cls, username):
        try:
            url = "https://open.feishu.cn/open-apis/user/v1/batch_get_id?"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            name_phone_dict = {
                "xinxin": "15546206651",
                "muxinyi": "13699208058",
                "wangxueke": "13513479926",
                "yuzhuoyi": "18624010360",
                "luojunhui": "18801281360",
                "fanjun": "15200827642",
                "zhangyong": "17600025055",
                "liukunyu": "18810931977"
            }
            username = name_phone_dict.get(username)
            data = {"mobiles": [username]}
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=data, verify=False, proxies=proxies)
            open_id = r.json()["data"]["mobile_users"][username][0]["open_id"]
            return open_id
        except Exception:
            # swallow lookup failures and return None
            pass
            # logger.error(f"get_userid error: {e}\n")

    # Feishu bot alert
    @classmethod
    def bot(cls, log_type, crawler, text, mark_name):
        try:
            headers = {'Content-Type': 'application/json'}
            if crawler == "机器自动改造消息通知":
                url = "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703"
                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/KsoMsyP2ghleM9tzBfmcEEXBnXg?sheet=bc154d"
                users = f'<at id="{cls.get_userid(log_type)}">{mark_name}</at>'
            elif crawler == "快手关键词搜索":
                url = "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703"
                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/KsoMsyP2ghleM9tzBfmcEEXBnXg?sheet=U1gySe"
                # here log_type and mark_name are parallel lists of user keys and display names
                users = "".join([f'<at id="{cls.get_userid(lt)}">{name}</at>'
                                 for lt, name in zip(log_type, mark_name)])
            else:
                url = "https://open.feishu.cn/open-apis/bot/v2/hook/7928f182-08c1-4c4d-b2f7-82e10c93ca80"
                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/KsoMsyP2ghleM9tzBfmcEEXBnXg?sheet=bc154d"
                users = f'<at id="{cls.get_userid(log_type)}">{mark_name}</at>'
            data = json.dumps({
                "msg_type": "interactive",
                "card": {
                    "config": {
                        "wide_screen_mode": True,
                        "enable_forward": True
                    },
                    "elements": [{
                        "tag": "div",
                        "text": {
                            "content": users + text,
                            "tag": "lark_md"
                        }
                    }, {
                        "actions": [{
                            "tag": "button",
                            "text": {
                                "content": "详情,点击~~~~~",
                                "tag": "lark_md"
                            },
                            "url": sheet_url,
                            "type": "default",
                            "value": {}
                        }],
                        "tag": "action"
                    }],
                    "header": {
                        "title": {
                            "content": "📣消息提醒",
                            "tag": "plain_text"
                        }
                    }
                }
            })
            urllib3.disable_warnings()
            r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
        except Exception as e:
            logger.error(f"bot error: {e}\n")

    # Feishu bot: remodel-plan completion notification
    @classmethod
    def finish_bot(cls, text, url, content):
        try:
            headers = {'Content-Type': 'application/json'}
            data = json.dumps({
                "msg_type": "interactive",
                "card": {
                    "config": {
                        "wide_screen_mode": True,
                        "enable_forward": True
                    },
                    "elements": [{
                        "tag": "div",
                        "text": {
                            "content": text,
                            "tag": "lark_md"
                        }
                    }],
                    "header": {
                        "title": {
                            "content": content,
                            "tag": "plain_text"
                        }
                    }
                }
            })
            urllib3.disable_warnings()
            r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
        except Exception as e:
            logger.error(f"finish_bot error: {e}\n")


if __name__ == "__main__":
    # bot() takes four arguments; the original call omitted mark_name, so a
    # placeholder name from name_phone_dict is supplied here.
    Feishu.bot('recommend', '抖音', '测试: 抖音cookie失效,请及时更换', 'xinxin')
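
    # More usage sketches (sheet id "bc154d" comes from the sheet_url values in bot()):
    #   print(Feishu.get_sheet_content("summary", "bc154d"))
    #   Feishu.finish_bot("task done",
    #                     "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703",
    #                     "📣消息提醒")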