# feishu_lib.py
# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/1/3
import json

import requests
import urllib3

from main.common import Common

proxies = {"http": None, "https": None}


class Feishu:
    """
    Edit Feishu cloud documents.
    """
    # Shengshengyingyin (胜胜影音)
    crawler_shengshengyingyin = 'https://w42nne6hzg.feishu.cn/sheets/shtcnz1ymxHL1u8WHblfqfys7qe'

    # Feishu spreadsheet token for each crawler
    @classmethod
    def spreadsheettoken(cls, crawler):
        """
        :param crawler: which crawler's spreadsheet to use
        """
        if crawler == "kanyikan":
            return "shtcngRPoDYAi24x52j2nDuHMih"
        elif crawler == "kuaishou":
            return "shtcnp4SaJt37q6OOOrYzPMjQkg"
        elif crawler == "weishi":
            return "shtcn5YSWg91JfVGzj0SFZIRRPh"
        elif crawler == "xiaoniangao":
            return "shtcnYxiyQ1wLklo1W5Kdqc9cGh"
        elif crawler == "monitor":
            return "shtcnlZWYazInhf7Z60jkbLRJyd"
        elif crawler == "bszf":
            return "shtcnGh2rrsPYM4iVNEBO7OqWrb"
        elif crawler == "ssyy":
            return "shtcnz1ymxHL1u8WHblfqfys7qe"

    # Get a Feishu API token
    @classmethod
    def get_token(cls, log_type):
        """
        Get a Feishu API tenant access token.
        :return: tenant_access_token
        """
        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
        post_data = {"app_id": "cli_a13ad2afa438d00b",  # credentials of the backend account that published the app
                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
        try:
            urllib3.disable_warnings()
            response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
            tenant_access_token = response.json()["tenant_access_token"]
            return tenant_access_token
        except Exception as e:
            Common.logger(log_type).error("Failed to get Feishu API token: {}", e)
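
    # Illustrative usage (a sketch, not part of the original code; "demo" is a
    # hypothetical log type, and the call hits the live Feishu API):
    #     token = Feishu.get_token("demo")
    #     headers = {"Authorization": "Bearer " + token}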

    # Get spreadsheet metadata
    @classmethod
    def get_metainfo(cls, log_type, crawler):
        """
        Get spreadsheet metadata.
        :return: the decoded JSON response
        """
        get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                           + cls.spreadsheettoken(crawler) + "/metainfo"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            "extFields": "protectedRange",  # extra fields to return; protectedRange returns protected row/column info
            "user_id_type": "open_id"  # type of user id to return: open_id or union_id
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
            response = json.loads(r.content.decode("utf8"))
            return response
        except Exception as e:
            Common.logger(log_type).error("Failed to get spreadsheet metadata: {}", e)
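
    # Illustrative usage (a sketch; "demo" is a hypothetical log type, and the
    # response shape is assumed from the sheets v2 metainfo endpoint, where
    # per-worksheet ids live under data["sheets"]):
    #     meta = Feishu.get_metainfo("demo", "ssyy")
    #     sheet_ids = [s["sheetId"] for s in meta["data"]["sheets"]]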

    # Read all data from a worksheet
    @classmethod
    def get_values_batch(cls, log_type, crawler, sheetid):
        """
        Read all data from a worksheet.
        :param log_type: which log to use
        :param crawler: which crawler
        :param sheetid: which worksheet
        :return: all values in the worksheet
        """
        get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                               + cls.spreadsheettoken(crawler) + "/values_batch_get"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            # Multiple query ranges, e.g. url?ranges=range1,range2; each range
            # consists of a sheetId plus an optional cell range.
            "ranges": sheetid,
            # valueRenderOption=ToString returns plain-text values (numeric types excluded);
            # valueRenderOption=FormattedValue computes and formats the cells;
            # valueRenderOption=Formula returns the formula itself for formula cells;
            # valueRenderOption=UnformattedValue computes but does not format.
            "valueRenderOption": "ToString",
            # dateTimeRenderOption=FormattedString computes and formats dates/times
            # according to their cell format (numbers are not formatted) and
            # returns the formatted string.
            "dateTimeRenderOption": "",
            # type of user id to return: open_id or union_id
            "user_id_type": "open_id"
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
            response = json.loads(r.content.decode("utf8"))
            values = response["data"]["valueRanges"][0]["values"]
            return values
        except Exception as e:
            Common.logger(log_type).error("Failed to read worksheet data: {}", e)
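
    # Illustrative usage (a sketch; "demo" and the sheet id "xxxxxx" are
    # hypothetical placeholders):
    #     for row in Feishu.get_values_batch("demo", "ssyy", "xxxxxx"):
    #         print(row)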

    # Insert rows or columns into a worksheet
    @classmethod
    def insert_columns(cls, log_type, crawler, sheetid, majordimension, startindex, endindex):
        """
        Insert rows or columns into a worksheet.
        :param log_type: which log to use
        :param crawler: which crawler's cloud document
        :param sheetid: which worksheet
        :param majordimension: rows or columns, ROWS / COLUMNS
        :param startindex: start position
        :param endindex: end position
        """
        insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                             + cls.spreadsheettoken(crawler) + "/insert_dimension_range"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "dimension": {
                "sheetId": sheetid,
                "majorDimension": majordimension,  # defaults to ROWS; ROWS or COLUMNS
                "startIndex": startindex,  # start position
                "endIndex": endindex  # end position
            },
            "inheritStyle": "AFTER"  # BEFORE or AFTER; omit to not inherit styles
        }
        try:
            urllib3.disable_warnings()
            r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Insert rows/columns: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to insert rows/columns: {}", e)
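
    # Illustrative usage (a sketch with a hypothetical sheet id "xxxxxx";
    # inserts one blank row between rows 1 and 2):
    #     Feishu.insert_columns("demo", "ssyy", "xxxxxx", "ROWS", 1, 2)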

    # Write data
    @classmethod
    def update_values(cls, log_type, crawler, sheetid, ranges, values):
        """
        Write data into a worksheet.
        :param log_type: which log to use
        :param crawler: which crawler's cloud document
        :param sheetid: which worksheet
        :param ranges: cell range to write to
        :param values: the data to write, as a list
        """
        update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                            + cls.spreadsheettoken(crawler) + "/values_batch_update"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "valueRanges": [
                {
                    "range": sheetid + "!" + ranges,
                    "values": values
                },
            ],
        }
        try:
            urllib3.disable_warnings()
            r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Write data: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to write data: {}", e)
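
    # Illustrative usage (a sketch with a hypothetical sheet id "xxxxxx";
    # writes one row of two cells into A2:B2):
    #     Feishu.update_values("demo", "ssyy", "xxxxxx", "A2:B2",
    #                          [["video_title", "video_url"]])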

    # Merge cells
    @classmethod
    def merge_cells(cls, log_type, crawler, sheetid, ranges):
        """
        Merge cells.
        :param log_type: which log to use
        :param crawler: which crawler
        :param sheetid: which worksheet
        :param ranges: the cell range to merge
        """
        merge_cells_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                          + cls.spreadsheettoken(crawler) + "/merge_cells"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "range": sheetid + "!" + ranges,
            "mergeType": "MERGE_ROWS"
        }
        try:
            urllib3.disable_warnings()
            r = requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Merge cells: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to merge cells: {}", e)
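
    # Illustrative usage (a sketch with a hypothetical sheet id "xxxxxx";
    # merges A1:C1 into a single cell):
    #     Feishu.merge_cells("demo", "ssyy", "xxxxxx", "A1:C1")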

    # Read the contents of a cell range
    @classmethod
    def get_range_value(cls, log_type, crawler, sheetid, cell):
        """
        Read the contents of a cell range.
        :param log_type: which log to use
        :param crawler: which crawler
        :param sheetid: which worksheet
        :param cell: which cell range
        :return: the cell contents
        """
        get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            # valueRenderOption=ToString returns plain-text values (numeric types excluded);
            # valueRenderOption=FormattedValue computes and formats the cells;
            # valueRenderOption=Formula returns the formula itself for formula cells;
            # valueRenderOption=UnformattedValue computes but does not format.
            "valueRenderOption": "FormattedValue",
            # dateTimeRenderOption=FormattedString computes and formats dates/times
            # according to their cell format (numbers are not formatted) and
            # returns the formatted string.
            "dateTimeRenderOption": "",
            # type of user id to return: open_id or union_id
            "user_id_type": "open_id"
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
            return r.json()["data"]["valueRange"]["values"][0]
        except Exception as e:
            Common.logger(log_type).error("Failed to read cell data: {}", e)
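
    # Illustrative usage (a sketch with a hypothetical sheet id "xxxxxx";
    # note the method returns only the first row of the requested range):
    #     cell = Feishu.get_range_value("demo", "ssyy", "xxxxxx", "B2:B2")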

    # Delete rows or columns; ROWS or COLUMNS
    @classmethod
    def dimension_range(cls, log_type, crawler, sheetid, major_dimension, startindex, endindex):
        """
        Delete rows or columns.
        :param log_type: which log to use
        :param crawler: which crawler
        :param sheetid: which worksheet
        :param major_dimension: defaults to ROWS; ROWS or COLUMNS
        :param startindex: start position
        :param endindex: end position
        :return:
        """
        dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/dimension_range"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "dimension": {
                "sheetId": sheetid,
                "majorDimension": major_dimension,
                "startIndex": startindex,
                "endIndex": endindex
            }
        }
        try:
            urllib3.disable_warnings()
            r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Delete video data: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to delete video data: {}", e)
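
    # Illustrative usage (a sketch with a hypothetical sheet id "xxxxxx";
    # deletes rows 2 through 3, assuming the API's 1-based row indexing):
    #     Feishu.dimension_range("demo", "ssyy", "xxxxxx", "ROWS", 2, 3)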


if __name__ == "__main__":
    pass
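    # Illustrative smoke test (uncomment to hit the live Feishu API;
    # "demo" is a hypothetical log type):
    # print(Feishu.get_token("demo"))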