# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2022/5/9
import json
import requests
import urllib3
from main.common import Common

proxies = {"http": None, "https": None}


class Feishu:
    """
    Edit Feishu cloud documents
    """
    # Kanyikan crawler data sheet
    kanyikan_url = "https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?"
    # Kuaishou crawler data sheet
    # kuaishou_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnp4SaJt37q6OOOrYzPMjQkg?"
    kuaishou_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf?"
    # Weishi crawler data sheet
    weishi_url = "https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?"
    # Xiaoniangao crawler data sheet
    xiaoniangao_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?"
    # Data monitoring sheet
    crawler_monitor = "https://w42nne6hzg.feishu.cn/sheets/shtcnlZWYazInhf7Z60jkbLRJyd?"
    # Phone numbers (used to look up Feishu open_ids)
    wangkun = "13426262515"
    gaonannan = "18501180073"
    xinxin = "15546206651"
    huxinxue = "18832292015"
    # Feishu spreadsheet tokens (the token is the path segment of each sheet URL above)
    @classmethod
    def spreadsheettoken(cls, crawler):
        """
        :param crawler: which crawler
        """
        if crawler == "kanyikan":
            return "shtcngRPoDYAi24x52j2nDuHMih"
        elif crawler == "kuaishou":
            # return "shtcnp4SaJt37q6OOOrYzPMjQkg"
            return "shtcnICEfaw9llDNQkKgdymM1xf"
        elif crawler == "weishi":
            return "shtcn5YSWg91JfVGzj0SFZIRRPh"
        elif crawler == "xiaoniangao":
            return "shtcnYxiyQ1wLklo1W5Kdqc9cGh"
        elif crawler == "monitor":
            return "shtcnlZWYazInhf7Z60jkbLRJyd"
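    # A minimal alternative sketch (not the author's code): the branches above are
    # a pure name-to-token mapping, so a dict would make adding a crawler a
    # one-line change:
    #   SHEET_TOKENS = {"kanyikan": "shtcngRPoDYAi24x52j2nDuHMih",
    #                   "kuaishou": "shtcnICEfaw9llDNQkKgdymM1xf"}
    #   token = SHEET_TOKENS.get(crawler)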
    # Get a Feishu API token
    @classmethod
    def get_token(cls, log_type):
        """
        Get a Feishu API tenant_access_token
        :return:
        """
        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
        post_data = {"app_id": "cli_a13ad2afa438d00b",  # credentials of the published app, from the developer console
                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
        try:
            urllib3.disable_warnings()
            response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
            tenant_access_token = response.json()["tenant_access_token"]
            return tenant_access_token
        except Exception as e:
            Common.logger(log_type).error("Failed to get Feishu API token: {}", e)
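    # A minimal usage sketch: the endpoint replies with JSON containing
    # "tenant_access_token" (alongside "code" and "expire"), and every method
    # below sends it as a Bearer header. "demo" is a hypothetical log_type:
    #   headers = {"Authorization": "Bearer " + Feishu.get_token("demo")}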
    # Get spreadsheet metadata
    @classmethod
    def get_metainfo(cls, log_type, crawler):
        """
        Get spreadsheet metadata
        :return:
        """
        get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                           + cls.spreadsheettoken(crawler) + "/metainfo"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            "extFields": "protectedRange",  # extra fields to return; extFields=protectedRange returns protected row/column info
            "user_id_type": "open_id"  # type of user id to return: open_id or union_id
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
            response = json.loads(r.content.decode("utf8"))
            return response
        except Exception as e:
            Common.logger(log_type).error("Failed to get spreadsheet metadata: {}", e)
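    # A minimal usage sketch, assuming the documented v2 metainfo response shape:
    # "data" -> "sheets" lists the worksheets, and each sheetId is what the
    # methods below take as `sheetid`:
    #   meta = Feishu.get_metainfo("demo", "kanyikan")
    #   for sheet in meta["data"]["sheets"]:
    #       print(sheet["sheetId"], sheet["title"])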
    # Read all data from a worksheet
    @classmethod
    def get_values_batch(cls, log_type, crawler, sheetid):
        """
        Read all data from a worksheet
        :param log_type: which log to use
        :param crawler: which crawler
        :param sheetid: which worksheet
        :return: all values
        """
        get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                               + cls.spreadsheettoken(crawler) + "/values_batch_get"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            # Multiple ranges, e.g. url?ranges=range1,range2; each range combines a sheetId and a cell range
            "ranges": sheetid,
            # valueRenderOption=ToString returns plain-text values (except numeric types);
            # valueRenderOption=FormattedValue computes and formats cells;
            # valueRenderOption=Formula returns the formula itself for cells that contain one;
            # valueRenderOption=UnformattedValue computes but does not format cells
            "valueRenderOption": "ToString",
            # dateTimeRenderOption=FormattedString computes and formats dates/times by their format
            # (numbers are not formatted) and returns the formatted string
            "dateTimeRenderOption": "",
            # type of user id to return: open_id or union_id
            "user_id_type": "open_id"
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
            # print(r.text)
            response = json.loads(r.content.decode("utf8"))
            values = response["data"]["valueRanges"][0]["values"]
            return values
        except Exception as e:
            Common.logger(log_type).error("Failed to read worksheet values: {}", e)
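    # A minimal usage sketch ("i7O4ma" is a hypothetical sheet id; real ids come
    # from get_metainfo). The method returns None on any exception, hence `or []`:
    #   for row in Feishu.get_values_batch("demo", "kuaishou", "i7O4ma") or []:
    #       print(row)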
    # Insert rows or columns into a worksheet
    @classmethod
    def insert_columns(cls, log_type, crawler, sheetid, majordimension, startindex, endindex):
        """
        Insert rows or columns into a worksheet
        :param log_type: log path
        :param crawler: which crawler's cloud document
        :param sheetid: which worksheet
        :param majordimension: rows or columns, ROWS / COLUMNS
        :param startindex: start position
        :param endindex: end position
        """
        insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                             + cls.spreadsheettoken(crawler) + "/insert_dimension_range"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "dimension": {
                "sheetId": sheetid,
                "majorDimension": majordimension,  # default ROWS; one of ROWS, COLUMNS
                "startIndex": startindex,  # start position
                "endIndex": endindex  # end position
            },
            "inheritStyle": "AFTER"  # BEFORE or AFTER; omit to not inherit style
        }
        try:
            urllib3.disable_warnings()
            r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Insert rows/columns: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to insert rows/columns: {}", e)
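    # A minimal usage sketch, assuming the v2 convention that blank rows are
    # inserted in the range (startIndex, endIndex], i.e. this adds one row right
    # below row 1:
    #   Feishu.insert_columns("demo", "kuaishou", "i7O4ma", "ROWS", 1, 2)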
    # Write data
    @classmethod
    def update_values(cls, log_type, crawler, sheetid, ranges, values):
        """
        Write data
        :param log_type: log path
        :param crawler: which crawler's cloud document
        :param sheetid: which worksheet
        :param ranges: cell range
        :param values: the data to write, a list
        """
        update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                            + cls.spreadsheettoken(crawler) + "/values_batch_update"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "valueRanges": [
                {
                    "range": sheetid + "!" + ranges,
                    "values": values
                },
            ],
        }
        try:
            urllib3.disable_warnings()
            r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Write data: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to write data: {}", e)
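    # One plausible flow (a sketch, not the author's documented usage): insert a
    # blank row, then write into it. `values` is a list of rows, so a single row
    # still needs the outer list, and the range must match its width:
    #   Feishu.insert_columns("demo", "kuaishou", "i7O4ma", "ROWS", 1, 2)
    #   Feishu.update_values("demo", "kuaishou", "i7O4ma", "A2:D2",
    #                        [["2022-05-09", "title", "video_url", "cover_url"]])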
    # Merge cells
    @classmethod
    def merge_cells(cls, log_type, crawler, sheetid, ranges):
        """
        Merge cells
        :param log_type: log path
        :param crawler: which crawler
        :param sheetid: which worksheet
        :param ranges: cell range to merge
        """
        merge_cells_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                          + cls.spreadsheettoken(crawler) + "/merge_cells"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "range": sheetid + "!" + ranges,
            "mergeType": "MERGE_ROWS"
        }
        try:
            urllib3.disable_warnings()
            r = requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Merge cells: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to merge cells: {}", e)
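    # A minimal usage sketch; MERGE_ROWS is hardcoded above, though the v2 API
    # also accepts MERGE_ALL and MERGE_COLUMNS:
    #   Feishu.merge_cells("demo", "kuaishou", "i7O4ma", "A2:A4")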
    # Read a cell's value
    @classmethod
    def get_range_value(cls, log_type, crawler, sheetid, cell):
        """
        Read a cell's contents
        :param log_type: log path
        :param crawler: which crawler
        :param sheetid: which worksheet
        :param cell: which cell
        :return: cell contents
        """
        get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            # valueRenderOption=ToString returns plain-text values (except numeric types);
            # valueRenderOption=FormattedValue computes and formats cells;
            # valueRenderOption=Formula returns the formula itself for cells that contain one;
            # valueRenderOption=UnformattedValue computes but does not format cells.
            "valueRenderOption": "FormattedValue",
            # dateTimeRenderOption=FormattedString computes and formats dates/times by their format
            # (numbers are not formatted) and returns the formatted string.
            "dateTimeRenderOption": "",
            # type of user id to return: open_id or union_id
            "user_id_type": "open_id"
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
            # print(r.text)
            return r.json()["data"]["valueRange"]["values"][0]
        except Exception as e:
            Common.logger(log_type).error("Failed to read cell value: {}", e)
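    # A minimal usage sketch: the method returns the first row of the range as a
    # list, so a single cell still needs [0]; hyperlink cells appear to come back
    # as dicts with a "link" key (see the commented examples under __main__):
    #   b4 = Feishu.get_range_value("demo", "xiaoniangao", "dzcWHw", "B4:B4")[0]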
    # Delete rows or columns; ROWS or COLUMNS
    @classmethod
    def dimension_range(cls, log_type, crawler, sheetid, major_dimension, startindex, endindex):
        """
        Delete rows or columns
        :param log_type: log path
        :param crawler: which crawler
        :param sheetid: worksheet
        :param major_dimension: default ROWS; one of ROWS, COLUMNS
        :param startindex: start position
        :param endindex: end position
        :return:
        """
        dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/dimension_range"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "dimension": {
                "sheetId": sheetid,
                "majorDimension": major_dimension,
                "startIndex": startindex,
                "endIndex": endindex
            }
        }
        try:
            urllib3.disable_warnings()
            r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Delete rows/columns: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to delete rows/columns: {}", e)
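    # A minimal usage sketch, assuming the delete range follows the same
    # (startIndex, endIndex] convention as insert_dimension_range, so this would
    # remove exactly row 2:
    #   Feishu.dimension_range("demo", "kuaishou", "i7O4ma", "ROWS", 1, 2)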
    # Get a user's ID
    @classmethod
    def get_userid(cls, log_type, username):
        try:
            url = "https://open.feishu.cn/open-apis/user/v1/batch_get_id?"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type),
                "Content-Type": "application/json; charset=utf-8"
            }
            if username == "wangkun":
                username = cls.wangkun
            elif username == "gaonannan":
                username = cls.gaonannan
            elif username == "xinxin":
                username = cls.xinxin
            elif username == "huxinxue":
                username = cls.huxinxue
            data = {"mobiles": [username]}
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=data, verify=False, proxies=proxies)
            open_id = r.json()["data"]["mobile_users"][username][0]["open_id"]
            Common.logger(log_type).info("{}: {}", username, open_id)
            # print(f"{username}:{open_id}")
            return open_id
        except Exception as e:
            Common.logger(log_type).error("get_userid failed: {}", e)
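    # A minimal usage sketch: resolve a known nickname to an open_id; bot() below
    # embeds this id in an <at> tag to @-mention the user:
    #   open_id = Feishu.get_userid("demo", "wangkun")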
    # Feishu bot
    @classmethod
    def bot(cls, log_type, content):
        try:
            url = "https://open.feishu.cn/open-apis/bot/v2/hook/96989577-50e7-4653-9ec2-308fe3f2c5fe"
            headers = {
                'Content-Type': 'application/json'
            }
            data = json.dumps({
                "msg_type": "interactive",
                "card": {
                    "config": {
                        "wide_screen_mode": True,
                        "enable_forward": True
                    },
                    "elements": [{
                        "tag": "div",
                        "text": {
                            "content": "\n<at id=" + str(cls.get_userid(log_type, "wangkun")) + "></at>\n" + content,
                            "tag": "lark_md"
                        }
                    }, {
                        "actions": [{
                            "tag": "button",
                            "text": {
                                "content": "Kuaishou crawler sheet",
                                "tag": "lark_md"
                            },
                            "url": "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf",
                            "type": "default",
                            "value": {}
                        },
                            {
                                "tag": "button",
                                "text": {
                                    "content": "Kuaishou Jenkins",
                                    "tag": "lark_md"
                                },
                                "url": "https://jenkins-on.yishihui.com/view/%E7%88%AC%E8%99%AB-Spider/job/%E5%BF%"
                                       "AB%E6%89%8B%E5%B0%8F%E7%A8%8B%E5%BA%8F-%E8%A7%86%E9%A2%91%E7%88%AC%E5%8F%96/",
                                "type": "default",
                                "value": {}
                            }
                        ],
                        "tag": "action"
                    }],
                    "header": {
                        "title": {
                            "content": "📣 New alert, please check and handle",
                            "tag": "plain_text"
                        }
                    }
                }
            })
            urllib3.disable_warnings()
            r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
            Common.logger(log_type).info("Bot message triggered: {}, {}", r, r.json()["StatusMessage"])
        except Exception as e:
            Common.logger(log_type).error("bot failed: {}", e)


if __name__ == "__main__":
    Feishu.bot("kuaishou", "Kuaishou test message, please ignore")
    # Feishu.get_userid("kuaishou", "huxinxue")
    # Feishu.get_department("kuaishou")
    # print(Feishu.get_range_value("person", "xiaoniangao", "dzcWHw", "B4:B4")[0])
    # print(Feishu.get_range_value("person", "xiaoniangao", "dzcWHw", "C5:C5")[0][0]["link"])
    # print(Feishu.get_range_value("person", "xiaoniangao", "dzcWHw", "B6:B6")[0])
    # print(Feishu.get_range_value("person", "xiaoniangao", "dzcWHw", "B7:B7")[0])
    pass