feishu_lib.py
# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2022/12/22
import json
import requests
import urllib3
from main.common import Common

proxies = {"http": None, "https": None}
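# Passing this mapping to every requests call below disables any system or
# environment proxy (a None value means "no proxy for this scheme").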


class Feishu:
    """
    Edit Feishu cloud documents (spreadsheets).
    """
    # Kanyikan crawler data sheet
    kanyikan_url = "https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?"
    # Kuaishou crawler data sheet
    kuaishou_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf?"
    # Weishi crawler data sheet
    weishi_url = "https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?"
    # Xiaoniangao crawler data sheet
    xiaoniangao_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?"
    # Data monitoring sheet
    crawler_monitor = "https://w42nne6hzg.feishu.cn/sheets/shtcnlZWYazInhf7Z60jkbLRJyd?"
    # Xigua video sheet
    crawler_xigua = "https://w42nne6hzg.feishu.cn/sheets/shtcnvOpx2P8vBXiV91Ot1MKIw8?"
    # kdjsfq (看到就是福气) sheet
    crawler_kdjsfq = "https://w42nne6hzg.feishu.cn/sheets/shtcnEokBkIjOUPAk8vbbPKnXgb"

    # Phone numbers
    wangkun = "13426262515"
    gaonannan = "18501180073"
    xinxin = "15546206651"
    huxinxue = "18832292015"

    # Feishu spreadsheet tokens
    @classmethod
    def spreadsheettoken(cls, crawler):
        """
        :param crawler: which crawler's spreadsheet
        """
        if crawler == "kanyikan":
            return "shtcngRPoDYAi24x52j2nDuHMih"
        elif crawler == "kuaishou":
            # return "shtcnp4SaJt37q6OOOrYzPMjQkg"
            return "shtcnICEfaw9llDNQkKgdymM1xf"
        elif crawler == "weishi":
            return "shtcn5YSWg91JfVGzj0SFZIRRPh"
        elif crawler == "xiaoniangao":
            return "shtcnYxiyQ1wLklo1W5Kdqc9cGh"
        elif crawler == "monitor":
            return "shtcnlZWYazInhf7Z60jkbLRJyd"
        elif crawler == "xigua":
            return "shtcnvOpx2P8vBXiV91Ot1MKIw8"
        elif crawler == "kdjsfq":
            return "shtcnEokBkIjOUPAk8vbbPKnXgb"
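
        # Any other crawler name falls through and implicitly returns None,
        # which would break the URL concatenation in the callers below.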

    # Get a Feishu API token
    @classmethod
    def get_token(cls, log_type):
        """
        Get a Feishu tenant_access_token.
        :return:
        """
        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
        post_data = {"app_id": "cli_a13ad2afa438d00b",  # credentials of the backend account the app was published with
                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
        try:
            urllib3.disable_warnings()
            response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
            tenant_access_token = response.json()["tenant_access_token"]
            return tenant_access_token
        except Exception as e:
            Common.logger(log_type).error("Failed to get Feishu API token: {}", e)
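
    # A successful token response looks roughly like this (shape per the Feishu
    # open-api docs; the values are made up):
    #   {"code": 0, "msg": "ok", "tenant_access_token": "t-xxx", "expire": 7200}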

    # Get spreadsheet metadata
    @classmethod
    def get_metainfo(cls, log_type, crawler):
        """
        Get the spreadsheet's metadata.
        :return:
        """
        get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                           + cls.spreadsheettoken(crawler) + "/metainfo"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            "extFields": "protectedRange",  # extra fields to return; protectedRange adds row/column protection info
            "user_id_type": "open_id"  # type of user id to return; open_id or union_id
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
            response = json.loads(r.content.decode("utf8"))
            return response
        except Exception as e:
            Common.logger(log_type).error("Failed to get spreadsheet metadata: {}", e)
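
    # The response JSON carries the per-sheet ids under data.sheets[*].sheetId;
    # those are the `sheetid` values the methods below expect.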

    # Read all data from a worksheet
    @classmethod
    def get_values_batch(cls, log_type, crawler, sheetid):
        """
        Read all data from a worksheet.
        :param log_type: which log to use
        :param crawler: which crawler
        :param sheetid: which worksheet
        :return: all values
        """
        get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                               + cls.spreadsheettoken(crawler) + "/values_batch_get"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            # Multiple query ranges, e.g. url?ranges=range1,range2, where each range
            # combines a sheetId with a cell range; a bare sheetId reads the whole sheet.
            "ranges": sheetid,
            # valueRenderOption=ToString returns plain-text values (except numeric types);
            # valueRenderOption=FormattedValue computes and formats the cell;
            # valueRenderOption=Formula returns the formula itself when the cell contains one;
            # valueRenderOption=UnformattedValue computes but does not format the cell.
            "valueRenderOption": "ToString",
            # dateTimeRenderOption=FormattedString computes date/time values and formats them
            # by their cell format (numbers are left alone), returning the formatted string.
            "dateTimeRenderOption": "",
            # type of user id to return; open_id or union_id
            "user_id_type": "open_id"
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
            # print(r.text)
            response = json.loads(r.content.decode("utf8"))
            values = response["data"]["valueRanges"][0]["values"]
            return values
        except Exception as e:
            Common.logger(log_type).error("Failed to read worksheet values: {}", e)
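
    # Usage sketch (the sheet id "fYdA8F" is made up for illustration):
    #   rows = Feishu.get_values_batch("kuaishou", "kuaishou", "fYdA8F")
    #   for row in rows:  # each row is a list of cell values
    #       print(row)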

    # Insert rows or columns into a worksheet
    @classmethod
    def insert_columns(cls, log_type, crawler, sheetid, majordimension, startindex, endindex):
        """
        Insert rows or columns into a worksheet.
        :param log_type: log path
        :param crawler: which crawler's cloud document
        :param sheetid: which worksheet
        :param majordimension: rows or columns, ROWS / COLUMNS
        :param startindex: start position
        :param endindex: end position
        """
        insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                             + cls.spreadsheettoken(crawler) + "/insert_dimension_range"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "dimension": {
                "sheetId": sheetid,
                "majorDimension": majordimension,  # defaults to ROWS; ROWS or COLUMNS
                "startIndex": startindex,  # start position
                "endIndex": endindex  # end position
            },
            "inheritStyle": "AFTER"  # BEFORE or AFTER; omit to not inherit style
        }
        try:
            urllib3.disable_warnings()
            r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Insert rows/columns: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to insert rows/columns: {}", e)
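
    # Usage sketch (made-up sheet id): open one blank row between rows 1 and 2,
    # inheriting the style of the row after the inserted range:
    #   Feishu.insert_columns("kuaishou", "kuaishou", "fYdA8F", "ROWS", 1, 2)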

    # Write data
    @classmethod
    def update_values(cls, log_type, crawler, sheetid, ranges, values):
        """
        Write data into a worksheet.
        :param log_type: log path
        :param crawler: which crawler's cloud document
        :param sheetid: which worksheet
        :param ranges: cell range
        :param values: the data to write, as a list
        """
        update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                            + cls.spreadsheettoken(crawler) + "/values_batch_update"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "valueRanges": [
                {
                    "range": sheetid + "!" + ranges,
                    "values": values
                },
            ],
        }
        try:
            urllib3.disable_warnings()
            r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Write data: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to write data: {}", e)
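
    # Usage sketch (made-up sheet id): `values` is a list of rows, one inner list
    # per row; here a single row is written into A2:B2:
    #   Feishu.update_values("kuaishou", "kuaishou", "fYdA8F", "A2:B2",
    #                        [["video_title", "video_url"]])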

    # Merge cells
    @classmethod
    def merge_cells(cls, log_type, crawler, sheetid, ranges):
        """
        Merge cells.
        :param log_type: log path
        :param crawler: which crawler
        :param sheetid: which worksheet
        :param ranges: the cell range to merge
        """
        merge_cells_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                          + cls.spreadsheettoken(crawler) + "/merge_cells"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "range": sheetid + "!" + ranges,
            "mergeType": "MERGE_ROWS"
        }
        try:
            urllib3.disable_warnings()
            r = requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Merge cells: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to merge cells: {}", e)
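
    # mergeType is hard-coded to MERGE_ROWS, which (per the Feishu sheets API)
    # merges each row of the range separately rather than the whole block.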

    # Read cell data
    @classmethod
    def get_range_value(cls, log_type, crawler, sheetid, cell):
        """
        Read the contents of a cell.
        :param log_type: log path
        :param crawler: which crawler
        :param sheetid: which worksheet
        :param cell: which cell
        :return: cell contents
        """
        get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            # valueRenderOption=ToString returns plain-text values (except numeric types);
            # valueRenderOption=FormattedValue computes and formats the cell;
            # valueRenderOption=Formula returns the formula itself when the cell contains one;
            # valueRenderOption=UnformattedValue computes but does not format the cell.
            "valueRenderOption": "FormattedValue",
            # dateTimeRenderOption=FormattedString computes date/time values and formats them
            # by their cell format (numbers are left alone), returning the formatted string.
            "dateTimeRenderOption": "",
            # type of user id to return; open_id or union_id
            "user_id_type": "open_id"
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
            # print(r.text)
            return r.json()["data"]["valueRange"]["values"][0]
        except Exception as e:
            Common.logger(log_type).error("Failed to read cell data: {}", e)
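
    # Usage sketch (made-up sheet id): the method returns the first row of the
    # range as a list, so a single cell comes back as a one-element list:
    #   title = Feishu.get_range_value("kuaishou", "kuaishou", "fYdA8F", "B2:B2")[0]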

    # Delete rows or columns; ROWS or COLUMNS
    @classmethod
    def dimension_range(cls, log_type, crawler, sheetid, major_dimension, startindex, endindex):
        """
        Delete rows or columns.
        :param log_type: log path
        :param crawler: which crawler
        :param sheetid: worksheet
        :param major_dimension: defaults to ROWS; ROWS or COLUMNS
        :param startindex: start position
        :param endindex: end position
        :return:
        """
        dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/dimension_range"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "dimension": {
                "sheetId": sheetid,
                "majorDimension": major_dimension,
                "startIndex": startindex,
                "endIndex": endindex
            }
        }
        try:
            urllib3.disable_warnings()
            r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Delete video data: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to delete video data: {}", e)
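
    # Usage sketch (made-up sheet id), assuming the API's 1-based inclusive
    # indexing: delete the second row of the sheet:
    #   Feishu.dimension_range("kuaishou", "kuaishou", "fYdA8F", "ROWS", 2, 2)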

    # Get a user ID
    @classmethod
    def get_userid(cls, log_type, username):
        try:
            url = "https://open.feishu.cn/open-apis/user/v1/batch_get_id?"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type),
                "Content-Type": "application/json; charset=utf-8"
            }
            if username == "wangkun":
                username = cls.wangkun
            elif username == "gaonannan":
                username = cls.gaonannan
            elif username == "xinxin":
                username = cls.xinxin
            elif username == "huxinxue":
                username = cls.huxinxue
            data = {"mobiles": [username]}
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=data, verify=False, proxies=proxies)
            open_id = r.json()["data"]["mobile_users"][username][0]["open_id"]
            Common.logger(log_type).info("{}: {}", username, open_id)
            # print(f"{username}:{open_id}")
            return open_id
        except Exception as e:
            Common.logger(log_type).error("get_userid failed: {}", e)
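
    # Usage sketch: maps a known username to the phone number above, then asks
    # Feishu for the matching open_id (used for the @-mention in bot() below):
    #   open_id = Feishu.get_userid("kuaishou", "wangkun")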

    # Feishu bot
    @classmethod
    def bot(cls, log_type, content):
        try:
            url = "https://open.feishu.cn/open-apis/bot/v2/hook/96989577-50e7-4653-9ec2-308fe3f2c5fe"
            headers = {
                "Content-Type": "application/json"
            }
            data = json.dumps({
                "msg_type": "interactive",
                "card": {
                    "config": {
                        "wide_screen_mode": True,
                        "enable_forward": True
                    },
                    "elements": [{
                        "tag": "div",
                        "text": {
                            "content": "\n<at id=" + str(cls.get_userid(log_type, "wangkun")) + "></at>\n" + content,
                            "tag": "lark_md"
                        }
                    }, {
                        "actions": [{
                            "tag": "button",
                            "text": {
                                "content": "Kuaishou crawler sheet",
                                "tag": "lark_md"
                            },
                            "url": "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf",
                            "type": "default",
                            "value": {}
                        }, {
                            "tag": "button",
                            "text": {
                                "content": "Kuaishou Jenkins",
                                "tag": "lark_md"
                            },
                            "url": "https://jenkins-on.yishihui.com/view/%E7%88%AC%E8%99%AB-Spider/job/%E5%BF%"
                                   "AB%E6%89%8B%E5%B0%8F%E7%A8%8B%E5%BA%8F-%E8%A7%86%E9%A2%91%E7%88%AC%E5%8F%96/",
                            "type": "default",
                            "value": {}
                        }],
                        "tag": "action"
                    }],
                    "header": {
                        "title": {
                            "content": "📣 New alert, please check and handle",
                            "tag": "plain_text"
                        }
                    }
                }
            })
            urllib3.disable_warnings()
            r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
            Common.logger(log_type).info("Bot message triggered: {}, {}", r, r.json()["StatusMessage"])
        except Exception as e:
            Common.logger(log_type).error("bot failed: {}", e)
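
    # Note: the info log above parses the legacy webhook response shape
    # ("StatusMessage"); newer Feishu webhooks return {"code": ..., "msg": ...},
    # in which case a KeyError is raised and logged by the except block instead.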


if __name__ == "__main__":
    Feishu.bot("kuaishou", "Kuaishou test message, please ignore")