# feishu_lib.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2022/8/9
import json

import requests
import urllib3

from main.common import Common

proxies = {"http": None, "https": None}


class Feishu:
    """
    Edit Feishu cloud documents (spreadsheets).
    """
    # Kanyikan crawler data sheet
    kanyikan_url = "https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?"
    # Kuaishou crawler data sheet
    kuaishou_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf?"
    # Weishi crawler data sheet
    weishi_url = "https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?"
    # Xiaoniangao crawler data sheet
    xiaoniangao_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?"
    # Music album data sheet
    music_album = "https://w42nne6hzg.feishu.cn/sheets/shtcnT6zvmfsYe1g0iv4pt7855g?"
    # Benshanzhufu data sheet
    crawler_benshanzhufu = "https://w42nne6hzg.feishu.cn/sheets/shtcnGh2rrsPYM4iVNEBO7OqWrb?"
    # Official-account (gzh) crawler sheet
    gzh_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnexNXnpDLHhARw0QdiwbYuA?"
    # Data monitoring sheet
    crawler_monitor = "https://w42nne6hzg.feishu.cn/sheets/shtcnlZWYazInhf7Z60jkbLRJyd?"
    # Weiqun (WeChat group) video crawler sheet
    crawler_weiqun_video = "https://w42nne6hzg.feishu.cn/sheets/shtcnoKThNquYRweaylMFVyo9Hc?"
    # Shipinhao (WeChat Channels) crawler sheet
    crawler_shipinhao = "https://w42nne6hzg.feishu.cn/sheets/shtcn9rOdZRAGFbRkWpn7hqEHGc?"
    # Xigua Video crawler sheet
    crawler_xigua = "https://w42nne6hzg.feishu.cn/sheets/shtcnvOpx2P8vBXiV91Ot1MKIw8?"
    # Zhihu (PC) crawler sheet
    crawler_zhihu = "https://w42nne6hzg.feishu.cn/sheets/shtcnkGPBmGsjaqapgzouuj8MXe?"

    # Feishu spreadsheet tokens, keyed by crawler name
    @classmethod
    def spreadsheettoken(cls, crawler):
        """
        :param crawler: which crawler
        """
        tokens = {
            "kanyikan": "shtcngRPoDYAi24x52j2nDuHMih",
            "kuaishou": "shtcnICEfaw9llDNQkKgdymM1xf",
            "weishi": "shtcn5YSWg91JfVGzj0SFZIRRPh",
            "xiaoniangao": "shtcnYxiyQ1wLklo1W5Kdqc9cGh",
            "monitor": "shtcnlZWYazInhf7Z60jkbLRJyd",
            "music_album": "shtcnT6zvmfsYe1g0iv4pt7855g",
            "bszf": "shtcnGh2rrsPYM4iVNEBO7OqWrb",
            "gzh": "shtcnexNXnpDLHhARw0QdiwbYuA",
            "weiqun": "shtcnoKThNquYRweaylMFVyo9Hc",
            "shipinhao": "shtcn9rOdZRAGFbRkWpn7hqEHGc",
            "xigua": "shtcnvOpx2P8vBXiV91Ot1MKIw8",
            "zhihu": "shtcnkGPBmGsjaqapgzouuj8MXe",
        }
        # Returns None for unknown crawlers, matching the original if/elif chain
        return tokens.get(crawler)

    # Get a Feishu API token
    @classmethod
    def get_token(cls, log_type):
        """
        Get a Feishu API tenant_access_token.
        :return:
        """
        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
        post_data = {"app_id": "cli_a13ad2afa438d00b",  # credentials of the app published in the admin backend
                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
        try:
            urllib3.disable_warnings()
            response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
            tenant_access_token = response.json()["tenant_access_token"]
            return tenant_access_token
        except Exception as e:
            Common.logger(log_type).error("Failed to get Feishu API token: {}", e)
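
    # The endpoint above also returns an "expire" field (token lifetime in
    # seconds). A minimal caching sketch, assuming callers can reuse a token
    # until shortly before it expires (not part of the original code):
    #
    #   import time
    #   _token_cache = {"token": None, "deadline": 0.0}
    #   if time.time() < _token_cache["deadline"]:
    #       return _token_cache["token"]
    #   resp = response.json()
    #   _token_cache["token"] = resp["tenant_access_token"]
    #   _token_cache["deadline"] = time.time() + resp["expire"] - 60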

    # Get spreadsheet metadata
    @classmethod
    def get_metainfo(cls, log_type, crawler):
        """
        Get spreadsheet metadata.
        :return:
        """
        get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                           + cls.spreadsheettoken(crawler) + "/metainfo"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            "extFields": "protectedRange",  # extra fields; extFields=protectedRange returns protected row/column info
            "user_id_type": "open_id"  # type of user id to return: open_id or union_id
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
            response = json.loads(r.content.decode("utf8"))
            return response
        except Exception as e:
            Common.logger(log_type).error("Failed to get spreadsheet metadata: {}", e)

    # Read all data from a worksheet
    @classmethod
    def get_values_batch(cls, log_type, crawler, sheetid):
        """
        Read all data from a worksheet.
        :param log_type: which log to use
        :param crawler: which crawler
        :param sheetid: which worksheet
        :return: all rows
        """
        get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                               + cls.spreadsheettoken(crawler) + "/values_batch_get"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            # Multiple query ranges, e.g. url?ranges=range1,range2 ; each range combines a sheetId and a cell range
            "ranges": sheetid,
            # valueRenderOption=ToString returns plain-text values (except numeric types);
            # valueRenderOption=FormattedValue computes and formats cells;
            # valueRenderOption=Formula returns the formula itself for cells that contain one;
            # valueRenderOption=UnformattedValue computes but does not format cells
            "valueRenderOption": "ToString",
            # dateTimeRenderOption=FormattedString computes and formats date/time values by their own format
            # (numbers are left unformatted) and returns the formatted string
            "dateTimeRenderOption": "",
            # type of user id to return: open_id or union_id
            "user_id_type": "open_id"
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
            # print(r.text)
            response = json.loads(r.content.decode("utf8"))
            values = response["data"]["valueRanges"][0]["values"]
            return values
        except Exception as e:
            Common.logger(log_type).error("Failed to read worksheet data: {}", e)
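
    # A minimal read sketch; "yatRv2" is a sheet id that appears elsewhere in
    # this file (the Xiaoniangao hourly sheet), so any other id here would be
    # an assumption:
    #
    #   rows = Feishu.get_values_batch("xiaoniangao", "xiaoniangao", "yatRv2")
    #   for row in rows[1:]:  # skip the header row
    #       print(row)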

    # Insert rows or columns into a worksheet
    @classmethod
    def insert_columns(cls, log_type, crawler, sheetid, majordimension, startindex, endindex):
        """
        Insert rows or columns into a worksheet.
        :param log_type: log path
        :param crawler: which crawler's document
        :param sheetid: which worksheet
        :param majordimension: rows or columns, ROWS / COLUMNS
        :param startindex: start position
        :param endindex: end position
        """
        insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                             + cls.spreadsheettoken(crawler) + "/insert_dimension_range"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "dimension": {
                "sheetId": sheetid,
                "majorDimension": majordimension,  # default ROWS; one of ROWS, COLUMNS
                "startIndex": startindex,  # start position
                "endIndex": endindex  # end position
            },
            "inheritStyle": "AFTER"  # BEFORE or AFTER; omit to not inherit style
        }
        try:
            urllib3.disable_warnings()
            r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Insert rows/columns: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to insert rows/columns: {}", e)

    # Write data
    @classmethod
    def update_values(cls, log_type, crawler, sheetid, ranges, values):
        """
        Write data into a worksheet.
        :param log_type: log path
        :param crawler: which crawler's document
        :param sheetid: which worksheet
        :param ranges: cell range
        :param values: the data to write, as a list
        """
        update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                            + cls.spreadsheettoken(crawler) + "/values_batch_update"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "valueRanges": [
                {
                    "range": sheetid + "!" + ranges,
                    "values": values
                },
            ],
        }
        try:
            urllib3.disable_warnings()
            r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Write data: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to write data: {}", e)
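
    # The usual write pattern with this API is to open a gap first and then
    # fill it. A sketch reusing the "yatRv2" sheet id; the range and row
    # contents below are illustrative only:
    #
    #   Feishu.insert_columns("xiaoniangao", "xiaoniangao", "yatRv2", "ROWS", 1, 2)
    #   Feishu.update_values("xiaoniangao", "xiaoniangao", "yatRv2", "A2:C2",
    #                        [["video_id", "title", "play_cnt"]])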

    # Merge cells
    @classmethod
    def merge_cells(cls, log_type, crawler, sheetid, ranges):
        """
        Merge cells.
        :param log_type: log path
        :param crawler: which crawler
        :param sheetid: which worksheet
        :param ranges: the cell range to merge
        """
        merge_cells_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                          + cls.spreadsheettoken(crawler) + "/merge_cells"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "range": sheetid + "!" + ranges,
            "mergeType": "MERGE_ROWS"
        }
        try:
            urllib3.disable_warnings()
            r = requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Merge cells: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to merge cells: {}", e)
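
    # Example (range is illustrative): merge A2:B2 of sheet "yatRv2" row-wise,
    # which is what the hard-coded MERGE_ROWS above always does:
    #
    #   Feishu.merge_cells("xiaoniangao", "xiaoniangao", "yatRv2", "A2:B2")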

    # Read cell data
    @classmethod
    def get_range_value(cls, log_type, crawler, sheetid, cell):
        """
        Read the contents of a cell.
        :param log_type: log path
        :param crawler: which crawler
        :param sheetid: which worksheet
        :param cell: which cell
        :return: cell contents
        """
        get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            # valueRenderOption=ToString returns plain-text values (except numeric types);
            # valueRenderOption=FormattedValue computes and formats cells;
            # valueRenderOption=Formula returns the formula itself for cells that contain one;
            # valueRenderOption=UnformattedValue computes but does not format cells.
            "valueRenderOption": "FormattedValue",
            # dateTimeRenderOption=FormattedString computes and formats date/time values by their own format
            # (numbers are left unformatted) and returns the formatted string.
            "dateTimeRenderOption": "",
            # type of user id to return: open_id or union_id
            "user_id_type": "open_id"
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
            # print(r.text)
            return r.json()["data"]["valueRange"]["values"][0]
        except Exception as e:
            Common.logger(log_type).error("Failed to read cell data: {}", e)
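
    # Note that this returns the first *row* of the requested range. A sketch
    # reading a single cell (the cell address is illustrative):
    #
    #   value = Feishu.get_range_value("xiaoniangao", "xiaoniangao", "yatRv2", "B2:B2")
    #   # value is a one-element list, e.g. ["some text"]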

    # Delete rows or columns; ROWS or COLUMNS
    @classmethod
    def dimension_range(cls, log_type, crawler, sheetid, major_dimension, startindex, endindex):
        """
        Delete rows or columns.
        :param log_type: log path
        :param crawler: which crawler
        :param sheetid: worksheet
        :param major_dimension: default ROWS; one of ROWS, COLUMNS
        :param startindex: start position
        :param endindex: end position
        :return:
        """
        dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/dimension_range"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "dimension": {
                "sheetId": sheetid,
                "majorDimension": major_dimension,
                "startIndex": startindex,
                "endIndex": endindex
            }
        }
        try:
            urllib3.disable_warnings()
            r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Delete video data: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to delete video data: {}", e)
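
    # A minimal deletion sketch, assuming startindex/endindex address the
    # 1-based first and last row to remove (the v2 dimension_range semantics):
    #
    #   Feishu.dimension_range("xiaoniangao", "xiaoniangao", "yatRv2", "ROWS", 2, 2)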

    # Get a user's open_id
    @classmethod
    def get_userid(cls, log_type, username):
        try:
            url = "https://open.feishu.cn/open-apis/user/v1/batch_get_id?"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type),
                "Content-Type": "application/json; charset=utf-8"
            }
            # Mobile numbers, keyed by username
            mobiles = {
                "wangkun": "13426262515",
                "gaonannan": "18501180073",
                "xinxin": "15546206651",
                "huxinxue": "18832292015",
                "wuchaoyue": "15712941385",
            }
            # Fall back to the raw value for unknown names, matching the original if/elif chain
            username = mobiles.get(username, username)
            data = {"mobiles": [username]}
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=data, verify=False, proxies=proxies)
            open_id = r.json()["data"]["mobile_users"][username][0]["open_id"]
            Common.logger(log_type).info("{}:{}", username, open_id)
            # print(f"{username}:{open_id}")
            return open_id
        except Exception as e:
            Common.logger(log_type).error("get_userid failed: {}", e)
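
    # Example (mirrors the commented call in __main__ below):
    #
    #   open_id = Feishu.get_userid("kuaishou", "huxinxue")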

    # Feishu bot
    @classmethod
    def bot(cls, log_type, crawler, text):
        try:
            url = "https://open.feishu.cn/open-apis/bot/v2/hook/96989577-50e7-4653-9ec2-308fe3f2c5fe"
            headers = {
                'Content-Type': 'application/json'
            }
            # Button label, sheet URL and users to @, keyed by crawler
            sheets = {
                "kanyikan": ("Kanyikan crawler sheet",
                             "https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih",
                             ["wangkun", "gaonannan"]),
                "xiaoniangao_hour": ("Xiaoniangao hourly downloaded sheet",
                                     "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?sheet=yatRv2",
                                     ["wangkun", "gaonannan"]),
                "xiaoniangao_person": ("Xiaoniangao user-homepage downloaded sheet",
                                       "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?sheet=Wu0CeL",
                                       ["wangkun", "gaonannan"]),
                "xiaoniangao_play": ("Xiaoniangao play-count downloaded sheet",
                                     "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?sheet=c85k1C",
                                     ["wangkun", "gaonannan"]),
                "xigua_video": ("Xigua Video user-homepage downloaded sheet",
                                "https://w42nne6hzg.feishu.cn/sheets/shtcnvOpx2P8vBXiV91Ot1MKIw8?sheet=e075e9",
                                ["wangkun", "wuchaoyue"]),
                "xigua_little_video": ("Xigua Video short-video downloaded sheet",
                                       "https://w42nne6hzg.feishu.cn/sheets/shtcnvOpx2P8vBXiV91Ot1MKIw8?sheet=hDSDnv",
                                       ["wangkun", "wuchaoyue"]),
                "zhihu_hot": ("Zhihu hot-list downloaded sheet",
                              "https://w42nne6hzg.feishu.cn/sheets/shtcnkGPBmGsjaqapgzouuj8MXe?sheet=8871e3",
                              ["wangkun", "huxinxue"]),
                "zhihu_follow": ("Zhihu targeted downloaded sheet",
                                 "https://w42nne6hzg.feishu.cn/sheets/shtcnkGPBmGsjaqapgzouuj8MXe?sheet=4MGuux",
                                 ["wangkun", "huxinxue"]),
                "music_album": ("Music album crawler sheet",
                                "https://w42nne6hzg.feishu.cn/sheets/shtcnT6zvmfsYe1g0iv4pt7855g",
                                ["wangkun", "gaonannan"]),
                "bszf": ("Benshanzhufu crawler sheet",
                         "https://w42nne6hzg.feishu.cn/sheets/shtcnGh2rrsPYM4iVNEBO7OqWrb",
                         ["wangkun", "gaonannan"]),
                "kuaishou_follow": ("Kuaishou user-homepage downloaded sheet",
                                    "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf?sheet=fYdA8F",
                                    ["wangkun", "xinxin"]),
                "kuaishou_recommend": ("Kuaishou recommendation downloaded sheet",
                                       "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf?sheet=3cd128",
                                       ["wangkun", "xinxin"]),
                "gzh": ("Official-account crawler sheet",
                        "https://w42nne6hzg.feishu.cn/sheets/shtcnexNXnpDLHhARw0QdiwbYuA",
                        ["wangkun", "huxinxue"]),
                "weiqun": ("Weiqun crawler sheet",
                           "https://w42nne6hzg.feishu.cn/sheets/shtcnoKThNquYRweaylMFVyo9Hc",
                           ["wangkun", "xinxin"]),
                "weishi": ("Weishi crawler sheet",
                           "https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh",
                           ["wangkun", "xinxin"]),
                "shipinhao": ("Shipinhao crawler sheet",
                              "https://w42nne6hzg.feishu.cn/sheets/shtcn9rOdZRAGFbRkWpn7hqEHGc",
                              ["wangkun", "xinxin"]),
            }
            # Any other crawler falls back to the Xiaoniangao sheet and @wangkun only,
            # matching the original else branch
            content, sheet_url, user_names = sheets.get(
                crawler,
                ("Xiaoniangao crawler sheet",
                 "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh",
                 ["wangkun"]))
            users = "\n" + " ".join(
                "<at id=" + str(cls.get_userid(log_type, name)) + "></at>" for name in user_names) + "\n"
            data = json.dumps({
                "msg_type": "interactive",
                "card": {
                    "config": {
                        "wide_screen_mode": True,
                        "enable_forward": True
                    },
                    "elements": [{
                        "tag": "div",
                        "text": {
                            "content": users + text,
                            "tag": "lark_md"
                        }
                    }, {
                        "actions": [{
                            "tag": "button",
                            "text": {
                                "content": content,
                                "tag": "lark_md"
                            },
                            "url": sheet_url,
                            "type": "default",
                            "value": {}
                        }],
                        "tag": "action"
                    }],
                    "header": {
                        "title": {
                            "content": "📣 You have a new alert, please check it",
                            "tag": "plain_text"
                        }
                    }
                }
            })
            urllib3.disable_warnings()
            r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
            Common.logger(log_type).info("Bot message triggered: {}, {}", r, r.json()["StatusMessage"])
        except Exception as e:
            Common.logger(log_type).error("bot failed: {}", e)


if __name__ == "__main__":
    Feishu.bot("bot", "zhihu_hot", "Hey. Heyhey.. Heyheyhey")
    # Feishu.get_userid("kuaishou", "huxinxue")
    pass