feishu_lib.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2022/8/9
import json

import requests
import urllib3

from main.common import Common

proxies = {"http": None, "https": None}


class Feishu:
    """
    Helpers for editing Feishu (Lark) cloud spreadsheet docs.
    """
    # Kanyikan crawler data sheet
    kanyikan_url = "https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?"
    # Kuaishou crawler data sheet
    kuaishou_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf?"
    # Weishi crawler data sheet
    weishi_url = "https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?"
    # Xiaoniangao crawler data sheet
    xiaoniangao_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?"
    # Music album sheet
    music_album = "https://w42nne6hzg.feishu.cn/sheets/shtcnT6zvmfsYe1g0iv4pt7855g?"
    # Benshanzhufu data sheet
    crawler_benshanzhufu = "https://w42nne6hzg.feishu.cn/sheets/shtcnGh2rrsPYM4iVNEBO7OqWrb?"
    # Official-accounts (gzh) crawler sheet
    gzh_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnexNXnpDLHhARw0QdiwbYuA?"
    # Data-monitoring sheet
    crawler_monitor = "https://w42nne6hzg.feishu.cn/sheets/shtcnlZWYazInhf7Z60jkbLRJyd?"
    # Weiqun video crawler sheet
    crawler_weiqun_video = "https://w42nne6hzg.feishu.cn/sheets/shtcnoKThNquYRweaylMFVyo9Hc?"
    # Shipinhao (WeChat Channels) crawler sheet
    crawler_shipinhao = "https://w42nne6hzg.feishu.cn/sheets/shtcn9rOdZRAGFbRkWpn7hqEHGc?"
    # Xigua Video sheet
    crawler_xigua = "https://w42nne6hzg.feishu.cn/sheets/shtcnvOpx2P8vBXiV91Ot1MKIw8?"
    # Zhihu (PC) sheet
    crawler_zhihu = "https://w42nne6hzg.feishu.cn/sheets/shtcnkGPBmGsjaqapgzouuj8MXe?"
    # Jixiangxingfu sheet
    crawler_jixiangxingfu = "https://w42nne6hzg.feishu.cn/sheets/shtcnSx4nafMbLTq7xl7RHBwHBf?"
    # Zhongmiaoyinxin sheet
    crawler_zmyx = "https://w42nne6hzg.feishu.cn/sheets/shtcnbZIxstPeM0xshW07b26sve?"
    # Suisuiniannianyingfuqi sheet
    crawler_ssnnyfq = "https://w42nne6hzg.feishu.cn/sheets/shtcnyJmJSJynHDLLbLTkySfvZe?"

    # Feishu spreadsheet token for each crawler's cloud doc
    @classmethod
    def spreadsheettoken(cls, crawler):
        """
        :param crawler: which crawler's spreadsheet to address
        """
        tokens = {
            "kanyikan": "shtcngRPoDYAi24x52j2nDuHMih",
            "kuaishou": "shtcnICEfaw9llDNQkKgdymM1xf",
            "weishi": "shtcn5YSWg91JfVGzj0SFZIRRPh",
            "xiaoniangao": "shtcnYxiyQ1wLklo1W5Kdqc9cGh",
            "monitor": "shtcnlZWYazInhf7Z60jkbLRJyd",
            "music_album": "shtcnT6zvmfsYe1g0iv4pt7855g",
            "bszf": "shtcnGh2rrsPYM4iVNEBO7OqWrb",
            "gzh": "shtcnexNXnpDLHhARw0QdiwbYuA",
            "weiqun": "shtcnoKThNquYRweaylMFVyo9Hc",
            "shipinhao": "shtcn9rOdZRAGFbRkWpn7hqEHGc",
            "xigua": "shtcnvOpx2P8vBXiV91Ot1MKIw8",
            "zhihu": "shtcnkGPBmGsjaqapgzouuj8MXe",
            "jxxf": "shtcnSx4nafMbLTq7xl7RHBwHBf",
            "zmyx": "shtcnbZIxstPeM0xshW07b26sve",
            "ssnnyfq": "shtcnyJmJSJynHDLLbLTkySfvZe",
        }
        # Like the original if/elif chain, an unknown crawler yields None.
        return tokens.get(crawler)

    # Get a Feishu API token
    @classmethod
    def get_token(cls, log_type):
        """
        Get a Feishu API tenant_access_token.
        :return: tenant_access_token
        """
        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
        post_data = {"app_id": "cli_a13ad2afa438d00b",  # credentials of the published app's admin backend
                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
        try:
            urllib3.disable_warnings()
            response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
            tenant_access_token = response.json()["tenant_access_token"]
            return tenant_access_token
        except Exception as e:
            Common.logger(log_type).error("Failed to get Feishu API token: {}", e)
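
    # Usage sketch (hedged): "demo" is a placeholder log channel for
    # main.common.Common.logger; the token goes into a Bearer header.
    #
    #   token = Feishu.get_token("demo")
    #   headers = {"Authorization": "Bearer " + token}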

    # Get spreadsheet metadata
    @classmethod
    def get_metainfo(cls, log_type, crawler):
        """
        Get the spreadsheet's metadata.
        :return: metainfo response dict
        """
        get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                           + cls.spreadsheettoken(crawler) + "/metainfo"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            "extFields": "protectedRange",  # extra fields; extFields=protectedRange adds protected row/column info
            "user_id_type": "open_id"  # type of user id to return: open_id or union_id
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
            response = json.loads(r.content.decode("utf8"))
            return response
        except Exception as e:
            Common.logger(log_type).error("Failed to get spreadsheet metadata: {}", e)
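
    # Usage sketch: list every worksheet's id and title. The "data" -> "sheets"
    # layout follows the Feishu v2 metainfo response; treat it as an assumption
    # worth verifying against the live API.
    #
    #   meta = Feishu.get_metainfo("demo", "kanyikan")
    #   for sheet in meta["data"]["sheets"]:
    #       print(sheet["sheetId"], sheet["title"])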

    # Read all values from a worksheet
    @classmethod
    def get_values_batch(cls, log_type, crawler, sheetid):
        """
        Read all values from one worksheet.
        :param log_type: which log channel to use
        :param crawler: which crawler's spreadsheet
        :param sheetid: which worksheet
        :return: all rows, as a list of lists
        """
        get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                               + cls.spreadsheettoken(crawler) + "/values_batch_get"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            # Multiple query ranges, e.g. url?ranges=range1,range2; each range is a sheetId plus a cell range.
            "ranges": sheetid,
            # valueRenderOption=ToString returns plain-text values (except numbers);
            # valueRenderOption=FormattedValue computes and formats cells;
            # valueRenderOption=Formula returns the formula itself for formula cells;
            # valueRenderOption=UnformattedValue computes but does not format.
            "valueRenderOption": "ToString",
            # dateTimeRenderOption=FormattedString formats date/time cells (not plain numbers) and returns strings.
            "dateTimeRenderOption": "",
            # type of user id to return: open_id or union_id
            "user_id_type": "open_id"
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
            response = json.loads(r.content.decode("utf8"))
            values = response["data"]["valueRanges"][0]["values"]
            return values
        except Exception as e:
            Common.logger(log_type).error("Failed to read worksheet values: {}", e)
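
    # Usage sketch (sheet id "yatRv2" is borrowed from the bot() table below
    # and assumed to carry a header row): iterate every data row.
    #
    #   for row in Feishu.get_values_batch("demo", "xiaoniangao", "yatRv2")[1:]:
    #       print(row)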

    # Insert rows or columns into a worksheet
    @classmethod
    def insert_columns(cls, log_type, crawler, sheetid, majordimension, startindex, endindex):
        """
        Insert rows or columns into a worksheet.
        :param log_type: which log channel to use
        :param crawler: which crawler's cloud doc
        :param sheetid: which worksheet
        :param majordimension: ROWS or COLUMNS
        :param startindex: start position
        :param endindex: end position
        """
        insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                             + cls.spreadsheettoken(crawler) + "/insert_dimension_range"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "dimension": {
                "sheetId": sheetid,
                "majorDimension": majordimension,  # defaults to ROWS; ROWS or COLUMNS
                "startIndex": startindex,  # start position
                "endIndex": endindex  # end position
            },
            "inheritStyle": "AFTER"  # BEFORE or AFTER; omit to not inherit style
        }
        try:
            urllib3.disable_warnings()
            r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Insert rows/columns: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to insert rows/columns: {}", e)

    # Write values
    @classmethod
    def update_values(cls, log_type, crawler, sheetid, ranges, values):
        """
        Write values into a range.
        :param log_type: which log channel to use
        :param crawler: which crawler's cloud doc
        :param sheetid: which worksheet
        :param ranges: cell range
        :param values: the data to write, as a list of rows
        """
        update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                            + cls.spreadsheettoken(crawler) + "/values_batch_update"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "valueRanges": [
                {
                    "range": sheetid + "!" + ranges,
                    "values": values
                },
            ],
        }
        try:
            urllib3.disable_warnings()
            r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Write values: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to write values: {}", e)
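
    # A typical append pattern for the two wrappers above (a sketch, not
    # verified against this repo's callers): insert a blank row after the
    # header, then write into it. Sheet id "yatRv2" and the cell values are
    # placeholders.
    #
    #   Feishu.insert_columns("demo", "xiaoniangao", "yatRv2", "ROWS", 1, 2)
    #   Feishu.update_values("demo", "xiaoniangao", "yatRv2", "A2:C2",
    #                        [["video_id", "title", "play_cnt"]])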

    # Merge cells
    @classmethod
    def merge_cells(cls, log_type, crawler, sheetid, ranges):
        """
        Merge cells.
        :param log_type: which log channel to use
        :param crawler: which crawler
        :param sheetid: which worksheet
        :param ranges: the cell range to merge
        """
        merge_cells_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                          + cls.spreadsheettoken(crawler) + "/merge_cells"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "range": sheetid + "!" + ranges,
            "mergeType": "MERGE_ROWS"
        }
        try:
            urllib3.disable_warnings()
            r = requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Merge cells: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to merge cells: {}", e)
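
    # Usage sketch: merge the header cells A1:C1 into one (sheet id "yatRv2"
    # is a placeholder). Note the wrapper hardcodes mergeType=MERGE_ROWS,
    # i.e. cells are merged within each row of the range.
    #
    #   Feishu.merge_cells("demo", "xiaoniangao", "yatRv2", "A1:C1")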

    # Read a single cell range
    @classmethod
    def get_range_value(cls, log_type, crawler, sheetid, cell):
        """
        Read the contents of a cell range.
        :param log_type: which log channel to use
        :param crawler: which crawler
        :param sheetid: which worksheet
        :param cell: which cell range, e.g. "B2:B2"
        :return: the first row of the range, as a list
        """
        get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            # valueRenderOption=ToString returns plain-text values (except numbers);
            # valueRenderOption=FormattedValue computes and formats cells;
            # valueRenderOption=Formula returns the formula itself for formula cells;
            # valueRenderOption=UnformattedValue computes but does not format.
            "valueRenderOption": "FormattedValue",
            # dateTimeRenderOption=FormattedString formats date/time cells (not plain numbers) and returns strings.
            "dateTimeRenderOption": "",
            # type of user id to return: open_id or union_id
            "user_id_type": "open_id"
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
            return r.json()["data"]["valueRange"]["values"][0]
        except Exception as e:
            Common.logger(log_type).error("Failed to read cell values: {}", e)
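
    # Usage sketch: read one cell. The wrapper returns the first row of the
    # range, so index once more for the cell itself ("yatRv2" and "B2:B2" are
    # placeholders).
    #
    #   cell = Feishu.get_range_value("demo", "xiaoniangao", "yatRv2", "B2:B2")[0]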

    # Delete rows or columns (ROWS or COLUMNS)
    @classmethod
    def dimension_range(cls, log_type, crawler, sheetid, major_dimension, startindex, endindex):
        """
        Delete rows or columns.
        :param log_type: which log channel to use
        :param crawler: which crawler
        :param sheetid: which worksheet
        :param major_dimension: defaults to ROWS; ROWS or COLUMNS
        :param startindex: start position
        :param endindex: end position
        :return:
        """
        dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/dimension_range"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "dimension": {
                "sheetId": sheetid,
                "majorDimension": major_dimension,
                "startIndex": startindex,
                "endIndex": endindex
            }
        }
        try:
            urllib3.disable_warnings()
            r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Delete rows/columns: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to delete rows/columns: {}", e)
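
    # Usage sketch: delete row 2 of a placeholder sheet. Per the Feishu v2
    # dimension_range API the indices are 1-based; whether both ends are
    # inclusive should be verified against the API docs before relying on it.
    #
    #   Feishu.dimension_range("demo", "xiaoniangao", "yatRv2", "ROWS", 2, 2)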

    # Get a user's open_id from a phone number
    @classmethod
    def get_userid(cls, log_type, username):
        try:
            url = "https://open.feishu.cn/open-apis/user/v1/batch_get_id?"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type),
                "Content-Type": "application/json; charset=utf-8"
            }
            # Phone numbers
            wangkun = "13426262515"
            gaonannan = "18501180073"
            xinxin = "15546206651"
            huxinxue = "18832292015"
            wuchaoyue = "15712941385"
            if username == "wangkun":
                username = wangkun
            elif username == "gaonannan":
                username = gaonannan
            elif username == "xinxin":
                username = xinxin
            elif username == "huxinxue":
                username = huxinxue
            elif username == "wuchaoyue":
                username = wuchaoyue
            data = {"mobiles": [username]}
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=data, verify=False, proxies=proxies)
            open_id = r.json()["data"]["mobile_users"][username][0]["open_id"]
            Common.logger(log_type).info("{}: {}", username, open_id)
            return open_id
        except Exception as e:
            Common.logger(log_type).error("get_userid failed: {}", e)

    # Feishu alert bot
    @classmethod
    def bot(cls, log_type, crawler, text):
        try:
            url = "https://open.feishu.cn/open-apis/bot/v2/hook/96989577-50e7-4653-9ec2-308fe3f2c5fe"
            headers = {
                "Content-Type": "application/json"
            }
            # Per crawler: button label, sheet URL, and the users to @-mention.
            alert_sheets = {
                "kanyikan": ("Kanyikan crawler sheet",
                             "https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih",
                             ["wangkun", "gaonannan"]),
                "xiaoniangao_hour": ("Xiaoniangao hourly downloads sheet",
                                     "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?sheet=yatRv2",
                                     ["wangkun", "gaonannan"]),
                "xiaoniangao_person": ("Xiaoniangao user-profile downloads sheet",
                                       "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?sheet=Wu0CeL",
                                       ["wangkun", "gaonannan"]),
                "xiaoniangao_play": ("Xiaoniangao play-count downloads sheet",
                                     "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?sheet=c85k1C",
                                     ["wangkun", "gaonannan"]),
                "xigua_video": ("Xigua Video user-profile downloads sheet",
                                "https://w42nne6hzg.feishu.cn/sheets/shtcnvOpx2P8vBXiV91Ot1MKIw8?sheet=e075e9",
                                ["wangkun", "wuchaoyue"]),
                "xigua_little_video": ("Xigua Video short-video downloads sheet",
                                       "https://w42nne6hzg.feishu.cn/sheets/shtcnvOpx2P8vBXiV91Ot1MKIw8?sheet=hDSDnv",
                                       ["wangkun", "wuchaoyue"]),
                "zhihu_hot": ("Zhihu hot-list downloads sheet",
                              "https://w42nne6hzg.feishu.cn/sheets/shtcnkGPBmGsjaqapgzouuj8MXe?sheet=8871e3",
                              ["wangkun", "huxinxue"]),
                "zhihu_follow": ("Zhihu targeted downloads sheet",
                                 "https://w42nne6hzg.feishu.cn/sheets/shtcnkGPBmGsjaqapgzouuj8MXe?sheet=4MGuux",
                                 ["wangkun", "huxinxue"]),
                "music_album": ("Music album crawler sheet",
                                "https://w42nne6hzg.feishu.cn/sheets/shtcnT6zvmfsYe1g0iv4pt7855g",
                                ["wangkun", "gaonannan"]),
                "bszf": ("Benshanzhufu crawler sheet",
                         "https://w42nne6hzg.feishu.cn/sheets/shtcnGh2rrsPYM4iVNEBO7OqWrb",
                         ["wangkun", "gaonannan"]),
                "jxxf": ("Jixiangxingfu crawler sheet",
                         "https://w42nne6hzg.feishu.cn/sheets/shtcnSx4nafMbLTq7xl7RHBwHBf",
                         ["wangkun", "gaonannan"]),
                "zmyx": ("Zhongmiaoyinxin crawler sheet",
                         "https://w42nne6hzg.feishu.cn/sheets/shtcnbZIxstPeM0xshW07b26sve",
                         ["wangkun", "gaonannan"]),
                "kuaishou_follow": ("Kuaishou user-profile downloads sheet",
                                    "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf?sheet=fYdA8F",
                                    ["wangkun", "xinxin"]),
                "kuaishou_recommend": ("Kuaishou recommend-feed downloads sheet",
                                       "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf?sheet=3cd128",
                                       ["wangkun", "xinxin"]),
                "ssnnyfq": ("Suisuiniannianyingfuqi downloads sheet",
                            "https://w42nne6hzg.feishu.cn/sheets/shtcnyJmJSJynHDLLbLTkySfvZe?sheet=290bae",
                            ["wangkun", "xinxin"]),
                "gzh": ("Official-accounts crawler sheet",
                        "https://w42nne6hzg.feishu.cn/sheets/shtcnexNXnpDLHhARw0QdiwbYuA",
                        ["wangkun", "huxinxue"]),
                "weiqun": ("Weiqun crawler sheet",
                           "https://w42nne6hzg.feishu.cn/sheets/shtcnoKThNquYRweaylMFVyo9Hc",
                           ["wangkun", "xinxin"]),
                "weishi": ("Weishi crawler sheet",
                           "https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh",
                           ["wangkun", "xinxin"]),
                "shipinhao": ("Shipinhao crawler sheet",
                              "https://w42nne6hzg.feishu.cn/sheets/shtcn9rOdZRAGFbRkWpn7hqEHGc",
                              ["wangkun", "xinxin"]),
            }
            # Unknown crawlers fall back to the Xiaoniangao sheet, as in the
            # original else branch.
            content, sheet_url, user_names = alert_sheets.get(
                crawler,
                ("Xiaoniangao crawler sheet",
                 "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh",
                 ["wangkun"]))
            users = "\n" + " ".join(
                "<at id=" + str(cls.get_userid(log_type, name)) + "></at>"
                for name in user_names) + "\n"
            data = json.dumps({
                "msg_type": "interactive",
                "card": {
                    "config": {
                        "wide_screen_mode": True,
                        "enable_forward": True
                    },
                    "elements": [{
                        "tag": "div",
                        "text": {
                            "content": users + text,
                            "tag": "lark_md"
                        }
                    }, {
                        "actions": [{
                            "tag": "button",
                            "text": {
                                "content": content,
                                "tag": "lark_md"
                            },
                            "url": sheet_url,
                            "type": "default",
                            "value": {}
                        }],
                        "tag": "action"
                    }],
                    "header": {
                        "title": {
                            "content": "📣 You have a new alert, please check it",
                            "tag": "plain_text"
                        }
                    }
                }
            })
            urllib3.disable_warnings()
            r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
            Common.logger(log_type).info("Triggered bot message: {}, {}", r, r.json()["StatusMessage"])
        except Exception as e:
            Common.logger(log_type).error("bot failed: {}", e)


if __name__ == "__main__":
    Feishu.bot("bot", "ssnnyfq", "Don't worry, just a test.")
    # Feishu.get_userid("kuaishou", "huxinxue")