feishu.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/1/31
"""
Feishu sheet configuration: token authentication / CRUD / bot alerts
"""
import json
import os
import sys

import requests
import urllib3

sys.path.append(os.getcwd())
from common.common import Common

proxies = {"http": None, "https": None}


class Feishu:
    """
    Edit Feishu cloud docs (spreadsheets)
    """
    # Kanyikan crawler data sheet
    kanyikan_url = "https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?"
    # Kuaishou crawler data sheet
    kuaishou_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf?"
    # Weishi crawler data sheet
    weishi_url = "https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?"
    # Xiaoniangao crawler data sheet
    xiaoniangao_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?"
    # Music album
    music_album = "https://w42nne6hzg.feishu.cn/sheets/shtcnT6zvmfsYe1g0iv4pt7855g?"
    # Benshan Zhufu data sheet
    crawler_benshanzhufu = "https://w42nne6hzg.feishu.cn/sheets/shtcnGh2rrsPYM4iVNEBO7OqWrb?"
    # Official-account (gongzhonghao) crawler sheet
    gzh_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnexNXnpDLHhARw0QdiwbYuA?"
    # Data monitoring sheet
    crawler_monitor = "https://w42nne6hzg.feishu.cn/sheets/shtcnlZWYazInhf7Z60jkbLRJyd?"
    # Weiqun video crawler sheet
    crawler_weiqun_video = "https://w42nne6hzg.feishu.cn/sheets/shtcnoKThNquYRweaylMFVyo9Hc?"
    # Shipinhao (WeChat Channels) crawler sheet
    crawler_shipinhao = 'https://w42nne6hzg.feishu.cn/sheets/shtcn9rOdZRAGFbRkWpn7hqEHGc?'
    # Xigua Video
    crawler_xigua = 'https://w42nne6hzg.feishu.cn/sheets/shtcnvOpx2P8vBXiV91Ot1MKIw8?'
    # Zhihu (desktop)
    crawler_zhihu = 'https://w42nne6hzg.feishu.cn/sheets/shtcnkGPBmGsjaqapgzouuj8MXe?'
    # Jixiang Xingfu
    crawler_jixiangxingfu = 'https://w42nne6hzg.feishu.cn/sheets/shtcnSx4nafMbLTq7xl7RHBwHBf?'
    # Zhongmiao Yinxin
    crawler_zmyx = 'https://w42nne6hzg.feishu.cn/sheets/shtcnbZIxstPeM0xshW07b26sve?'
    # Suisui Niannian Ying Fuqi
    crawler_ssnnyfq = 'https://w42nne6hzg.feishu.cn/sheets/shtcnyJmJSJynHDLLbLTkySfvZe?'
    # Zhufumao video
    crawler_zhufumao = 'https://w42nne6hzg.feishu.cn/sheets/shtcnXfIJthvkjhI5zlEJq84i6g?'
    # Religious official accounts
    crawler_zongjiao = 'https://w42nne6hzg.feishu.cn/sheets/shtcn73NW0CyoOeF21HWO15KBsb?'
    # Haokan Video
    crawler_haokan = 'https://w42nne6hzg.feishu.cn/sheets/shtcnaYz8Nhv8q6DbWtlL6rMEBd'
    # Kandao Jiushi Fuqi
    crawler_kandaojiushifuqi = 'https://w42nne6hzg.feishu.cn/sheets/shtcnEokBkIjOUPAk8vbbPKnXgb'
    # Shengsheng Yingyin
    crawler_shengshengyingyin = 'https://w42nne6hzg.feishu.cn/sheets/shtcnz1ymxHL1u8WHblfqfys7qe'
    # Ganggang Douchuan
    crawler_ganggangdouchuan = 'https://w42nne6hzg.feishu.cn/sheets/shtcnTuJgeZU2bc7VaesAqk3QJx'
    # Official account_Xinxin
    crawler_gongzhonghao = 'https://w42nne6hzg.feishu.cn/sheets/shtcna98M2mX7TbivTj9Sb7WKBN?'
    # YouTube
    crawler_youtube = 'https://w42nne6hzg.feishu.cn/sheets/shtcnrLyr1zbYbhhZyqpN7Xrd5f?'
    # WeChat Index
    weixinzhishu = 'https://w42nne6hzg.feishu.cn/sheets/shtcnqhMRUGunIfGnGXMOBYiy4K?'
    # WeChat Index - search terms
    weixinzhishu_search_word = 'https://w42nne6hzg.feishu.cn/sheets/shtcnHxCj6dZBYMuK1Q3tIJVlqg?'

    # Feishu spreadsheet token for each crawler (the path segment of the sheet URLs above)
    @classmethod
    def spreadsheettoken(cls, crawler):
        """
        :param crawler: which crawler
        :return: the spreadsheet token of that crawler's sheet, or None if unknown
        """
        crawler_tokens = {
            "kanyikan": "shtcngRPoDYAi24x52j2nDuHMih",
            "kuaishou": "shtcnICEfaw9llDNQkKgdymM1xf",
            "weishi": "shtcn5YSWg91JfVGzj0SFZIRRPh",
            "xiaoniangao": "shtcnYxiyQ1wLklo1W5Kdqc9cGh",
            "monitor": "shtcnlZWYazInhf7Z60jkbLRJyd",
            "music_album": "shtcnT6zvmfsYe1g0iv4pt7855g",
            "bszf": "shtcnGh2rrsPYM4iVNEBO7OqWrb",
            "gzh": "shtcnexNXnpDLHhARw0QdiwbYuA",
            "weiqun": "shtcnoKThNquYRweaylMFVyo9Hc",
            "shipinhao": "shtcn9rOdZRAGFbRkWpn7hqEHGc",
            "xigua": "shtcnvOpx2P8vBXiV91Ot1MKIw8",
            "zhihu": "shtcnkGPBmGsjaqapgzouuj8MXe",
            "jxxf": "shtcnSx4nafMbLTq7xl7RHBwHBf",
            "zmyx": "shtcnbZIxstPeM0xshW07b26sve",
            "ssnnyfq": "shtcnyJmJSJynHDLLbLTkySfvZe",
            "zhufumao": "shtcnXfIJthvkjhI5zlEJq84i6g",
            "zongjiao": "shtcn73NW0CyoOeF21HWO15KBsb",
            "haokan": "shtcnaYz8Nhv8q6DbWtlL6rMEBd",
            "kdjsfq": "shtcnEokBkIjOUPAk8vbbPKnXgb",
            "ssyy": "shtcnz1ymxHL1u8WHblfqfys7qe",
            "ggdc": "shtcnTuJgeZU2bc7VaesAqk3QJx",
            "gongzhonghao_xinxin": "shtcna98M2mX7TbivTj9Sb7WKBN",
            "youtube": "shtcnrLyr1zbYbhhZyqpN7Xrd5f",
            "weixinzhishu": "shtcnqhMRUGunIfGnGXMOBYiy4K",
            "weixinzhishu_search_word": "shtcnHxCj6dZBYMuK1Q3tIJVlqg",
        }
        return crawler_tokens.get(crawler)
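
    # For example, Feishu.spreadsheettoken("xigua") returns
    # "shtcnvOpx2P8vBXiV91Ot1MKIw8"; unknown crawler names return None.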

    # Get the Feishu API token
    @classmethod
    def get_token(cls, log_type, crawler):
        """
        Get the Feishu API tenant_access_token
        :return:
        """
        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
        post_data = {"app_id": "cli_a13ad2afa438d00b",  # app_id / app_secret of the published app's backend
                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
        try:
            urllib3.disable_warnings()
            response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
            tenant_access_token = response.json()["tenant_access_token"]
            return tenant_access_token
        except Exception as e:
            Common.logger(log_type, crawler).error("获取飞书 api token 异常:{}", e)
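
    # Usage sketch (illustrative; "follow"/"xigua" mirror the __main__ demo below):
    #     token = Feishu.get_token("follow", "xigua")
    #     headers = {"Authorization": "Bearer " + token}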

    # Get spreadsheet metadata
    @classmethod
    def get_metainfo(cls, log_type, crawler):
        """
        Get spreadsheet metadata
        :return:
        """
        try:
            get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                               + cls.spreadsheettoken(crawler) + "/metainfo"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            params = {
                "extFields": "protectedRange",  # extra fields; extFields=protectedRange returns protected row/column info
                "user_id_type": "open_id"  # type of user id to return: open_id or union_id
            }
            urllib3.disable_warnings()
            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
            response = json.loads(r.content.decode("utf8"))
            return response
        except Exception as e:
            Common.logger(log_type, crawler).error("获取表格元数据异常:{}", e)
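
    # Usage sketch: list the worksheet ids of a spreadsheet (response shape as
    # documented for the Feishu v2 metainfo API; arguments are illustrative):
    #     meta = Feishu.get_metainfo("follow", "xigua")
    #     sheet_ids = [s["sheetId"] for s in meta["data"]["sheets"]]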

    # Read all values from a worksheet
    @classmethod
    def get_values_batch(cls, log_type, crawler, sheetid):
        """
        Read all values from a worksheet
        :param log_type: which log to use
        :param crawler: which crawler
        :param sheetid: which worksheet
        :return: all values
        """
        try:
            get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                   + cls.spreadsheettoken(crawler) + "/values_batch_get"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            params = {
                # Multiple query ranges, e.g. url?ranges=range1,range2; each range combines a sheetId and a cell range
                "ranges": sheetid,
                # valueRenderOption=ToString returns plain-text values (except numbers);
                # valueRenderOption=FormattedValue computes and formats cells;
                # valueRenderOption=Formula returns the formula itself for formula cells;
                # valueRenderOption=UnformattedValue computes but does not format
                "valueRenderOption": "ToString",
                # dateTimeRenderOption=FormattedString computes and formats dates/times
                # (numbers are left unformatted) and returns the formatted string
                "dateTimeRenderOption": "",
                # type of user id to return: open_id or union_id
                "user_id_type": "open_id"
            }
            urllib3.disable_warnings()
            r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
            response = json.loads(r.content.decode("utf8"))
            values = response["data"]["valueRanges"][0]["values"]
            return values
        except Exception as e:
            Common.logger(log_type, crawler).error("读取工作表所有数据异常:{}", e)
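
    # Usage sketch ("e075e9" is the xigua sheet id referenced elsewhere in this file):
    #     rows = Feishu.get_values_batch("follow", "xigua", "e075e9")
    #     header, data_rows = rows[0], rows[1:]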

    # Insert rows or columns into a worksheet
    @classmethod
    def insert_columns(cls, log_type, crawler, sheetid, majordimension, startindex, endindex):
        """
        Insert rows or columns into a worksheet
        :param log_type: log path
        :param crawler: which crawler's cloud doc
        :param sheetid: which worksheet
        :param majordimension: rows or columns, ROWS / COLUMNS
        :param startindex: start position
        :param endindex: end position
        """
        try:
            insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                 + cls.spreadsheettoken(crawler) + "/insert_dimension_range"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "dimension": {
                    "sheetId": sheetid,
                    "majorDimension": majordimension,  # default ROWS; ROWS or COLUMNS
                    "startIndex": startindex,  # start position
                    "endIndex": endindex  # end position
                },
                "inheritStyle": "AFTER"  # BEFORE or AFTER; omit to not inherit style
            }
            urllib3.disable_warnings()
            r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type, crawler).info("插入行或列:{}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type, crawler).error("插入行或列异常:{}", e)
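
    # Usage sketch: insert one blank row near the top of a worksheet (index
    # semantics follow the Feishu insert_dimension_range API; values illustrative):
    #     Feishu.insert_columns("follow", "xigua", "e075e9", "ROWS", 1, 2)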

    # Write values
    @classmethod
    def update_values(cls, log_type, crawler, sheetid, ranges, values):
        """
        Write values into a worksheet
        :param log_type: log path
        :param crawler: which crawler's cloud doc
        :param sheetid: which worksheet
        :param ranges: cell range
        :param values: the values to write, as a list
        """
        try:
            update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                + cls.spreadsheettoken(crawler) + "/values_batch_update"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "valueRanges": [
                    {
                        "range": sheetid + "!" + ranges,
                        "values": values
                    },
                ],
            }
            urllib3.disable_warnings()
            r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type, crawler).info("写入数据:{}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type, crawler).error("写入数据异常:{}", e)
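
    # Usage sketch: write a single two-cell row into A2:B2 (placeholder values,
    # not from the original):
    #     Feishu.update_values("follow", "xigua", "e075e9", "A2:B2", [["video_title", "video_url"]])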

    # Merge cells
    @classmethod
    def merge_cells(cls, log_type, crawler, sheetid, ranges):
        """
        Merge cells
        :param log_type: log path
        :param crawler: which crawler
        :param sheetid: which worksheet
        :param ranges: the cell range to merge
        """
        try:
            merge_cells_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/merge_cells"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "range": sheetid + "!" + ranges,
                "mergeType": "MERGE_ROWS"
            }
            urllib3.disable_warnings()
            r = requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type, crawler).info("合并单元格:{}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type, crawler).error("合并单元格异常:{}", e)
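
    # Usage sketch (merges the cells of A2:B2 row-wise; illustrative values):
    #     Feishu.merge_cells("follow", "xigua", "e075e9", "A2:B2")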

    # Read a cell range
    @classmethod
    def get_range_value(cls, log_type, crawler, sheetid, cell):
        """
        Read the content of a cell
        :param log_type: log path
        :param crawler: which crawler
        :param sheetid: which worksheet
        :param cell: which cell
        :return: the cell content
        """
        try:
            get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                  + cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            params = {
                # valueRenderOption=ToString returns plain-text values (except numbers);
                # valueRenderOption=FormattedValue computes and formats cells;
                # valueRenderOption=Formula returns the formula itself for formula cells;
                # valueRenderOption=UnformattedValue computes but does not format.
                "valueRenderOption": "FormattedValue",
                # dateTimeRenderOption=FormattedString computes and formats dates/times
                # (numbers are left unformatted) and returns the formatted string.
                "dateTimeRenderOption": "",
                # type of user id to return: open_id or union_id
                "user_id_type": "open_id"
            }
            urllib3.disable_warnings()
            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
            return r.json()["data"]["valueRange"]["values"][0]
        except Exception as e:
            Common.logger(log_type, crawler).error("读取单元格数据异常:{}", e)
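
    # Usage sketch: the method returns the first (only) row of the requested
    # range as a list (illustrative values):
    #     cell_values = Feishu.get_range_value("follow", "xigua", "e075e9", "B2:B2")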

    # Get the whole sheet content as a flat list
    @classmethod
    def get_sheet_content(cls, log_type, crawler, sheet_id):
        try:
            sheet = Feishu.get_values_batch(log_type, crawler, sheet_id)
            content_list = []
            for row in sheet:
                for cell in row:
                    if cell is not None:
                        content_list.append(cell)
            return content_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'get_sheet_content:{e}\n')
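
    # Usage sketch: flatten every non-empty cell of a worksheet into one list:
    #     contents = Feishu.get_sheet_content("follow", "xigua", "e075e9")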

    # Delete rows or columns; majorDimension: ROWS / COLUMNS
    @classmethod
    def dimension_range(cls, log_type, crawler, sheetid, major_dimension, startindex, endindex):
        """
        Delete rows or columns
        :param log_type: log path
        :param crawler: which crawler
        :param sheetid: worksheet
        :param major_dimension: default ROWS; ROWS or COLUMNS
        :param startindex: start position
        :param endindex: end position
        :return:
        """
        try:
            dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                  + cls.spreadsheettoken(crawler) + "/dimension_range"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "dimension": {
                    "sheetId": sheetid,
                    "majorDimension": major_dimension,
                    "startIndex": startindex,
                    "endIndex": endindex
                }
            }
            urllib3.disable_warnings()
            r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type, crawler).info("删除视频数据:{}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type, crawler).error("删除视频数据异常:{}", e)
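
    # Usage sketch: delete the rows in the given index range (index semantics
    # follow the Feishu dimension_range API; values illustrative):
    #     Feishu.dimension_range("follow", "xigua", "e075e9", "ROWS", 2, 3)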

    # Get a user's open_id
    @classmethod
    def get_userid(cls, log_type, crawler, username):
        try:
            url = "https://open.feishu.cn/open-apis/user/v1/batch_get_id?"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            # Mobile numbers keyed by username; unknown usernames pass through unchanged
            mobiles = {
                "wangkun": "13426262515",
                "gaonannan": "18501180073",
                "xinxin": "15546206651",
                "huxinxue": "18832292015",
                "wuchaoyue": "15712941385",
                "lijinchao": "18524120540",
            }
            username = mobiles.get(username, username)
            data = {"mobiles": [username]}
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=data, verify=False, proxies=proxies)
            open_id = r.json()["data"]["mobile_users"][username][0]["open_id"]
            Common.logger(log_type, crawler).info(f"{username}:{open_id}")
            return open_id
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_userid异常:{e}\n")
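
    # Usage sketch: resolve a teammate's open_id for an @-mention in a bot card:
    #     open_id = Feishu.get_userid("follow", "xigua", "wangkun")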

    # Feishu alert bot
    @classmethod
    def bot(cls, log_type, crawler, text):
        try:
            url = "https://open.feishu.cn/open-apis/bot/v2/hook/96989577-50e7-4653-9ec2-308fe3f2c5fe"
            headers = {'Content-Type': 'application/json'}
            # Per-crawler alert config: card button text, sheet URL, and the users to @-mention
            alerts = {
                "kanyikan": ("看一看爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih", ["wangkun", "gaonannan"]),
                "xiaoniangao_hour": ("小年糕_小时级_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?sheet=yatRv2", ["wangkun", "gaonannan"]),
                "xiaoniangao_person": ("小年糕_用户主页_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?sheet=Wu0CeL", ["wangkun", "gaonannan"]),
                "xiaoniangao_play": ("小年糕_播放量_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?sheet=c85k1C", ["wangkun", "gaonannan"]),
                "xigua": ("西瓜视频_用户主页_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnvOpx2P8vBXiV91Ot1MKIw8?sheet=e075e9", ["wangkun", "wuchaoyue"]),
                "xigua_little_video": ("西瓜视频_小视频_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnvOpx2P8vBXiV91Ot1MKIw8?sheet=hDSDnv", ["wangkun", "wuchaoyue"]),
                "zhihu_hot": ("知乎_热门_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnkGPBmGsjaqapgzouuj8MXe?sheet=8871e3", ["wangkun", "gaonannan"]),
                "zhihu_follow": ("知乎_定向_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnkGPBmGsjaqapgzouuj8MXe?sheet=4MGuux", ["wangkun", "gaonannan"]),
                "haokan_hot": ("好看_热榜_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnaYz8Nhv8q6DbWtlL6rMEBd?sheet=5pWipX", ["wangkun", "wuchaoyue"]),
                "haokan_channel": ("好看_频道_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnaYz8Nhv8q6DbWtlL6rMEBd?sheet=7f05d8", ["wangkun", "wuchaoyue"]),
                "haokan_follow": ("好看_定向_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnaYz8Nhv8q6DbWtlL6rMEBd?sheet=kVaSjf", ["wangkun", "wuchaoyue"]),
                "music_album": ("音乐相册爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnT6zvmfsYe1g0iv4pt7855g", ["wangkun", "gaonannan"]),
                "ssyy": ("胜胜影音爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnz1ymxHL1u8WHblfqfys7qe", ["wangkun", "gaonannan"]),
                "ggdc": ("刚刚都传爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnTuJgeZU2bc7VaesAqk3QJx", ["wangkun", "gaonannan"]),
                "bszf": ("本山祝福爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnGh2rrsPYM4iVNEBO7OqWrb", ["wangkun", "gaonannan"]),
                "jxxf": ("吉祥幸福爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnSx4nafMbLTq7xl7RHBwHBf", ["wangkun", "gaonannan"]),
                "zmyx": ("众妙音信爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnbZIxstPeM0xshW07b26sve", ["wangkun", "gaonannan"]),
                "zhufumao": ("祝福猫视频爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnXfIJthvkjhI5zlEJq84i6g", ["wangkun", "gaonannan"]),
                "kuaishou_follow": ("快手_用户主页_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf?sheet=fYdA8F", ["wangkun", "xinxin"]),
                "kuaishou_recommend": ("快手_推荐榜_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf?sheet=3cd128", ["wangkun", "xinxin"]),
                "ssnnyfq": ("岁岁年年迎福气_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnyJmJSJynHDLLbLTkySfvZe?sheet=290bae", ["wangkun", "xinxin"]),
                "kdjsfq": ("看到就是福气_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnEokBkIjOUPAk8vbbPKnXgb?sheet=ad3b6d", ["wangkun", "xinxin"]),
                "gzh": ("公众号爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnexNXnpDLHhARw0QdiwbYuA", ["wangkun", "huxinxue"]),
                "gongzhonghao_xinxin": ("公众号_信欣_爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcna98M2mX7TbivTj9Sb7WKBN?", ["wangkun", "xinxin"]),
                "weiqun": ("微群爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnoKThNquYRweaylMFVyo9Hc", ["wangkun", "xinxin"]),
                "weishi": ("微视爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh", ["wangkun", "xinxin"]),
                "shipinhao_recommend": ("视频号_推荐_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcn9rOdZRAGFbRkWpn7hqEHGc?sheet=c77cf9", ["wangkun", "wuchaoyue"]),
                "shipinhao_follow": ("视频号_定向_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcn9rOdZRAGFbRkWpn7hqEHGc?sheet=KsVtLe", ["wangkun", "wuchaoyue"]),
                "youtube": ("youtube_定向_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnrLyr1zbYbhhZyqpN7Xrd5f?sheet=GVxlYk", ["wangkun", "wuchaoyue"]),
                "zongjiao": ("宗教公众号爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcn73NW0CyoOeF21HWO15KBsb", ["wangkun", "huxinxue"]),
            }
            # Unknown crawlers fall back to the Xiaoniangao sheet, @-mentioning wangkun only
            content, sheet_url, names = alerts.get(
                crawler, ("小年糕爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh", ["wangkun"]))
            users = "\n" + " ".join(
                "<at id=" + str(cls.get_userid(log_type, crawler, name)) + "></at>" for name in names) + "\n"
            data = json.dumps({
                "msg_type": "interactive",
                "card": {
                    "config": {
                        "wide_screen_mode": True,
                        "enable_forward": True
                    },
                    "elements": [{
                        "tag": "div",
                        "text": {
                            "content": users + text,
                            "tag": "lark_md"
                        }
                    }, {
                        "actions": [{
                            "tag": "button",
                            "text": {
                                "content": content,
                                "tag": "lark_md"
                            },
                            "url": sheet_url,
                            "type": "default",
                            "value": {}
                        }],
                        "tag": "action"
                    }],
                    "header": {
                        "title": {
                            "content": "📣您有新的报警,请注意查收",
                            "tag": "plain_text"
                        }
                    }
                }
            })
            urllib3.disable_warnings()
            r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
            Common.logger(log_type, crawler).info(f'触发机器人消息:{r}, {r.json()["StatusMessage"]}')
        except Exception as e:
            Common.logger(log_type, crawler).error(f"bot异常:{e}\n")


if __name__ == "__main__":
    Feishu.bot('follow', 'xigua', '测试一下,请忽略 ~')