# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/1/31
"""
Feishu sheets configuration: token auth / CRUD / bot alerts
"""
import json
import os
import sys
import requests
import urllib3

sys.path.append(os.getcwd())
from common.common import Common
# from common import Common

proxies = {"http": None, "https": None}


class Feishu:
    """
    Edit Feishu cloud documents
    """
    # Kanyikan crawler data sheet
    kanyikan_url = "https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?"
    # Kuaishou crawler data sheet
    kuaishou_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf?"
    # Weishi crawler data sheet
    weishi_url = "https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?"
    # Xiaoniangao crawler data sheet
    xiaoniangao_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?"
    # Music album
    music_album = "https://w42nne6hzg.feishu.cn/sheets/shtcnT6zvmfsYe1g0iv4pt7855g?"
    # Benshanzhufu data sheet
    crawler_benshanzhufu = "https://w42nne6hzg.feishu.cn/sheets/shtcnGh2rrsPYM4iVNEBO7OqWrb?"
    # Official-account crawler sheet
    gzh_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnexNXnpDLHhARw0QdiwbYuA?"
    # Data monitoring sheet
    crawler_monitor = "https://w42nne6hzg.feishu.cn/sheets/shtcnlZWYazInhf7Z60jkbLRJyd?"
    # Weiqun video crawler sheet
    crawler_weiqun_video = "https://w42nne6hzg.feishu.cn/sheets/shtcnoKThNquYRweaylMFVyo9Hc?"
    # Shipinhao (WeChat Channels) crawler sheet
    crawler_shipinhao = 'https://w42nne6hzg.feishu.cn/sheets/shtcn9rOdZRAGFbRkWpn7hqEHGc?'
    # Xigua Video
    crawler_xigua = 'https://w42nne6hzg.feishu.cn/sheets/shtcnvOpx2P8vBXiV91Ot1MKIw8?'
    # Zhihu (desktop web)
    crawler_zhihu = 'https://w42nne6hzg.feishu.cn/sheets/shtcnkGPBmGsjaqapgzouuj8MXe?'
    # Jixiangxingfu
    crawler_jixiangxingfu = 'https://w42nne6hzg.feishu.cn/sheets/shtcnSx4nafMbLTq7xl7RHBwHBf?'
    # Zhongmiaoyinxin
    crawler_zmyx = 'https://w42nne6hzg.feishu.cn/sheets/shtcnbZIxstPeM0xshW07b26sve?'
    # Suisuiniannianyingfuqi
    crawler_ssnnyfq = 'https://w42nne6hzg.feishu.cn/sheets/shtcnyJmJSJynHDLLbLTkySfvZe?'
    # Zhufumao video
    crawler_zhufumao = 'https://w42nne6hzg.feishu.cn/sheets/shtcnXfIJthvkjhI5zlEJq84i6g?'
    # Religious official accounts
    crawler_zongjiao = 'https://w42nne6hzg.feishu.cn/sheets/shtcn73NW0CyoOeF21HWO15KBsb?'
    # Haokan Video
    crawler_haokan = 'https://w42nne6hzg.feishu.cn/sheets/shtcnaYz8Nhv8q6DbWtlL6rMEBd'
    # Kandaojiushifuqi
    crawler_kandaojiushifuqi = 'https://w42nne6hzg.feishu.cn/sheets/shtcnEokBkIjOUPAk8vbbPKnXgb'
    # Shengshengyingyin
    crawler_shengshengyingyin = 'https://w42nne6hzg.feishu.cn/sheets/shtcnz1ymxHL1u8WHblfqfys7qe'
    # Ganggangdouchuan
    crawler_ganggangdouchuan = 'https://w42nne6hzg.feishu.cn/sheets/shtcnTuJgeZU2bc7VaesAqk3QJx'
    # Zhiqingtiantiankan
    crawler_zhiqingtiantiankan = 'https://w42nne6hzg.feishu.cn/sheets/shtcnjmhKdJOKdqnEzJcZb5xaHc?'
    # Official account_Xinxin
    crawler_gongzhonghao = 'https://w42nne6hzg.feishu.cn/sheets/shtcna98M2mX7TbivTj9Sb7WKBN?'
    # YouTube
    crawler_youtube = 'https://w42nne6hzg.feishu.cn/sheets/shtcnrLyr1zbYbhhZyqpN7Xrd5f?'
    # WeChat Index
    weixinzhishu = 'https://w42nne6hzg.feishu.cn/sheets/shtcnqhMRUGunIfGnGXMOBYiy4K?'
    # WeChat Index_search terms
    weixinzhishu_search_word = 'https://w42nne6hzg.feishu.cn/sheets/shtcnHxCj6dZBYMuK1Q3tIJVlqg?'

    # Feishu spreadsheet tokens
    @classmethod
    def spreadsheettoken(cls, crawler):
        """
        :param crawler: which crawler
        :return: the spreadsheet token for that crawler's sheet, or None if unknown
        """
        tokens = {
            "kanyikan": "shtcngRPoDYAi24x52j2nDuHMih",
            "kuaishou": "shtcnICEfaw9llDNQkKgdymM1xf",
            "weishi": "shtcn5YSWg91JfVGzj0SFZIRRPh",
            "xiaoniangao": "shtcnYxiyQ1wLklo1W5Kdqc9cGh",
            "monitor": "shtcnlZWYazInhf7Z60jkbLRJyd",
            "music_album": "shtcnT6zvmfsYe1g0iv4pt7855g",
            "benshanzhufu": "shtcnGh2rrsPYM4iVNEBO7OqWrb",
            "gzh": "shtcnexNXnpDLHhARw0QdiwbYuA",
            "weiqun": "shtcnoKThNquYRweaylMFVyo9Hc",
            "shipinhao": "shtcn9rOdZRAGFbRkWpn7hqEHGc",
            "xigua": "shtcnvOpx2P8vBXiV91Ot1MKIw8",
            "zhihu": "shtcnkGPBmGsjaqapgzouuj8MXe",
            "jixiangxingfu": "shtcnSx4nafMbLTq7xl7RHBwHBf",
            "zhongmiaoyinxin": "shtcnbZIxstPeM0xshW07b26sve",
            "suisuiniannianyingfuqi": "shtcnyJmJSJynHDLLbLTkySfvZe",
            "zhufumao": "shtcnXfIJthvkjhI5zlEJq84i6g",
            "zongjiao": "shtcn73NW0CyoOeF21HWO15KBsb",
            "haokan": "shtcnaYz8Nhv8q6DbWtlL6rMEBd",
            "kandaojiushifuqi": "shtcnEokBkIjOUPAk8vbbPKnXgb",
            "shengshengyingyin": "shtcnz1ymxHL1u8WHblfqfys7qe",
            "ganggangdouchuan": "shtcnTuJgeZU2bc7VaesAqk3QJx",
            "youtube": "shtcnrLyr1zbYbhhZyqpN7Xrd5f",
            "weixinzhishu": "shtcnqhMRUGunIfGnGXMOBYiy4K",
            "weixinzhishu_search_word": "shtcnHxCj6dZBYMuK1Q3tIJVlqg",
            "gongzhonghao": "shtcna98M2mX7TbivTj9Sb7WKBN",
            "douyin": "shtcnhq63MoXOpqbkuLuoapYIAh",
            "zhiqingtiantiankan": "shtcnjmhKdJOKdqnEzJcZb5xaHc",
        }
        return tokens.get(crawler)

    # Get a Feishu API token
    @classmethod
    def get_token(cls, log_type, crawler):
        """
        Get a Feishu API tenant_access_token
        :return:
        """
        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
        post_data = {"app_id": "cli_a13ad2afa438d00b",  # backend credentials of the account that published the app
                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
        try:
            urllib3.disable_warnings()
            response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
            tenant_access_token = response.json()["tenant_access_token"]
            return tenant_access_token
        except Exception as e:
            Common.logger(log_type, crawler).error("Failed to get Feishu API token: {}", e)

    # Get spreadsheet metadata
    @classmethod
    def get_metainfo(cls, log_type, crawler):
        """
        Get spreadsheet metadata
        :return:
        """
        try:
            get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                               + cls.spreadsheettoken(crawler) + "/metainfo"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            params = {
                "extFields": "protectedRange",  # extra fields to return; extFields=protectedRange returns protected row/column info
                "user_id_type": "open_id"  # type of user id to return: open_id or union_id
            }
            urllib3.disable_warnings()
            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
            response = json.loads(r.content.decode("utf8"))
            return response
        except Exception as e:
            Common.logger(log_type, crawler).error("Failed to get spreadsheet metadata: {}", e)

    # Read all values from a worksheet
    @classmethod
    def get_values_batch(cls, log_type, crawler, sheetid):
        """
        Read all values from a worksheet
        :param log_type: which log to use
        :param crawler: which crawler
        :param sheetid: which worksheet
        :return: all values
        """
        try:
            get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                   + cls.spreadsheettoken(crawler) + "/values_batch_get"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            params = {
                # Multiple ranges, e.g. url?ranges=range1,range2; each range combines a sheetId and a cell range
                "ranges": sheetid,
                # valueRenderOption=ToString returns plain-text values (except numeric types);
                # valueRenderOption=FormattedValue computes and formats cell values;
                # valueRenderOption=Formula returns the formula itself for cells that contain one;
                # valueRenderOption=UnformattedValue computes but does not format cell values
                "valueRenderOption": "ToString",
                # dateTimeRenderOption=FormattedString formats date/time values by their cell format
                # (numbers are left unformatted) and returns the formatted string
                "dateTimeRenderOption": "",
                # type of user id to return: open_id or union_id
                "user_id_type": "open_id"
            }
            urllib3.disable_warnings()
            r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
            # print(r.text)
            response = json.loads(r.content.decode("utf8"))
            values = response["data"]["valueRanges"][0]["values"]
            return values
        except Exception as e:
            Common.logger(log_type, crawler).error("Failed to read worksheet values: {}", e)

    # Insert rows or columns into a worksheet
    @classmethod
    def insert_columns(cls, log_type, crawler, sheetid, majordimension, startindex, endindex):
        """
        Insert rows or columns into a worksheet
        :param log_type: log path
        :param crawler: which crawler's document
        :param sheetid: which worksheet
        :param majordimension: rows or columns, ROWS / COLUMNS
        :param startindex: start position
        :param endindex: end position
        """
        try:
            insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                 + cls.spreadsheettoken(crawler) + "/insert_dimension_range"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "dimension": {
                    "sheetId": sheetid,
                    "majorDimension": majordimension,  # default ROWS; ROWS or COLUMNS
                    "startIndex": startindex,  # start position
                    "endIndex": endindex  # end position
                },
                "inheritStyle": "AFTER"  # BEFORE or AFTER; omit to not inherit style
            }
            urllib3.disable_warnings()
            r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type, crawler).info("Insert rows/columns: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type, crawler).error("Failed to insert rows/columns: {}", e)

    # Write values
    @classmethod
    def update_values(cls, log_type, crawler, sheetid, ranges, values):
        """
        Write values
        :param log_type: log path
        :param crawler: which crawler's document
        :param sheetid: which worksheet
        :param ranges: cell range
        :param values: the values to write, as a list
        """
        try:
            update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                + cls.spreadsheettoken(crawler) + "/values_batch_update"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "valueRanges": [
                    {
                        "range": sheetid + "!" + ranges,
                        "values": values
                    },
                ],
            }
            urllib3.disable_warnings()
            r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type, crawler).info("Write values: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type, crawler).error("Failed to write values: {}", e)

    # Merge cells
    @classmethod
    def merge_cells(cls, log_type, crawler, sheetid, ranges):
        """
        Merge cells
        :param log_type: log path
        :param crawler: which crawler
        :param sheetid: which worksheet
        :param ranges: the cell range to merge
        """
        try:
            merge_cells_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/merge_cells"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "range": sheetid + "!" + ranges,
                "mergeType": "MERGE_ROWS"
            }
            urllib3.disable_warnings()
            r = requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type, crawler).info("Merge cells: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type, crawler).error("Failed to merge cells: {}", e)

    # Read a cell range
    @classmethod
    def get_range_value(cls, log_type, crawler, sheetid, cell):
        """
        Read the content of a cell range
        :param log_type: log path
        :param crawler: which crawler
        :param sheetid: which worksheet
        :param cell: which cell(s)
        :return: the cell content
        """
        try:
            get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                  + cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            params = {
                # valueRenderOption=ToString returns plain-text values (except numeric types);
                # valueRenderOption=FormattedValue computes and formats cell values;
                # valueRenderOption=Formula returns the formula itself for cells that contain one;
                # valueRenderOption=UnformattedValue computes but does not format cell values.
                "valueRenderOption": "FormattedValue",
                # dateTimeRenderOption=FormattedString formats date/time values by their cell format
                # (numbers are left unformatted) and returns the formatted string.
                "dateTimeRenderOption": "",
                # type of user id to return: open_id or union_id
                "user_id_type": "open_id"
            }
            urllib3.disable_warnings()
            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
            # print(r.text)
            return r.json()["data"]["valueRange"]["values"][0]
        except Exception as e:
            Common.logger(log_type, crawler).error("Failed to read cell values: {}", e)

    # Get a sheet's whole content as a flat list
    @classmethod
    def get_sheet_content(cls, log_type, crawler, sheet_id):
        try:
            sheet = Feishu.get_values_batch(log_type, crawler, sheet_id)
            content_list = []
            for row in sheet:
                for cell in row:
                    if cell is not None:
                        content_list.append(cell)
            return content_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'get_sheet_content: {e}\n')

    # Delete rows or columns; ROWS or COLUMNS
    @classmethod
    def dimension_range(cls, log_type, crawler, sheetid, major_dimension, startindex, endindex):
        """
        Delete rows or columns
        :param log_type: log path
        :param crawler: which crawler
        :param sheetid: worksheet
        :param major_dimension: default ROWS; ROWS or COLUMNS
        :param startindex: start position
        :param endindex: end position
        :return:
        """
        try:
            dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                  + cls.spreadsheettoken(crawler) + "/dimension_range"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "dimension": {
                    "sheetId": sheetid,
                    "majorDimension": major_dimension,
                    "startIndex": startindex,
                    "endIndex": endindex
                }
            }
            urllib3.disable_warnings()
            r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type, crawler).info("Delete rows/columns: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type, crawler).error("Failed to delete rows/columns: {}", e)

    # Get a user's open_id
    @classmethod
    def get_userid(cls, log_type, crawler, username):
        try:
            url = "https://open.feishu.cn/open-apis/user/v1/batch_get_id?"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                "Content-Type": "application/json; charset=utf-8"
            }
            # Map usernames to the mobile numbers registered with Feishu
            mobiles = {
                "wangkun": "13426262515",
                "gaonannan": "18501180073",
                "xinxin": "15546206651",
                "huxinxue": "18832292015",
                "wuchaoyue": "15712941385",
                "muxinyi": "13699208058",
                "wangxueke": "13513479926",
            }
            username = mobiles.get(username, username)
            data = {"mobiles": [username]}
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=data, verify=False, proxies=proxies)
            open_id = r.json()["data"]["mobile_users"][username][0]["open_id"]
            # Common.logger(log_type, crawler).info(f"{username}:{open_id}")
            # print(f"{username}:{open_id}")
            return open_id
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_userid exception: {e}\n")

    # Feishu bot
    @classmethod
    def bot(cls, log_type, crawler, text):
        try:
            url = "https://open.feishu.cn/open-apis/bot/v2/hook/96989577-50e7-4653-9ec2-308fe3f2c5fe"
            headers = {'Content-Type': 'application/json'}
            # crawler -> (card button label, sheet url, users to @-mention)
            alert_sheets = {
                "kanyikan": ("看一看爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih", ["wangkun", "gaonannan"]),
                "weixinzhishu_out": ("微信指数_站外指数", "https://w42nne6hzg.feishu.cn/sheets/shtcnqhMRUGunIfGnGXMOBYiy4K?sheet=YVuVgQ", ["wangkun", "muxinyi"]),
                "weixinzhishu_inner_sort": ("微信指数_站内短期指数", "https://w42nne6hzg.feishu.cn/sheets/shtcnqhMRUGunIfGnGXMOBYiy4K?sheet=DrZHpa", ["wangkun", "muxinyi"]),
                "weixinzhishu_inner_long": ("微信指数_站内长期指数", "https://w42nne6hzg.feishu.cn/sheets/shtcnqhMRUGunIfGnGXMOBYiy4K?sheet=JpgyAv", ["wangkun", "muxinyi"]),
                "xiaoniangao_hour": ("小年糕_小时级_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?sheet=yatRv2", ["wangkun", "gaonannan"]),
                "xiaoniangao_person": ("小年糕_用户主页_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?sheet=Wu0CeL", ["wangkun", "gaonannan"]),
                "xiaoniangao_play": ("小年糕_播放量_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?sheet=c85k1C", ["wangkun", "gaonannan"]),
                "xigua": ("西瓜视频_用户主页_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnvOpx2P8vBXiV91Ot1MKIw8?sheet=e075e9", ["wangkun", "wuchaoyue"]),
                "xigua_little_video": ("西瓜视频_小视频_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnvOpx2P8vBXiV91Ot1MKIw8?sheet=hDSDnv", ["wangkun", "wuchaoyue"]),
                "zhihu_hot": ("知乎_热门_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnkGPBmGsjaqapgzouuj8MXe?sheet=8871e3", ["wangkun", "gaonannan"]),
                "zhihu_follow": ("知乎_定向_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnkGPBmGsjaqapgzouuj8MXe?sheet=4MGuux", ["wangkun", "gaonannan"]),
                "haokan_hot": ("好看_热榜_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnaYz8Nhv8q6DbWtlL6rMEBd?sheet=5pWipX", ["wangkun", "wuchaoyue"]),
                "haokan_channel": ("好看_频道_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnaYz8Nhv8q6DbWtlL6rMEBd?sheet=7f05d8", ["wangkun", "wuchaoyue"]),
                "haokan_follow": ("好看_定向_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnaYz8Nhv8q6DbWtlL6rMEBd?sheet=kVaSjf", ["wangkun", "wuchaoyue"]),
                "music_album": ("音乐相册爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnT6zvmfsYe1g0iv4pt7855g", ["wangkun", "gaonannan"]),
                "ssyy": ("胜胜影音爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnz1ymxHL1u8WHblfqfys7qe", ["wangkun", "gaonannan"]),
                "ggdc": ("刚刚都传爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnTuJgeZU2bc7VaesAqk3QJx", ["wangkun", "gaonannan"]),
                "bszf": ("本山祝福爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnGh2rrsPYM4iVNEBO7OqWrb", ["wangkun", "gaonannan"]),
                "jxxf": ("吉祥幸福爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnSx4nafMbLTq7xl7RHBwHBf", ["wangkun", "gaonannan"]),
                "zmyx": ("众妙音信爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnbZIxstPeM0xshW07b26sve", ["wangkun", "gaonannan"]),
                "zhufumao": ("祝福猫视频爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnXfIJthvkjhI5zlEJq84i6g", ["wangkun", "gaonannan"]),
                "kuaishou_follow": ("快手_用户主页_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf?sheet=fYdA8F", ["wangkun", "xinxin"]),
                "kuaishou_recommend": ("快手_推荐榜_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf?sheet=3cd128", ["wangkun", "xinxin"]),
                "ssnnyfq": ("岁岁年年迎福气_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnyJmJSJynHDLLbLTkySfvZe?sheet=290bae", ["wangkun", "xinxin"]),
                "kdjsfq": ("看到就是福气_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnEokBkIjOUPAk8vbbPKnXgb?sheet=ad3b6d", ["wangkun", "xinxin"]),
                "gzh": ("公众号爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnexNXnpDLHhARw0QdiwbYuA", ["wangkun", "huxinxue"]),
                "gongzhonghao": ("公众号_信欣_爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcna98M2mX7TbivTj9Sb7WKBN?", ["huxinxue", "wangxueke", "xinxin"]),
                "weiqun": ("微群爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnoKThNquYRweaylMFVyo9Hc", ["wangkun", "xinxin"]),
                "weishi": ("微视爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh", ["wangkun", "xinxin"]),
                "shipinhao_recommend": ("视频号_推荐_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcn9rOdZRAGFbRkWpn7hqEHGc?sheet=c77cf9", ["wangkun", "wuchaoyue"]),
                "shipinhao_follow": ("视频号_定向_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcn9rOdZRAGFbRkWpn7hqEHGc?sheet=KsVtLe", ["wangkun", "wuchaoyue"]),
                "youtube": ("youtube_定向_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnrLyr1zbYbhhZyqpN7Xrd5f?sheet=GVxlYk", ["wangkun", "wuchaoyue"]),
                "zongjiao": ("宗教公众号爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcn73NW0CyoOeF21HWO15KBsb", ["wangkun", "huxinxue"]),
            }
            # Fall back to the Xiaoniangao sheet, @-mentioning wangkun only
            content, sheet_url, user_names = alert_sheets.get(
                crawler,
                ("小年糕爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh", ["wangkun"]))
            at_users = " ".join(f"<at id={cls.get_userid(log_type, crawler, name)}></at>" for name in user_names)
            users = f"\n{at_users}\n"
            data = json.dumps({
                "msg_type": "interactive",
                "card": {
                    "config": {
                        "wide_screen_mode": True,
                        "enable_forward": True
                    },
                    "elements": [{
                        "tag": "div",
                        "text": {
                            "content": users + text,
                            "tag": "lark_md"
                        }
                    }, {
                        "actions": [{
                            "tag": "button",
                            "text": {
                                "content": content,
                                "tag": "lark_md"
                            },
                            "url": sheet_url,
                            "type": "default",
                            "value": {}
                        }],
                        "tag": "action"
                    }],
                    "header": {
                        "title": {
                            "content": "📣您有新的信息,请注意查收",
                            "tag": "plain_text"
                        }
                    }
                }
            })
            urllib3.disable_warnings()
            r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
            Common.logger(log_type, crawler).info(f'Bot message triggered: {r.status_code}, {text}')
        except Exception as e:
            Common.logger(log_type, crawler).error(f"bot exception: {e}\n")


if __name__ == "__main__":
    Feishu.bot('bor', 'gongzhonghao', 'token过期啦,请扫码更换\nhttps://mp.weixin.qq.com/')
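
    # A minimal usage sketch of the sheet CRUD helpers. Everything below is
    # illustrative: 'demo' as the log type, the 'yatRv2' worksheet (taken from
    # the xiaoniangao_hour alert above), and the placeholder values are
    # assumptions, not calls the original module makes. Any real run also needs
    # valid app credentials in get_token(). Uncomment to try:
    # values = Feishu.get_values_batch('demo', 'xiaoniangao', 'yatRv2')       # read every row of the worksheet
    # Feishu.insert_columns('demo', 'xiaoniangao', 'yatRv2', 'ROWS', 1, 2)    # insert one blank row under the header
    # Feishu.update_values('demo', 'xiaoniangao', 'yatRv2', 'A2:B2',
    #                      [['placeholder_title', 'placeholder_url']])        # write two placeholder cells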