# feishu_lib.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2022/8/9
import json

import requests
import urllib3

from main.common import Common

# Bypass any system-level proxy for direct requests to the Feishu API.
proxies = {"http": None, "https": None}


class Feishu:
    """
    Edit Feishu (Lark) spreadsheet documents.
    """
    # Kanyikan crawler data sheet
    kanyikan_url = "https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?"
    # Kuaishou crawler data sheet
    kuaishou_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf?"
    # Weishi crawler data sheet
    weishi_url = "https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?"
    # Xiaoniangao crawler data sheet
    xiaoniangao_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?"
    # Music album
    music_album = "https://w42nne6hzg.feishu.cn/sheets/shtcnT6zvmfsYe1g0iv4pt7855g?"
    # Benshanzhufu data sheet
    crawler_benshanzhufu = "https://w42nne6hzg.feishu.cn/sheets/shtcnGh2rrsPYM4iVNEBO7OqWrb?"
    # Official-accounts (gzh) crawler sheet
    gzh_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnexNXnpDLHhARw0QdiwbYuA?"
    # Data monitoring sheet
    crawler_monitor = "https://w42nne6hzg.feishu.cn/sheets/shtcnlZWYazInhf7Z60jkbLRJyd?"
    # Weiqun video crawler sheet
    crawler_weiqun_video = "https://w42nne6hzg.feishu.cn/sheets/shtcnoKThNquYRweaylMFVyo9Hc?"
    # Shipinhao (WeChat Channels) crawler sheet
    crawler_shipinhao = "https://w42nne6hzg.feishu.cn/sheets/shtcn9rOdZRAGFbRkWpn7hqEHGc?"
    # Xigua Video
    crawler_xigua = "https://w42nne6hzg.feishu.cn/sheets/shtcnvOpx2P8vBXiV91Ot1MKIw8?"
    # Zhihu (PC)
    crawler_zhihu = "https://w42nne6hzg.feishu.cn/sheets/shtcnkGPBmGsjaqapgzouuj8MXe?"
    # Jixiangxingfu
    crawler_jixiangxingfu = "https://w42nne6hzg.feishu.cn/sheets/shtcnSx4nafMbLTq7xl7RHBwHBf?"
    # Zhongmiaoyinxin (zmyx)
    crawler_zmyx = "https://w42nne6hzg.feishu.cn/sheets/shtcnbZIxstPeM0xshW07b26sve?"
    # Suisuiniannianyingfuqi (ssnnyfq)
    crawler_ssnnyfq = "https://w42nne6hzg.feishu.cn/sheets/shtcnyJmJSJynHDLLbLTkySfvZe?"
    # Zhufumao video
    crawler_zhufumao = "https://w42nne6hzg.feishu.cn/sheets/shtcnXfIJthvkjhI5zlEJq84i6g?"
    # Religious official accounts
    crawler_zongjiao = "https://w42nne6hzg.feishu.cn/sheets/shtcn73NW0CyoOeF21HWO15KBsb?"
    # Haokan Video
    crawler_haokan = "https://w42nne6hzg.feishu.cn/sheets/shtcnaYz8Nhv8q6DbWtlL6rMEBd"
    # Kandaojiushifuqi (kdjsfq)
    crawler_kandaojiushifuqi = "https://w42nne6hzg.feishu.cn/sheets/shtcnEokBkIjOUPAk8vbbPKnXgb"
    # Shengshengyingyin (ssyy)
    crawler_shengshengyingyin = "https://w42nne6hzg.feishu.cn/sheets/shtcnz1ymxHL1u8WHblfqfys7qe"

    # Spreadsheet token in each Feishu document URL
    @classmethod
    def spreadsheettoken(cls, crawler):
        """
        :param crawler: which crawler
        :return: the spreadsheet token for that crawler's document
        """
        tokens = {
            "kanyikan": "shtcngRPoDYAi24x52j2nDuHMih",
            "kuaishou": "shtcnICEfaw9llDNQkKgdymM1xf",
            "weishi": "shtcn5YSWg91JfVGzj0SFZIRRPh",
            "xiaoniangao": "shtcnYxiyQ1wLklo1W5Kdqc9cGh",
            "monitor": "shtcnlZWYazInhf7Z60jkbLRJyd",
            "music_album": "shtcnT6zvmfsYe1g0iv4pt7855g",
            "bszf": "shtcnGh2rrsPYM4iVNEBO7OqWrb",
            "gzh": "shtcnexNXnpDLHhARw0QdiwbYuA",
            "weiqun": "shtcnoKThNquYRweaylMFVyo9Hc",
            "shipinhao": "shtcn9rOdZRAGFbRkWpn7hqEHGc",
            "xigua": "shtcnvOpx2P8vBXiV91Ot1MKIw8",
            "zhihu": "shtcnkGPBmGsjaqapgzouuj8MXe",
            "jxxf": "shtcnSx4nafMbLTq7xl7RHBwHBf",
            "zmyx": "shtcnbZIxstPeM0xshW07b26sve",
            "ssnnyfq": "shtcnyJmJSJynHDLLbLTkySfvZe",
            "zhufumao": "shtcnXfIJthvkjhI5zlEJq84i6g",
            "zongjiao": "shtcn73NW0CyoOeF21HWO15KBsb",
            "haokan": "shtcnaYz8Nhv8q6DbWtlL6rMEBd",
            "kdjsfq": "shtcnEokBkIjOUPAk8vbbPKnXgb",
            "ssyy": "shtcnz1ymxHL1u8WHblfqfys7qe"
        }
        return tokens.get(crawler)
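
    # Usage sketch (illustrative): look up the token for one crawler.
    #   Feishu.spreadsheettoken("xigua")  # -> "shtcnvOpx2P8vBXiV91Ot1MKIw8"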

    # Get a Feishu API token
    @classmethod
    def get_token(cls, log_type):
        """
        Get a Feishu API tenant_access_token.
        :return: tenant_access_token
        """
        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
        post_data = {
            "app_id": "cli_a13ad2afa438d00b",  # credentials of the published Feishu app
            "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"
        }
        try:
            urllib3.disable_warnings()
            response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
            tenant_access_token = response.json()["tenant_access_token"]
            return tenant_access_token
        except Exception as e:
            Common.logger(log_type).error("Failed to get Feishu API token: {}", e)
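
    # The tenant_access_token is short-lived (on the order of two hours per
    # the Feishu docs), so every method below fetches a fresh one instead of
    # caching it, e.g.:
    #   headers = {"Authorization": "Bearer " + Feishu.get_token("demo")}
    # ("demo" is an illustrative log_type, not a value defined in this file.)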

    # Get spreadsheet metadata
    @classmethod
    def get_metainfo(cls, log_type, crawler):
        """
        Get the metadata of a spreadsheet.
        :param log_type: which log to use
        :param crawler: which crawler
        :return: the metadata response as a dict
        """
        get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                           + cls.spreadsheettoken(crawler) + "/metainfo"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            # Extra fields to return; extFields=protectedRange also returns protected row/column info.
            "extFields": "protectedRange",
            # Type of user id to return: open_id or union_id.
            "user_id_type": "open_id"
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
            response = json.loads(r.content.decode("utf8"))
            return response
        except Exception as e:
            Common.logger(log_type).error("Failed to get spreadsheet metadata: {}", e)
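
    # Usage sketch (illustrative; assumes the v2 metainfo response carries a
    # data.sheets list, which should be verified against the Feishu docs):
    #   meta = Feishu.get_metainfo("demo", "xiaoniangao")
    #   sheet_ids = [s["sheetId"] for s in meta["data"]["sheets"]]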

    # Read all data from a worksheet
    @classmethod
    def get_values_batch(cls, log_type, crawler, sheetid):
        """
        Read all data from a worksheet.
        :param log_type: which log to use
        :param crawler: which crawler
        :param sheetid: which worksheet
        :return: all values, as a list of row lists
        """
        get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                               + cls.spreadsheettoken(crawler) + "/values_batch_get"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            # Multiple query ranges, e.g. url?ranges=range1,range2; each range is a sheetId plus an optional cell range.
            "ranges": sheetid,
            # valueRenderOption=ToString returns plain-text values (except numeric types);
            # valueRenderOption=FormattedValue computes and formats cells;
            # valueRenderOption=Formula returns the formula itself for formula cells;
            # valueRenderOption=UnformattedValue computes but does not format cells.
            "valueRenderOption": "ToString",
            # dateTimeRenderOption=FormattedString computes and formats dates/times by their cell format
            # (numbers are not formatted) and returns the formatted string.
            "dateTimeRenderOption": "",
            # Type of user id to return: open_id or union_id.
            "user_id_type": "open_id"
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
            # print(r.text)
            response = json.loads(r.content.decode("utf8"))
            values = response["data"]["valueRanges"][0]["values"]
            return values
        except Exception as e:
            Common.logger(log_type).error("Failed to read worksheet data: {}", e)
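
    # Usage sketch: iterate the rows of one worksheet ("yatRv2" is the
    # Xiaoniangao hourly sheet id taken from the URLs in bot() below):
    #   for row in Feishu.get_values_batch("demo", "xiaoniangao", "yatRv2"):
    #       print(row)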

    # Insert rows or columns into a worksheet
    @classmethod
    def insert_columns(cls, log_type, crawler, sheetid, majordimension, startindex, endindex):
        """
        Insert rows or columns into a worksheet.
        :param log_type: which log to use
        :param crawler: which crawler's document
        :param sheetid: which worksheet
        :param majordimension: rows or columns, ROWS / COLUMNS
        :param startindex: start position
        :param endindex: end position
        """
        insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                             + cls.spreadsheettoken(crawler) + "/insert_dimension_range"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "dimension": {
                "sheetId": sheetid,
                "majorDimension": majordimension,  # defaults to ROWS; ROWS or COLUMNS
                "startIndex": startindex,  # start position
                "endIndex": endindex  # end position
            },
            "inheritStyle": "AFTER"  # BEFORE or AFTER; omit to not inherit style
        }
        try:
            urllib3.disable_warnings()
            r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Insert rows/columns: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to insert rows/columns: {}", e)
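
    # Usage sketch: insert one blank row near the top of a worksheet. The
    # exact startIndex/endIndex semantics follow the insert_dimension_range
    # API; verify them against the Feishu docs before relying on this call.
    #   Feishu.insert_columns("demo", "xiaoniangao", "yatRv2", "ROWS", 1, 2)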

    # Write data
    @classmethod
    def update_values(cls, log_type, crawler, sheetid, ranges, values):
        """
        Write data into a worksheet.
        :param log_type: which log to use
        :param crawler: which crawler's document
        :param sheetid: which worksheet
        :param ranges: cell range
        :param values: the data to write, as a list of row lists
        """
        update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                            + cls.spreadsheettoken(crawler) + "/values_batch_update"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "valueRanges": [
                {
                    "range": sheetid + "!" + ranges,
                    "values": values
                },
            ],
        }
        try:
            urllib3.disable_warnings()
            r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Write data: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to write data: {}", e)
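
    # Usage sketch: write one row of three cells into A2:C2; values must be
    # a list of row lists, matching the body built above.
    #   Feishu.update_values("demo", "xiaoniangao", "yatRv2", "A2:C2",
    #                        [["title", "play_cnt", "video_url"]])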

    # Merge cells
    @classmethod
    def merge_cells(cls, log_type, crawler, sheetid, ranges):
        """
        Merge cells.
        :param log_type: which log to use
        :param crawler: which crawler
        :param sheetid: which worksheet
        :param ranges: the cell range to merge
        """
        merge_cells_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                          + cls.spreadsheettoken(crawler) + "/merge_cells"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "range": sheetid + "!" + ranges,
            "mergeType": "MERGE_ROWS"
        }
        try:
            urllib3.disable_warnings()
            r = requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Merge cells: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error("Failed to merge cells: {}", e)
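
    # Usage sketch: merge A2:B2 into one cell (mergeType is fixed to
    # MERGE_ROWS above, i.e. merging along each row):
    #   Feishu.merge_cells("demo", "xiaoniangao", "yatRv2", "A2:B2")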

    # Read cell data
    @classmethod
    def get_range_value(cls, log_type, crawler, sheetid, cell):
        """
        Read the content of a cell range.
        :param log_type: which log to use
        :param crawler: which crawler
        :param sheetid: which worksheet
        :param cell: which cell
        :return: cell content
        """
        get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            # valueRenderOption=ToString returns plain-text values (except numeric types);
            # valueRenderOption=FormattedValue computes and formats cells;
            # valueRenderOption=Formula returns the formula itself for formula cells;
            # valueRenderOption=UnformattedValue computes but does not format cells.
            "valueRenderOption": "FormattedValue",
            # dateTimeRenderOption=FormattedString computes and formats dates/times by their cell format
            # (numbers are not formatted) and returns the formatted string.
            "dateTimeRenderOption": "",
            # Type of user id to return: open_id or union_id.
            "user_id_type": "open_id"
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
            # print(r.text)
            return r.json()["data"]["valueRange"]["values"][0]
        except Exception as e:
            Common.logger(log_type).error("Failed to read cell data: {}", e)
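
    # Usage sketch: read a single cell. The method returns the first row of
    # the requested range, so one cell comes back as a one-element list:
    #   value = Feishu.get_range_value("demo", "xiaoniangao", "yatRv2", "B2:B2")[0]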

    # Delete rows or columns; ROWS or COLUMNS
    @classmethod
    def dimension_range(cls, log_type, crawler, sheetid, major_dimension, startindex, endindex):
        """
        Delete rows or columns.
        :param log_type: which log to use
        :param crawler: which crawler
        :param sheetid: which worksheet
        :param major_dimension: defaults to ROWS; ROWS or COLUMNS
        :param startindex: start position
        :param endindex: end position
        :return:
        """
        dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/dimension_range"
        headers = {
            "Authorization": "Bearer " + cls.get_token(log_type),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "dimension": {
                "sheetId": sheetid,
                "majorDimension": major_dimension,
                "startIndex": startindex,
                "endIndex": endindex
            }
        }
        try:
            urllib3.disable_warnings()
            r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
            Common.logger(log_type).info("Delete rows/columns: {}", r.json()["msg"])
        except Exception as e:
            Common.logger(log_type).error(f"Failed to delete rows/columns: {e}\n")
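
    # Usage sketch: delete row 2 of a worksheet. As with insert_columns,
    # confirm the startIndex/endIndex bounds against the Feishu
    # dimension_range docs before use.
    #   Feishu.dimension_range("demo", "xiaoniangao", "yatRv2", "ROWS", 2, 2)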

    # Get a user's open_id
    @classmethod
    def get_userid(cls, log_type, username):
        try:
            url = "https://open.feishu.cn/open-apis/user/v1/batch_get_id?"
            headers = {
                "Authorization": "Bearer " + cls.get_token(log_type),
                "Content-Type": "application/json; charset=utf-8"
            }
            # Mobile number for each known username; anything else is passed
            # through unchanged and treated as a mobile number.
            mobiles = {
                "wangkun": "13426262515",
                "gaonannan": "18501180073",
                "xinxin": "15546206651",
                "huxinxue": "18832292015",
                "wuchaoyue": "15712941385",
                "lijinchao": "18524120540"
            }
            username = mobiles.get(username, username)
            data = {"mobiles": [username]}
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=data, verify=False, proxies=proxies)
            open_id = r.json()["data"]["mobile_users"][username][0]["open_id"]
            Common.logger(log_type).info(f"{username}:{open_id}")
            # print(f"{username}:{open_id}")
            return open_id
        except Exception as e:
            Common.logger(log_type).error(f"get_userid exception: {e}\n")
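
    # Usage sketch: resolve a teammate's open_id for the @-mentions that
    # bot() builds below:
    #   open_id = Feishu.get_userid("demo", "wangkun")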

    # Feishu bot
    @classmethod
    def bot(cls, log_type, crawler, text):
        try:
            url = "https://open.feishu.cn/open-apis/bot/v2/hook/96989577-50e7-4653-9ec2-308fe3f2c5fe"
            headers = {
                "Content-Type": "application/json"
            }
            # crawler -> (card button label, sheet URL, users to @-mention)
            bot_sheets = {
                "kanyikan": ("看一看爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih", ["wangkun", "gaonannan"]),
                "xiaoniangao_hour": ("小年糕_小时级_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?sheet=yatRv2", ["wangkun", "gaonannan"]),
                "xiaoniangao_person": ("小年糕_用户主页_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?sheet=Wu0CeL", ["wangkun", "gaonannan"]),
                "xiaoniangao_play": ("小年糕_播放量_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?sheet=c85k1C", ["wangkun", "gaonannan"]),
                "xigua_video": ("西瓜视频_用户主页_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnvOpx2P8vBXiV91Ot1MKIw8?sheet=e075e9", ["wangkun", "wuchaoyue"]),
                "xigua_little_video": ("西瓜视频_小视频_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnvOpx2P8vBXiV91Ot1MKIw8?sheet=hDSDnv", ["wangkun", "wuchaoyue"]),
                "zhihu_hot": ("知乎_热门_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnkGPBmGsjaqapgzouuj8MXe?sheet=8871e3", ["wangkun", "gaonannan"]),
                "zhihu_follow": ("知乎_定向_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnkGPBmGsjaqapgzouuj8MXe?sheet=4MGuux", ["wangkun", "gaonannan"]),
                "haokan_hot": ("好看_热榜_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnaYz8Nhv8q6DbWtlL6rMEBd?sheet=5pWipX", ["wangkun", "wuchaoyue"]),
                "haokan_channel": ("好看_频道_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnaYz8Nhv8q6DbWtlL6rMEBd?sheet=7f05d8", ["wangkun", "wuchaoyue"]),
                "music_album": ("音乐相册爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnT6zvmfsYe1g0iv4pt7855g", ["wangkun", "gaonannan"]),
                "ssyy": ("胜胜影音爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnz1ymxHL1u8WHblfqfys7qe", ["wangkun", "gaonannan"]),
                "bszf": ("本山祝福爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnGh2rrsPYM4iVNEBO7OqWrb", ["wangkun", "gaonannan"]),
                "jxxf": ("吉祥幸福爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnSx4nafMbLTq7xl7RHBwHBf", ["wangkun", "gaonannan"]),
                "zmyx": ("众妙音信爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnbZIxstPeM0xshW07b26sve", ["wangkun", "gaonannan"]),
                "zhufumao": ("祝福猫视频爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnXfIJthvkjhI5zlEJq84i6g", ["wangkun", "gaonannan"]),
                "kuaishou_follow": ("快手_用户主页_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf?sheet=fYdA8F", ["wangkun", "xinxin"]),
                "kuaishou_recommend": ("快手_推荐榜_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf?sheet=3cd128", ["wangkun", "xinxin"]),
                "ssnnyfq": ("岁岁年年迎福气_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnyJmJSJynHDLLbLTkySfvZe?sheet=290bae", ["wangkun", "xinxin"]),
                "kdjsfq": ("看到就是福气_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcnEokBkIjOUPAk8vbbPKnXgb?sheet=ad3b6d", ["wangkun", "xinxin"]),
                "gzh": ("公众号爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnexNXnpDLHhARw0QdiwbYuA", ["wangkun", "huxinxue"]),
                "weiqun": ("微群爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnoKThNquYRweaylMFVyo9Hc", ["wangkun", "xinxin"]),
                "weishi": ("微视爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh", ["wangkun", "xinxin"]),
                "shipinhao_recommend": ("视频号_推荐_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcn9rOdZRAGFbRkWpn7hqEHGc?sheet=c77cf9", ["wangkun", "wuchaoyue"]),
                "shipinhao_follow": ("视频号_定向_已下载表", "https://w42nne6hzg.feishu.cn/sheets/shtcn9rOdZRAGFbRkWpn7hqEHGc?sheet=KsVtLe", ["wangkun", "wuchaoyue"]),
                "zongjiao": ("宗教公众号爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcn73NW0CyoOeF21HWO15KBsb", ["wangkun", "huxinxue"])
            }
            # Unknown crawlers fall back to the Xiaoniangao sheet, @-mentioning wangkun only.
            content, sheet_url, usernames = bot_sheets.get(
                crawler,
                ("小年糕爬虫表", "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh", ["wangkun"]))
            users = "\n" + " ".join(
                "<at id=" + str(cls.get_userid(log_type, name)) + "></at>" for name in usernames) + "\n"
            data = json.dumps({
                "msg_type": "interactive",
                "card": {
                    "config": {
                        "wide_screen_mode": True,
                        "enable_forward": True
                    },
                    "elements": [{
                        "tag": "div",
                        "text": {
                            "content": users + text,
                            "tag": "lark_md"
                        }
                    }, {
                        "actions": [{
                            "tag": "button",
                            "text": {
                                "content": content,
                                "tag": "lark_md"
                            },
                            "url": sheet_url,
                            "type": "default",
                            "value": {}
                        }],
                        "tag": "action"
                    }],
                    "header": {
                        "title": {
                            "content": "📣您有新的报警,请注意查收",
                            "tag": "plain_text"
                        }
                    }
                }
            })
            urllib3.disable_warnings()
            r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
            Common.logger(log_type).info(f'Bot message sent: {r}, {r.json()["StatusMessage"]}')
        except Exception as e:
            Common.logger(log_type).error(f"bot exception: {e}\n")


if __name__ == "__main__":
    Feishu.bot("bot", "ssyy", "别紧张,还是我,测试一下")
    # print(Feishu.get_userid("shipinhao", "lijinchao"))