# zhongqingkandian.py

# -*- coding: utf-8 -*-
import json
import os
import sys
import time
from datetime import date, datetime, timedelta

import requests
import urllib3
from loguru import logger

sys.path.append(os.getcwd())
# from application.common.feishu.feishu_utils import FeishuUtils
# from application.common.log import Local


class Local(object):
    # Current time, e.g. <class 'datetime.datetime'> 2022-04-14 20:13:51.244472
    now = datetime.now()
    # Yesterday, e.g. <class 'str'> 2022-04-13
    yesterday = (date.today() + timedelta(days=-1)).strftime("%Y-%m-%d")
    # Today, e.g. <class 'datetime.date'> 2022-04-14
    today = date.today()
    # Tomorrow, e.g. <class 'str'> 2022-04-15
    tomorrow = (date.today() + timedelta(days=1)).strftime("%Y-%m-%d")

    @staticmethod
    def logger(platform, mode):
        """
        Build a log file sink with the loguru logger module.
        """
        # Log directory
        log_dir = f"./log_store/{platform}/"
        log_path = os.getcwd() + os.sep + log_dir
        if not os.path.isdir(log_path):
            os.makedirs(log_path)
        # Log file name, e.g. zhongqingkandian-recommend-2022-04-14.log
        log_name = f"{platform}-{mode}-{datetime.now().date().strftime('%Y-%m-%d')}.log"
        # Do not print logs to the console
        logger.remove(handler_id=None)
        # rotation="00:00" starts a new file every day at midnight;
        # retention="10 days" deletes log files older than 10 days.
        logger.add(os.path.join(log_dir, log_name), level="INFO", rotation="00:00",
                   retention="10 days", enqueue=True)
        return logger
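
# Usage sketch (illustrative only, not executed here): Local.logger adds a per-platform
# file sink and returns the shared loguru logger, so callers typically chain it, e.g.
#
#     Local.logger("zhongqingkandian", "recommend").info("crawler started")
#
# The message text above is a made-up example.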


"""
Feishu spreadsheet helpers: token auth / CRUD operations / bot alerts.
"""

proxies = {"http": None, "https": None}


class FeishuUtils:
    """
    Edit Feishu cloud spreadsheets.
    """
    succinct_url = "https://w42nne6hzg.feishu.cn/sheets/"

    # Feishu spreadsheet token
    @classmethod
    def spreadsheettoken(cls, crawler):
        if crawler == "summary":
            return "KsoMsyP2ghleM9tzBfmcEEXBnXg"
        else:
            return crawler

    # Get the Feishu API token
    @classmethod
    def get_token(cls):
        """
        Get the Feishu API tenant_access_token.
        :return:
        """
        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
        post_data = {"app_id": "cli_a13ad2afa438d00b",  # app id / secret of the published Feishu app
                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
        urllib3.disable_warnings()
        response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
        tenant_access_token = response.json()["tenant_access_token"]
        return tenant_access_token

    # Get spreadsheet metadata
    @classmethod
    def get_metainfo(cls, crawler):
        """
        Get spreadsheet metadata.
        :return:
        """
        try:
            get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                               + cls.spreadsheettoken(crawler) + "/metainfo"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            params = {
                "extFields": "protectedRange",  # extra fields to return; protectedRange also returns protected row/column info
                "user_id_type": "open_id"  # type of user id to return: open_id or union_id
            }
            urllib3.disable_warnings()
            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
            response = json.loads(r.content.decode("utf8"))
            return response
        except Exception as e:
            logger.error("Failed to get spreadsheet metadata: {}", e)

    # Read all data from a worksheet
    @classmethod
    def get_values_batch(cls, crawler, sheetid):
        """
        Read all data from a worksheet.
        :param crawler: which crawler's spreadsheet
        :param sheetid: which sheet
        :return: all values
        """
        get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                               + cls.spreadsheettoken(crawler) + "/values_batch_get"
        headers = {
            "Authorization": "Bearer " + cls.get_token(),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            "ranges": sheetid,
            "valueRenderOption": "ToString",
            "dateTimeRenderOption": "",
            "user_id_type": "open_id"
        }
        urllib3.disable_warnings()
        r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
        response = json.loads(r.content.decode("utf8"))
        values = response["data"]["valueRanges"][0]["values"]
        return values

    # Insert rows or columns into a worksheet
    @classmethod
    def insert_columns(cls, crawler, sheetid, majordimension, startindex, endindex):
        """
        Insert rows or columns into a worksheet.
        :param crawler: which crawler's spreadsheet
        :param sheetid: which sheet
        :param majordimension: ROWS or COLUMNS
        :param startindex: start position
        :param endindex: end position
        """
        try:
            insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                 + cls.spreadsheettoken(crawler) + "/insert_dimension_range"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "dimension": {
                    "sheetId": sheetid,
                    "majorDimension": majordimension,  # defaults to ROWS; ROWS or COLUMNS
                    "startIndex": startindex,  # start position
                    "endIndex": endindex  # end position
                },
                "inheritStyle": "AFTER"  # BEFORE or AFTER; omit to not inherit style
            }
            urllib3.disable_warnings()
            requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
        except Exception as e:
            logger.error("Failed to insert rows or columns: {}", e)

    # Write data
    @classmethod
    def update_values(cls, crawler, sheetid, ranges, values):
        """
        Write data into a worksheet.
        :param crawler: which crawler's spreadsheet
        :param sheetid: which sheet
        :param ranges: cell range
        :param values: data to write, as a list
        """
        try:
            update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                + cls.spreadsheettoken(crawler) + "/values_batch_update"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "valueRanges": [
                    {
                        "range": sheetid + "!" + ranges,
                        "values": values
                    },
                ],
            }
            urllib3.disable_warnings()
            requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
        except Exception as e:
            logger.error("Failed to write data: {}", e)

    # Merge cells
    @classmethod
    def merge_cells(cls, crawler, sheetid, ranges):
        """
        Merge cells.
        :param crawler: which crawler's spreadsheet
        :param sheetid: which sheet
        :param ranges: cell range to merge
        """
        try:
            merge_cells_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/merge_cells"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "range": sheetid + "!" + ranges,
                "mergeType": "MERGE_ROWS"
            }
            urllib3.disable_warnings()
            requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
        except Exception as e:
            logger.error("Failed to merge cells: {}", e)

    # Read a single cell
    @classmethod
    def get_range_value(cls, crawler, sheetid, cell):
        """
        Read the content of a single cell.
        :param crawler: which crawler's spreadsheet
        :param sheetid: which sheet
        :param cell: which cell
        :return: cell content
        """
        try:
            get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                  + cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            params = {
                "valueRenderOption": "FormattedValue",
                # dateTimeRenderOption=FormattedString formats dates/times per their cell format
                # (numbers are not formatted) and returns the formatted string
                "dateTimeRenderOption": "",
                # type of user id to return: open_id or union_id
                "user_id_type": "open_id"
            }
            urllib3.disable_warnings()
            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
            return r.json()["data"]["valueRange"]["values"][0]
        except Exception as e:
            logger.error("Failed to read cell data: {}", e)

    # Get all non-empty cell contents of a sheet
    @classmethod
    def get_sheet_content(cls, crawler, sheet_id):
        try:
            sheet = cls.get_values_batch(crawler, sheet_id)
            content_list = []
            for row in sheet:
                for cell in row:
                    if cell is not None:
                        content_list.append(cell)
            return content_list
        except Exception as e:
            logger.error(f'get_sheet_content:{e}\n')

    # Delete rows or columns (ROWS or COLUMNS)
    @classmethod
    def dimension_range(cls, log_type, crawler, sheetid, major_dimension, startindex, endindex):
        """
        Delete rows or columns.
        :param log_type: log path
        :param crawler: which crawler's spreadsheet
        :param sheetid: which sheet
        :param major_dimension: defaults to ROWS; ROWS or COLUMNS
        :param startindex: start position
        :param endindex: end position
        :return:
        """
        try:
            dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                  + cls.spreadsheettoken(crawler) + "/dimension_range"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "dimension": {
                    "sheetId": sheetid,
                    "majorDimension": major_dimension,
                    "startIndex": startindex,
                    "endIndex": endindex
                }
            }
            urllib3.disable_warnings()
            requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
        except Exception as e:
            logger.error("Failed to delete rows or columns: {}", e)

    # Get a user's open_id
    @classmethod
    def get_userid(cls, username):
        try:
            url = "https://open.feishu.cn/open-apis/user/v1/batch_get_id?"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            name_phone_dict = {
                "xinxin": "15546206651",
                "muxinyi": "13699208058",
                "wangxueke": "13513479926",
                "yuzhuoyi": "18624010360",
                "luojunhui": "18801281360",
                "fanjun": "15200827642",
                "zhangyong": "17600025055",
                "liukunyu": "18810931977"
            }
            username = name_phone_dict.get(username)
            data = {"mobiles": [username]}
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=data, verify=False, proxies=proxies)
            open_id = r.json()["data"]["mobile_users"][username][0]["open_id"]
            return open_id
        except Exception as e:
            pass
            # logger.error(f"get_userid failed: {e}\n")

    # Feishu bot alert
    @classmethod
    def bot(cls, log_type, crawler, text, mark_name):
        try:
            headers = {'Content-Type': 'application/json'}
            if crawler == "机器自动改造消息通知":
                url = "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703"
                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/KsoMsyP2ghleM9tzBfmcEEXBnXg?sheet=bc154d"
                users = f"<at id={cls.get_userid(log_type)}>{mark_name}</at>"
            elif crawler == "快手关键词搜索":
                url = "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703"
                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/KsoMsyP2ghleM9tzBfmcEEXBnXg?sheet=U1gySe"
                users = "".join([f'<at id="{cls.get_userid(type_)}">{name}</at>' for type_, name in
                                 zip(log_type, mark_name)])
            else:
                url = "https://open.feishu.cn/open-apis/bot/v2/hook/7928f182-08c1-4c4d-b2f7-82e10c93ca80"
                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/KsoMsyP2ghleM9tzBfmcEEXBnXg?sheet=bc154d"
                users = f"<at id={cls.get_userid(log_type)}>{mark_name}</at>"
            data = json.dumps({
                "msg_type": "interactive",
                "card": {
                    "config": {
                        "wide_screen_mode": True,
                        "enable_forward": True
                    },
                    "elements": [{
                        "tag": "div",
                        "text": {
                            "content": users + text,
                            "tag": "lark_md"
                        }
                    }, {
                        "actions": [{
                            "tag": "button",
                            "text": {
                                "content": "详情,点击~~~~~",
                                "tag": "lark_md"
                            },
                            "url": sheet_url,
                            "type": "default",
                            "value": {}
                        }],
                        "tag": "action"
                    }],
                    "header": {
                        "title": {
                            "content": "📣消息提醒",
                            "tag": "plain_text"
                        }
                    }
                }
            })
            urllib3.disable_warnings()
            requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
        except Exception as e:
            logger.error(f"bot failed: {e}\n")

    # Feishu bot: notify when a transformation plan finishes
    @classmethod
    def finish_bot(cls, text, url, content):
        try:
            headers = {'Content-Type': 'application/json'}
            data = json.dumps({
                "msg_type": "interactive",
                "card": {
                    "config": {
                        "wide_screen_mode": True,
                        "enable_forward": True
                    },
                    "elements": [{
                        "tag": "div",
                        "text": {
                            "content": text,
                            "tag": "lark_md"
                        }
                    }],
                    "header": {
                        "title": {
                            "content": content,
                            "tag": "plain_text"
                        }
                    }
                }
            })
            urllib3.disable_warnings()
            requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
        except Exception as e:
            logger.error(f"finish_bot failed: {e}\n")
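
# Usage sketch for FeishuUtils (illustrative only; "SPREADSHEET_TOKEN" and "SHEET_ID"
# are placeholders, not real documents). The write pattern used later in req_detail is:
# insert one empty row under the header, then fill it with update_values.
#
#     FeishuUtils.insert_columns("SPREADSHEET_TOKEN", "SHEET_ID", "ROWS", 1, 2)
#     FeishuUtils.update_values("SPREADSHEET_TOKEN", "SHEET_ID", "A2:Z2", [["title", "url"]])
#     rows = FeishuUtils.get_values_batch("SPREADSHEET_TOKEN", "SHEET_ID")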


class ZhongQingKanDian:
    API_BASE_URL = "http://8.217.192.46:8889"
    COMMON_HEADERS = {
        "Content-Type": "application/json"
    }
    MAX_RETRIES = 3
    TIMEOUT = 10  # request timeout in seconds

    def __init__(self):
        self.session = requests.Session()
        self.session.headers.update(self.COMMON_HEADERS)
        # Initialize request counters
        self.recommend_list_request_count = 0
        self.content_recommend_list_request_count = 0
        self.detail_request_count = 0

    def send_request(self, endpoint, data):
        full_url = f"{self.API_BASE_URL}{endpoint}"
        for retry in range(self.MAX_RETRIES):
            try:
                response = self.session.post(full_url, data=data, timeout=self.TIMEOUT)
                response.raise_for_status()
                return response.json()
            except requests.RequestException as e:
                Local.logger("zhongqingkandian", "recommend").info(
                    f"Request to {full_url} failed (attempt {retry + 1}): {e}")
                if retry < self.MAX_RETRIES - 1:
                    time.sleep(2)
            except json.JSONDecodeError as e:
                Local.logger("zhongqingkandian", "recommend").info(
                    f"Failed to parse the response from {full_url} (attempt {retry + 1}): {e}")
                if retry < self.MAX_RETRIES - 1:
                    time.sleep(2)
        return None
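
    # Debugging sketch (illustrative only, not called anywhere): send_request takes a
    # JSON string body and returns the decoded dict, or None after MAX_RETRIES failures.
    #
    #     spider = ZhongQingKanDian()
    #     resp = spider.send_request('/crawler/zhong_qing_kan_dian/recommend', json.dumps({"cursor": ""}))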

    def is_response_valid(self, resp):
        if resp and resp.get("code", -1) == 0:
            data = resp.get("data", {}).get("data")
            return data is not None
        return False
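
    # Assumed response envelope, inferred from the checks in is_response_valid rather
    # than from API documentation:
    #
    #     {"code": 0, "data": {"data": [...]}}
    #
    # i.e. success means code == 0 and a non-null data.data payload.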

    def req_recommend_list(self):
        url = '/crawler/zhong_qing_kan_dian/recommend'
        body = json.dumps({"cursor": ""})
        resp = self.send_request(url, body)
        if self.is_response_valid(resp):
            self.recommend_list_request_count += 1
            Local.logger("zhongqingkandian", "recommend").info(
                f"Total recommend-feed requests: {self.recommend_list_request_count}, response: {resp}")
            return resp["data"]["data"]
        Local.logger("zhongqingkandian", "recommend").info(
            f"Recommend-feed request failed, unexpected response: {resp}")
        return None

    def req_content_recommend_list(self, content_id):
        url = '/crawler/zhong_qing_kan_dian/related'
        body = json.dumps({
            "content_id": str(content_id),
            "cursor": ""
        })
        resp = self.send_request(url, body)
        if self.is_response_valid(resp):
            self.content_recommend_list_request_count += 1
            Local.logger("zhongqingkandian", "recommend").info(
                f"Total related-recommend requests: {self.content_recommend_list_request_count}, response: {resp}")
            return resp["data"]["data"]
        Local.logger("zhongqingkandian", "recommend").info(
            f"Related-recommend request failed, unexpected response: {resp}")
        return None

    def req_detail(self, content_link, label):
        url = '/crawler/zhong_qing_kan_dian/detail'
        body = json.dumps({
            "content_link": content_link
        })
        resp = self.send_request(url, body)
        if resp and resp.get("code") == 0:
            self.detail_request_count += 1
            Local.logger("zhongqingkandian", "recommend").info(
                f"Total detail requests: {self.detail_request_count}")
            data = resp["data"]["data"]
            if data["content_type"] == "video":
                video_id = data['channel_content_id']
                video_title = data["title"]
                video_cover = data["image_url_list"][0]['image_url']
                video_url = data["video_url_list"][0]['video_url']
                video_duration = data["video_url_list"][0]['video_duration']
                account_id = data["channel_account_id"]
                account_name = data["channel_account_name"]
                account_avatar = data["avatar"]
                values = [
                    [
                        video_title,
                        video_url,
                        video_duration,
                        video_cover,
                        video_id,
                        content_link,
                        account_name,
                        account_id,
                        account_avatar,
                        label,
                    ]
                ]
                # Insert a new row below the header of sheet a338b3, then write this video into A2:Z2.
                FeishuUtils.insert_columns("BvScsJKDWhuj1ctUX1mcBzq1nYb", "a338b3", "ROWS", 1, 2)
                time.sleep(0.5)
                FeishuUtils.update_values("BvScsJKDWhuj1ctUX1mcBzq1nYb", "a338b3", "A2:Z2", values)
            else:
                Local.logger("zhongqingkandian", "recommend").info("Not a video, skipping")
        else:
            Local.logger("zhongqingkandian", "recommend").info(f"Detail request failed, unexpected response: {resp}")
        return None

    def control_request(self):
        recommend_list = self.req_recommend_list()
        if recommend_list:
            for video_obj in recommend_list:
                content_link = video_obj.get("share_url")
                content_id = video_obj.get("id")
                if content_link and content_id:
                    time.sleep(2)
                    detail = self.req_detail(content_link, "推荐")
                    if detail:
                        print(detail)
                    time.sleep(10)
                    content_recommend_list = self.req_content_recommend_list(content_id)
                    if content_recommend_list:
                        for content_obj in content_recommend_list:
                            content_link = content_obj.get("share_info", {}).get("share_url")
                            if content_link:
                                res = self.req_detail(content_link, "内容相关推荐")
                                if res:
                                    print(res)

    def run(self):
        while True:
            self.control_request()


if __name__ == '__main__':
    ZhongQingKanDian().run()
    # ZhongQingKanDian().req_detail('https://vol.youth.cn/1qWiCPOjl1CUewP5?signature=bDjmABzyXE32GNxlOY4pJVbdZfDqw9naZ9vnQ58wq06peMdkrP','ceshi')