zhongqingkandian.py

# -*- coding: utf-8 -*-
# @Time: 2023/12/26
import json
import os
import sys
import time
from datetime import date, datetime, timedelta

import requests
import urllib3
from loguru import logger

sys.path.append(os.getcwd())
# from application.common.feishu.feishu_utils import FeishuUtils
# from application.common.log import Local

# Current time: <class 'datetime.datetime'>, e.g. 2022-04-14 20:13:51.244472
now = datetime.now()
# Yesterday: <class 'str'>, e.g. 2022-04-13
yesterday = (date.today() + timedelta(days=-1)).strftime("%Y-%m-%d")
# Today: <class 'datetime.date'>, e.g. 2022-04-14
today = date.today()
# Tomorrow: <class 'str'>, e.g. 2022-04-15
tomorrow = (date.today() + timedelta(days=1)).strftime("%Y-%m-%d")
# Log helper; the rest of this script calls it as Local.logger(platform, mode).
class Local:

    @staticmethod
    def logger(platform, mode):
        """
        Generate log files with the loguru logger module.
        """
        # Log directory
        log_dir = f"./log_store/{platform}/"
        log_path = os.getcwd() + os.sep + log_dir
        if not os.path.isdir(log_path):
            os.makedirs(log_path)
        # Log file name
        # log_name = time.strftime("%Y-%m-%d", time.localtime(time.time())) + f'-{crawler}-{log_type}.log'
        # log_name = datetime.datetime.now().strftime('%Y-%m-%d') + f'-{crawler}-{log_type}.log'
        # log_name = f"{date.today():%Y-%m-%d}-{crawler}-{log_type}.log"
        log_name = f"{platform}-{mode}-{datetime.now().date().strftime('%Y-%m-%d')}.log"
        # Do not print logs to the console
        logger.remove(handler_id=None)
        # rotation="500 MB": start a new file every 500 MB
        # rotation="12:00": start a new file every day at 12:00
        # rotation="1 week": start a new file every week
        # retention="10 days": delete old log files after 10 days
        # Initialise the log sink
        # logger.add(f"{log_dir}{log_name}", level="INFO", rotation="00:00", retention="10 days", enqueue=True)
        logger.add(os.path.join(log_dir, log_name), level="INFO", rotation="00:00", retention="10 days", enqueue=True)
        return logger
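
# Typical call, as used by the crawler further down:
#   Local.logger("zhongqingkandian", "recommend").info("message")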
  52. """
  53. 飞书表配置: token 鉴权 / 增删改查 / 机器人报警
  54. """
  55. import json
  56. import os
  57. import sys
  58. import requests
  59. import urllib3
  60. from loguru import logger
  61. proxies = {"http": None, "https": None}
class FeishuUtils:
    """
    Edit Feishu cloud spreadsheets.
    """
    succinct_url = "https://w42nne6hzg.feishu.cn/sheets/"

    # Feishu spreadsheet token
    @classmethod
    def spreadsheettoken(cls, crawler):
        if crawler == "summary":
            return "KsoMsyP2ghleM9tzBfmcEEXBnXg"
        else:
            return crawler

    # Get a Feishu API token
    @classmethod
    def get_token(cls):
        """
        Get a Feishu API token.
        :return:
        """
        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
        post_data = {"app_id": "cli_a13ad2afa438d00b",  # credentials of the backend app that publishes the bot
                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
        urllib3.disable_warnings()
        response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
        tenant_access_token = response.json()["tenant_access_token"]
        return tenant_access_token

    # Get spreadsheet metadata
    @classmethod
    def get_metainfo(cls, crawler):
        """
        Get spreadsheet metadata.
        :return:
        """
        try:
            get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                               + cls.spreadsheettoken(crawler) + "/metainfo"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            params = {
                "extFields": "protectedRange",  # extra fields to return; extFields=protectedRange returns protected row/column info
                "user_id_type": "open_id"  # type of user id to return: open_id or union_id
            }
            urllib3.disable_warnings()
            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
            response = json.loads(r.content.decode("utf8"))
            return response
        except Exception as e:
            logger.error("获取表格元数据异常:{}", e)

    # Read all data from a worksheet
    @classmethod
    def get_values_batch(cls, crawler, sheetid):
        """
        Read all data from a worksheet.
        :param crawler: which crawler's spreadsheet
        :param sheetid: which worksheet
        :return: all values
        """
        get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                               + cls.spreadsheettoken(crawler) + "/values_batch_get"
        headers = {
            "Authorization": "Bearer " + cls.get_token(),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            "ranges": sheetid,
            "valueRenderOption": "ToString",
            "dateTimeRenderOption": "",
            "user_id_type": "open_id"
        }
        urllib3.disable_warnings()
        r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
        response = json.loads(r.content.decode("utf8"))
        values = response["data"]["valueRanges"][0]["values"]
        return values
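
    # get_values_batch returns the sheet as a list of rows, each row a list of cell values
    # (taken from response["data"]["valueRanges"][0]["values"] above); get_sheet_content
    # below flattens that structure into a single list.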

    # Insert rows or columns into a worksheet
    @classmethod
    def insert_columns(cls, crawler, sheetid, majordimension, startindex, endindex):
        """
        Insert rows or columns into a worksheet.
        :param crawler: which crawler's spreadsheet
        :param sheetid: which worksheet
        :param majordimension: rows or columns, ROWS or COLUMNS
        :param startindex: start position
        :param endindex: end position
        """
        try:
            insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                 + cls.spreadsheettoken(crawler) + "/insert_dimension_range"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "dimension": {
                    "sheetId": sheetid,
                    "majorDimension": majordimension,  # defaults to ROWS; ROWS or COLUMNS
                    "startIndex": startindex,  # start position
                    "endIndex": endindex  # end position
                },
                "inheritStyle": "AFTER"  # BEFORE or AFTER; omit to not inherit style
            }
            urllib3.disable_warnings()
            r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
        except Exception as e:
            logger.error("插入行或列异常:{}", e)

    # Write data
    @classmethod
    def update_values(cls, crawler, sheetid, ranges, values):
        """
        Write data into a worksheet.
        :param crawler: which crawler's spreadsheet
        :param sheetid: which worksheet
        :param ranges: cell range
        :param values: the data to write, as a list
        """
        try:
            update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                + cls.spreadsheettoken(crawler) + "/values_batch_update"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "valueRanges": [
                    {
                        "range": sheetid + "!" + ranges,
                        "values": values
                    },
                ],
            }
            urllib3.disable_warnings()
            r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
        except Exception as e:
            logger.error("写入数据异常:{}", e)

    # Merge cells
    @classmethod
    def merge_cells(cls, crawler, sheetid, ranges):
        """
        Merge cells.
        :param crawler: which crawler's spreadsheet
        :param sheetid: which worksheet
        :param ranges: the cell range to merge
        """
        try:
            merge_cells_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                              + cls.spreadsheettoken(crawler) + "/merge_cells"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "range": sheetid + "!" + ranges,
                "mergeType": "MERGE_ROWS"
            }
            urllib3.disable_warnings()
            r = requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
        except Exception as e:
            logger.error("合并单元格异常:{}", e)

    # Read a single cell
    @classmethod
    def get_range_value(cls, crawler, sheetid, cell):
        """
        Read the content of a single cell.
        :param crawler: which crawler's spreadsheet
        :param sheetid: which worksheet
        :param cell: which cell
        :return: cell content
        """
        try:
            get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                  + cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            params = {
                "valueRenderOption": "FormattedValue",
                # dateTimeRenderOption=FormattedString formats dates/times according to their format
                # (numbers are not formatted) and returns the formatted string.
                "dateTimeRenderOption": "",
                # type of user id to return: open_id or union_id
                "user_id_type": "open_id"
            }
            urllib3.disable_warnings()
            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
            # logger.error(r.text)
            return r.json()["data"]["valueRange"]["values"][0]
        except Exception as e:
            logger.error("读取单元格数据异常:{}", e)

    # Get the content of a sheet as a flat list
    @classmethod
    def get_sheet_content(cls, crawler, sheet_id):
        try:
            sheet = cls.get_values_batch(crawler, sheet_id)
            content_list = []
            for x in sheet:
                for y in x:
                    if y is None:
                        pass
                    else:
                        content_list.append(y)
            return content_list
        except Exception as e:
            logger.error(f'get_sheet_content:{e}\n')

    # Delete rows or columns; ROWS or COLUMNS
    @classmethod
    def dimension_range(cls, log_type, crawler, sheetid, major_dimension, startindex, endindex):
        """
        Delete rows or columns.
        :param log_type: log path
        :param crawler: which crawler's spreadsheet
        :param sheetid: which worksheet
        :param major_dimension: defaults to ROWS; ROWS or COLUMNS
        :param startindex: start position
        :param endindex: end position
        :return:
        """
        try:
            dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
                                  + cls.spreadsheettoken(crawler) + "/dimension_range"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            body = {
                "dimension": {
                    "sheetId": sheetid,
                    "majorDimension": major_dimension,
                    "startIndex": startindex,
                    "endIndex": endindex
                }
            }
            urllib3.disable_warnings()
            r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
        except Exception as e:
            logger.error("删除视频数据异常:{}", e)

    # Get a user's open_id
    @classmethod
    def get_userid(cls, username):
        try:
            url = "https://open.feishu.cn/open-apis/user/v1/batch_get_id?"
            headers = {
                "Authorization": "Bearer " + cls.get_token(),
                "Content-Type": "application/json; charset=utf-8"
            }
            name_phone_dict = {
                "xinxin": "15546206651",
                "muxinyi": "13699208058",
                "wangxueke": "13513479926",
                "yuzhuoyi": "18624010360",
                "luojunhui": "18801281360",
                "fanjun": "15200827642",
                "zhangyong": "17600025055",
                "liukunyu": "18810931977"
            }
            username = name_phone_dict.get(username)
            data = {"mobiles": [username]}
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=data, verify=False, proxies=proxies)
            open_id = r.json()["data"]["mobile_users"][username][0]["open_id"]
            return open_id
        except Exception as e:
            pass
            # logger.error(f"get_userid异常:{e}\n")

    # Feishu bot alert
    @classmethod
    def bot(cls, log_type, crawler, text, mark_name):
        try:
            headers = {'Content-Type': 'application/json'}
            if crawler == "机器自动改造消息通知":
                url = "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703"
                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/KsoMsyP2ghleM9tzBfmcEEXBnXg?sheet=bc154d"
                users = f"<at id={cls.get_userid(log_type)}>{mark_name}</at>"
            elif crawler == "快手关键词搜索":
                url = "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703"
                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/KsoMsyP2ghleM9tzBfmcEEXBnXg?sheet=U1gySe"
                users = "".join([f'<at id="{cls.get_userid(type)}">{name}</at>' for type, name in
                                 zip(log_type, mark_name)])
                # users = f"<at id=" + str(cls.get_userid(log_type)) + f">{mark_name}</at>"
            else:
                url = "https://open.feishu.cn/open-apis/bot/v2/hook/7928f182-08c1-4c4d-b2f7-82e10c93ca80"
                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/KsoMsyP2ghleM9tzBfmcEEXBnXg?sheet=bc154d"
                users = f"<at id={cls.get_userid(log_type)}>{mark_name}</at>"
            data = json.dumps({
                "msg_type": "interactive",
                "card": {
                    "config": {
                        "wide_screen_mode": True,
                        "enable_forward": True
                    },
                    "elements": [{
                        "tag": "div",
                        "text": {
                            "content": users + text,
                            "tag": "lark_md"
                        }
                    }, {
                        "actions": [{
                            "tag": "button",
                            "text": {
                                "content": "详情,点击~~~~~",
                                "tag": "lark_md"
                            },
                            "url": sheet_url,
                            "type": "default",
                            "value": {}
                        }],
                        "tag": "action"
                    }],
                    "header": {
                        "title": {
                            "content": "📣消息提醒",
                            "tag": "plain_text"
                        }
                    }
                }
            })
            urllib3.disable_warnings()
            r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
        except Exception as e:
            logger.error(f"bot异常:{e}\n")

    # Feishu bot: notification that a transformation plan has finished
    @classmethod
    def finish_bot(cls, text, url, content):
        try:
            headers = {'Content-Type': 'application/json'}
            data = json.dumps({
                "msg_type": "interactive",
                "card": {
                    "config": {
                        "wide_screen_mode": True,
                        "enable_forward": True
                    },
                    "elements": [{
                        "tag": "div",
                        "text": {
                            "content": text,
                            "tag": "lark_md"
                        }
                    }],
                    "header": {
                        "title": {
                            "content": content,
                            "tag": "plain_text"
                        }
                    }
                }
            })
            urllib3.disable_warnings()
            r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
        except Exception as e:
            logger.error(f"bot异常:{e}\n")
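
# A minimal FeishuUtils usage sketch (illustrative only; the spreadsheet token
# "BvScsJKDWhuj1ctUX1mcBzq1nYb" and sheet id "a338b3" are the ones used by the crawler
# below, the row values are placeholders):
#
#   FeishuUtils.insert_columns("BvScsJKDWhuj1ctUX1mcBzq1nYb", "a338b3", "ROWS", 1, 2)
#   FeishuUtils.update_values("BvScsJKDWhuj1ctUX1mcBzq1nYb", "a338b3", "A2:Z2", [["title", "url"]])
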
class ZhongQingKanDian:
    """
    中青看点 crawler: pulls the recommend feed and per-content related feeds from the
    crawler API, fetches video details, and writes video rows to a Feishu sheet.
    """
    API_BASE_URL = "http://8.217.192.46:8889"
    COMMON_HEADERS = {
        "Content-Type": "application/json"
    }
    MAX_RETRIES = 3
    TIMEOUT = 10  # request timeout in seconds

    def __init__(self):
        self.session = requests.Session()
        self.session.headers.update(self.COMMON_HEADERS)
        # Request counters
        self.recommend_list_request_count = 0
        self.content_recommend_list_request_count = 0
        self.detail_request_count = 0

    def send_request(self, endpoint, data):
        full_url = f"{self.API_BASE_URL}{endpoint}"
        for retry in range(self.MAX_RETRIES):
            try:
                response = self.session.post(full_url, data=data, timeout=self.TIMEOUT)
                response.raise_for_status()
                return response.json()
            except requests.RequestException as e:
                Local.logger("zhongqingkandian", "recommend").info(
                    f"请求 {full_url} 失败(第 {retry + 1} 次重试): {e}")
                if retry < self.MAX_RETRIES - 1:
                    time.sleep(2)
            except json.JSONDecodeError as e:
                Local.logger("zhongqingkandian", "recommend").info(
                    f"解析 {full_url} 的响应数据失败(第 {retry + 1} 次重试): {e}")
                # print(f"解析 {full_url} 的响应数据失败(第 {retry + 1} 次重试): {e}")
                if retry < self.MAX_RETRIES - 1:
                    time.sleep(2)
        return None

    def is_response_valid(self, resp):
        if resp and resp.get("code", -1) == 0:
            data = resp.get("data", {}).get("data")
            return data is not None
        return False
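
    # Expected response envelope, as implied by the checks above (the shape of the inner
    # "data" payload varies by endpoint):
    #   {"code": 0, "data": {"data": [...]}}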

    def req_recommend_list(self):
        url = '/crawler/zhong_qing_kan_dian/recommend'
        body = json.dumps({"cursor": ""})
        resp = self.send_request(url, body)
        if self.is_response_valid(resp):
            self.recommend_list_request_count += 1
            Local.logger("zhongqingkandian", "recommend").info(
                f"请求推荐流的总次数: {self.recommend_list_request_count}响应:{resp}")
            return resp["data"]["data"]
        Local.logger("zhongqingkandian", "recommend").info(
            f"请求推荐流失败,返回异常: {resp}")
        return None

    def req_content_recommend_list(self, content_id):
        url = '/crawler/zhong_qing_kan_dian/related'
        body = json.dumps({
            "content_id": str(content_id),
            "cursor": ""
        })
        resp = self.send_request(url, body)
        if self.is_response_valid(resp):
            self.content_recommend_list_request_count += 1
            Local.logger("zhongqingkandian", "recommend").info(
                f"请求内容相关推荐流的总次数: {self.content_recommend_list_request_count}响应:{resp}")
            return resp["data"]["data"]
        Local.logger("zhongqingkandian", "recommend").info(
            f"请求内容相关推荐流失败,返回异常: {resp}")
        return None

    def req_detail(self, content_link, label):
        url = '/crawler/zhong_qing_kan_dian/detail'
        body = json.dumps({
            "content_link": content_link
        })
        resp = self.send_request(url, body)
        if resp and resp.get("code") == 0:
            self.detail_request_count += 1
            Local.logger("zhongqingkandian", "recommend").info(f"请求详情的总次数: {self.detail_request_count}")
            data = resp["data"]["data"]
            if data["content_type"] == "video":
                video_id = data['channel_content_id']
                video_title = data["title"]
                video_cover = data["image_url_list"][0]['image_url']
                video_url = data["video_url_list"][0]['video_url']
                video_duration = data["video_url_list"][0]['video_duration']
                account_id = data["channel_account_id"]
                account_name = data["channel_account_name"]
                account_avatar = data["avatar"]
                values = [
                    [
                        video_title,
                        video_url,
                        video_duration,
                        video_cover,
                        video_id,
                        content_link,
                        account_name,
                        account_id,
                        account_avatar,
                        label,
                    ]
                ]
                # Insert a new row, then write the video record into the Feishu sheet
                FeishuUtils.insert_columns("BvScsJKDWhuj1ctUX1mcBzq1nYb", "a338b3", "ROWS", 1, 2)
                time.sleep(0.5)
                FeishuUtils.update_values("BvScsJKDWhuj1ctUX1mcBzq1nYb", "a338b3", "A2:Z2", values)
            else:
                Local.logger("zhongqingkandian", "recommend").info("不是视频")
        else:
            Local.logger("zhongqingkandian", "recommend").info(f"请求详情失败,返回异常: {resp}")
        return None

    def control_request(self):
        recommend_list = self.req_recommend_list()
        if recommend_list:
            for video_obj in recommend_list:
                content_link = video_obj.get("share_url")
                content_id = video_obj.get("id")
                if content_link and content_id:
                    time.sleep(2)
                    detail = self.req_detail(content_link, "推荐")
                    if detail:
                        print(detail)
                    time.sleep(10)
                    content_recommend_list = self.req_content_recommend_list(content_id)
                    if content_recommend_list:
                        for content_obj in content_recommend_list:
                            content_link = content_obj.get("share_info", {}).get("share_url")
                            if content_link:
                                res = self.req_detail(content_link, "内容相关推荐")
                                if res:
                                    print(res)

    def run(self):
        # Loop forever, repeatedly walking the recommend feed
        while True:
            self.control_request()


if __name__ == '__main__':
    ZhongQingKanDian().run()
    # ZhongQingKanDian().req_detail('https://vol.youth.cn/1qWiCPOjl1CUewP5?signature=bDjmABzyXE32GNxlOY4pJVbdZfDqw9naZ9vnQ58wq06peMdkrP','ceshi')
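    # For a single pass during local testing (illustrative; run() loops indefinitely):
    # ZhongQingKanDian().control_request()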