# xg_recommend.py
  1. # -*- coding: utf-8 -*-
  2. # @Author: wangkun
  3. # @Time: 2023/7/10
  4. import base64
  5. import datetime
  6. import json
  7. import os
  8. import random
  9. import string
  10. import subprocess
  11. import sys
  12. import time
  13. import requests
  14. import urllib3
  15. import re
  16. from requests.adapters import HTTPAdapter
  17. from selenium import webdriver
  18. from selenium.webdriver import DesiredCapabilities
  19. from selenium.webdriver.chrome.service import Service
  20. from selenium.webdriver.chrome.webdriver import WebDriver
  21. from selenium.webdriver.common.by import By
  22. sys.path.append(os.getcwd())
  23. from common.mq import MQ
  24. from common.feishu import Feishu
  25. from common.public import download_rule, get_config_from_mysql
  26. from common.common import Common
  27. from common.scheduling_db import MysqlHelper
  28. from common.userAgent import get_random_user_agent
  29. class XiguaRecommend:
  30. platform = "xigua"
  31. @classmethod
  32. def random_signature(cls):
  33. src_digits = string.digits # string_数字
  34. src_uppercase = string.ascii_uppercase # string_大写字母
  35. src_lowercase = string.ascii_lowercase # string_小写字母
  36. digits_num = random.randint(1, 6)
  37. uppercase_num = random.randint(1, 26 - digits_num - 1)
  38. lowercase_num = 26 - (digits_num + uppercase_num)
  39. password = random.sample(src_digits, digits_num) + random.sample(src_uppercase, uppercase_num) + random.sample(
  40. src_lowercase, lowercase_num)
  41. random.shuffle(password)
  42. new_password = 'AAAAAAAAAA' + ''.join(password)[10:-4] + 'AAAB'
  43. new_password_start = new_password[0:18]
  44. new_password_end = new_password[-7:]
  45. if new_password[18] == '8':
  46. new_password = new_password_start + 'w' + new_password_end
  47. elif new_password[18] == '9':
  48. new_password = new_password_start + 'x' + new_password_end
  49. elif new_password[18] == '-':
  50. new_password = new_password_start + 'y' + new_password_end
  51. elif new_password[18] == '.':
  52. new_password = new_password_start + 'z' + new_password_end
  53. else:
  54. new_password = new_password_start + 'y' + new_password_end
  55. return new_password
  56. @classmethod
  57. def get_video_url(cls, video_info):
  58. video_url_dict = {}
  59. video_resource = video_info.get('videoResource', {})
  60. dash_120fps = video_resource.get('dash_120fps', {})
  61. normal = video_resource.get('normal', {})
  62. # 从dash_120fps和normal字典中获取video_list字典
  63. video_list = dash_120fps.get('video_list', {}) or normal.get('video_list', {})
  64. # 获取video_list字典中的video_4、video_3、video_2或video_1的值。如果找到非空视频URL,则将其赋值给变量video_url。否则,将赋值为空字符串。
  65. video = video_list.get('video_4') or video_list.get('video_3') or video_list.get('video_2') or video_list.get('video_1')
  66. video_url = video.get('backup_url_1', '') if video else ''
  67. audio_url = video.get('backup_url_1', '') if video else ''
  68. video_width = video.get('vwidth', 0) if video else 0
  69. video_height = video.get('vheight', 0) if video else 0
  70. video_url = re.sub(r'[^a-zA-Z0-9+/=]', '', video_url) # 从视频URL中删除特殊字符
  71. audio_url = re.sub(r'[^a-zA-Z0-9+/=]', '', audio_url) # 从音频URL中删除特殊字符
  72. video_url = base64.b64decode(video_url).decode('utf8') # 解码视频URL
  73. audio_url = base64.b64decode(audio_url).decode('utf8') # 解码音频URL
  74. video_url_dict["video_url"] = video_url
  75. video_url_dict["audio_url"] = audio_url
  76. video_url_dict["video_width"] = video_width
  77. video_url_dict["video_height"] = video_height
  78. return video_url_dict
  79. @classmethod
  80. def get_comment_cnt(cls, item_id):
  81. url = "https://www.ixigua.com/tlb/comment/article/v5/tab_comments/?"
  82. params = {
  83. "tab_index": "0",
  84. "count": "10",
  85. "offset": "10",
  86. "group_id": str(item_id),
  87. "item_id": str(item_id),
  88. "aid": "1768",
  89. "msToken": "50-JJObWB07HfHs-BMJWT1eIDX3G-6lPSF_i-QwxBIXE9VVa-iN0jbEXR5pG2DKjXBmP299n6ZTuXzY-GAy968CCvouSAYIS4GzvGQT3pNlKNejr5G4-1g==",
  90. "X-Bogus": "DFSzswVOyGtANVeWtCLMqR/F6q9U",
  91. "_signature": cls.random_signature(),
  92. }
  93. headers = {
  94. 'authority': 'www.ixigua.com',
  95. 'accept': 'application/json, text/plain, */*',
  96. 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
  97. 'cache-control': 'no-cache',
  98. 'cookie': 'MONITOR_WEB_ID=67cb5099-a022-4ec3-bb8e-c4de6ba51dd0; passport_csrf_token=72b2574f3c99f8ba670e42df430218fd; passport_csrf_token_default=72b2574f3c99f8ba670e42df430218fd; sid_guard=c7472b508ea631823ba765a60cf8757f%7C1680867422%7C3024002%7CFri%2C+12-May-2023+11%3A37%3A04+GMT; uid_tt=c13f47d51767f616befe32fb3e9f485a; uid_tt_ss=c13f47d51767f616befe32fb3e9f485a; sid_tt=c7472b508ea631823ba765a60cf8757f; sessionid=c7472b508ea631823ba765a60cf8757f; sessionid_ss=c7472b508ea631823ba765a60cf8757f; sid_ucp_v1=1.0.0-KGUzNWYxNmRkZGJiZjgxY2MzZWNkMTEzMTkwYjY1Yjg5OTY5NzVlNmMKFQiu3d-eqQIQ3oDAoQYYGCAMOAhACxoCaGwiIGM3NDcyYjUwOGVhNjMxODIzYmE3NjVhNjBjZjg3NTdm; ssid_ucp_v1=1.0.0-KGUzNWYxNmRkZGJiZjgxY2MzZWNkMTEzMTkwYjY1Yjg5OTY5NzVlNmMKFQiu3d-eqQIQ3oDAoQYYGCAMOAhACxoCaGwiIGM3NDcyYjUwOGVhNjMxODIzYmE3NjVhNjBjZjg3NTdm; odin_tt=b893608d4dde2e1e8df8cd5d97a0e2fbeafc4ca762ac72ebef6e6c97e2ed19859bb01d46b4190ddd6dd17d7f9678e1de; SEARCH_CARD_MODE=7168304743566296612_0; support_webp=true; support_avif=false; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; tt_scid=7Pux7s634-z8DYvCM20y7KigwH5u7Rh6D9C-RROpnT.aGMEcz6Vsxp.oai47wJqa4f86; ttwid=1%7CHHtv2QqpSGuSu8r-zXF1QoWsvjmNi1SJrqOrZzg-UCY%7C1683858689%7Ca5223fe1500578e01e138a0d71d6444692018296c4c24f5885af174a65873c95; ixigua-a-s=3; msToken=50-JJObWB07HfHs-BMJWT1eIDX3G-6lPSF_i-QwxBIXE9VVa-iN0jbEXR5pG2DKjXBmP299n6ZTuXzY-GAy968CCvouSAYIS4GzvGQT3pNlKNejr5G4-1g==; __ac_nonce=0645dcbf0005064517440; __ac_signature=_02B4Z6wo00f01FEGmAwAAIDBKchzCGqn-MBRJpyAAHAjieFC5GEg6gGiwz.I4PRrJl7f0GcixFrExKmgt6QI1i1S-dQyofPEj2ugWTCnmKUdJQv-wYuDofeKNe8VtMtZq2aKewyUGeKU-5Ud21; ixigua-a-s=3',
  99. 'pragma': 'no-cache',
  100. 'referer': f'https://www.ixigua.com/{item_id}?logTag=3c5aa86a8600b9ab8540',
  101. 'sec-ch-ua': '"Microsoft Edge";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
  102. 'sec-ch-ua-mobile': '?0',
  103. 'sec-ch-ua-platform': '"macOS"',
  104. 'sec-fetch-dest': 'empty',
  105. 'sec-fetch-mode': 'cors',
  106. 'sec-fetch-site': 'same-origin',
  107. 'tt-anti-token': 'cBITBHvmYjEygzv-f9c78c1297722cf1f559c74b084e4525ce4900bdcf9e8588f20cc7c2e3234422',
  108. 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35',
  109. 'x-secsdk-csrf-token': '000100000001f8e733cf37f0cd255a51aea9a81ff7bc0c09490cfe41ad827c3c5c18ec809279175e4d9f5553d8a5'
  110. }
  111. urllib3.disable_warnings()
  112. s = requests.session()
  113. # max_retries=3 重试3次
  114. s.mount('http://', HTTPAdapter(max_retries=3))
  115. s.mount('https://', HTTPAdapter(max_retries=3))
  116. response = s.get(url=url, headers=headers, params=params, verify=False, proxies=Common.tunnel_proxies(),
  117. timeout=5)
  118. response.close()
  119. if response.status_code != 200 or 'total_number' not in response.json() or response.json() == {}:
  120. return 0
  121. return response.json().get("total_number", 0)
  122. # 获取视频详情
  123. @classmethod
  124. def get_video_info(cls, log_type, crawler, item_id):
  125. url = 'https://www.ixigua.com/api/mixVideo/information?'
  126. headers = {
  127. "accept-encoding": "gzip, deflate",
  128. "accept-language": "zh-CN,zh-Hans;q=0.9",
  129. "user-agent": get_random_user_agent('pc'),
  130. "referer": "https://www.ixigua.com/7102614741050196520?logTag=0531c88ac04f38ab2c62",
  131. }
  132. params = {
  133. 'mixId': str(item_id),
  134. 'msToken': 'IlG0wd0Pylyw9ghcYiB2YseUmTwrsrqqhXrbIcsSaTcLTJyVlbYJzk20zw3UO-CfrfC'
  135. 'NVVIOBNjIl7vfBoxnVUwO9ZyzAI3umSKsT5-pef_RRfQCJwmA',
  136. 'X-Bogus': 'DFSzswVupYTANCJOSBk0P53WxM-r',
  137. '_signature': '_02B4Z6wo0000119LvEwAAIDCuktNZ0y5wkdfS7jAALThuOR8D9yWNZ.EmWHKV0WSn6Px'
  138. 'fPsH9-BldyxVje0f49ryXgmn7Tzk-swEHNb15TiGqa6YF.cX0jW8Eds1TtJOIZyfc9s5emH7gdWN94',
  139. }
  140. cookies = {
  141. 'ixigua-a-s': '1',
  142. 'msToken': 'IlG0wd0Pylyw9ghcYiB2YseUmTwrsrqqhXrbIcsSaTcLTJyVlbYJzk20zw3UO-CfrfCNVVIOB'
  143. 'NjIl7vfBoxnVUwO9ZyzAI3umSKsT5-pef_RRfQCJwmA',
  144. 'ttwid': '1%7C_yXQeHWwLZgCsgHClOwTCdYSOt_MjdOkgnPIkpi-Sr8%7C1661241238%7Cf57d0c5ef3f1d7'
  145. '6e049fccdca1ac54887c34d1f8731c8e51a49780ff0ceab9f8',
  146. 'tt_scid': 'QZ4l8KXDG0YAEaMCSbADdcybdKbUfG4BC6S4OBv9lpRS5VyqYLX2bIR8CTeZeGHR9ee3',
  147. 'MONITOR_WEB_ID': '0a49204a-7af5-4e96-95f0-f4bafb7450ad',
  148. '__ac_nonce': '06304878000964fdad287',
  149. '__ac_signature': '_02B4Z6wo00f017Rcr3AAAIDCUVxeW1tOKEu0fKvAAI4cvoYzV-wBhq7B6D8k0no7lb'
  150. 'FlvYoinmtK6UXjRIYPXnahUlFTvmWVtb77jsMkKAXzAEsLE56m36RlvL7ky.M3Xn52r9t1IEb7IR3ke8',
  151. 'ttcid': 'e56fabf6e85d4adf9e4d91902496a0e882',
  152. '_tea_utm_cache_1300': 'undefined',
  153. 'support_avif': 'false',
  154. 'support_webp': 'false',
  155. 'xiguavideopcwebid': '7134967546256016900',
  156. 'xiguavideopcwebid.sig': 'xxRww5R1VEMJN_dQepHorEu_eAc',
  157. }
  158. urllib3.disable_warnings()
  159. s = requests.session()
  160. # max_retries=3 重试3次
  161. s.mount('http://', HTTPAdapter(max_retries=3))
  162. s.mount('https://', HTTPAdapter(max_retries=3))
  163. response = s.get(url=url, headers=headers, params=params, cookies=cookies, verify=False,
  164. proxies=Common.tunnel_proxies(), timeout=5)
  165. response.close()
  166. if response.status_code != 200 or 'data' not in response.json() or response.json()['data'] == {}:
  167. Common.logger(log_type, crawler).warning(f"get_video_info:{response.status_code}, {response.text}\n")
  168. return None
  169. else:
  170. video_info = response.json()['data'].get("gidInformation", {}).get("packerData", {}).get("video", {})
  171. if video_info == {}:
  172. return None
  173. video_dict = {
  174. "video_title": video_info.get("title", ""),
  175. "video_id": video_info.get("videoResource", {}).get("vid", ""),
  176. "gid": str(item_id),
  177. "play_cnt": int(video_info.get("video_watch_count", 0)),
  178. "like_cnt": int(video_info.get("video_like_count", 0)),
  179. "comment_cnt": int(cls.get_comment_cnt(item_id)),
  180. "share_cnt": 0,
  181. "favorite_cnt": 0,
  182. "duration": int(video_info.get("video_duration", 0)),
  183. "video_width": int(cls.get_video_url(video_info)["video_width"]),
  184. "video_height": int(cls.get_video_url(video_info)["video_height"]),
  185. "publish_time_stamp": int(video_info.get("video_publish_time", 0)),
  186. "publish_time_str": time.strftime("%Y-%m-%d %H:%M:%S",
  187. time.localtime(int(video_info.get("video_publish_time", 0)))),
  188. "user_name": video_info.get("user_info", {}).get("name", ""),
  189. "user_id": str(video_info.get("user_info", {}).get("user_id", "")),
  190. "avatar_url": str(video_info.get("user_info", {}).get("avatar_url", "")),
  191. "cover_url": video_info.get("poster_url", ""),
  192. "audio_url": cls.get_video_url(video_info)["audio_url"],
  193. "video_url": cls.get_video_url(video_info)["video_url"],
  194. "session": f"xigua-search-{int(time.time())}"
  195. }
  196. return video_dict
  197. @classmethod
  198. def repeat_video(cls, log_type, crawler, video_id, env):
  199. sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
  200. repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
  201. return len(repeat_video)
  202. @classmethod
  203. def quit(cls, log_type, crawler, env, driver: WebDriver):
  204. Common.logger(log_type, crawler).info("退出浏览器")
  205. Common.logging(log_type, crawler, env, "退出浏览器")
  206. driver.quit()
  207. quit_cmd = "ps aux | grep Chrome | grep -v grep | awk '{print $2}' | xargs kill -9"
  208. os.system(quit_cmd)
    @classmethod
    def get_videoList(cls, log_type, crawler, our_uid, rule_dict, env):
        """Scrape the Xigua recommend page via a locally attached Chrome.

        Flow: restart Chrome with a debug port, attach Selenium, read the
        visible feed cards, fetch each video's detail, skip items that fail
        download rules / contain filter words / were downloaded before, and
        publish the rest to the ETL message queue.

        :param log_type: logger channel name.
        :param crawler: crawler/platform name used for logging and output paths.
        :param our_uid: internal user id written into the outgoing message.
        :param rule_dict: download-rule config passed to download_rule().
        :param env: environment name ("dev" or other) — selects chromedriver
            path and the MQ topic suffix.
        """
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        Common.logger(log_type, crawler).info("启动 Chrome 浏览器")
        Common.logging(log_type, crawler, env, "启动 Chrome 浏览器")
        # Kill all running Chrome processes so the debug-port instance starts clean.
        quit_cmd = "ps aux | grep Chrome | grep -v grep | awk '{print $2}' | xargs kill -9"
        os.system(quit_cmd)
        time.sleep(1)
        # Launch Chrome with remote debugging on port 12306 (macOS `open` command).
        cmd = 'open -a "Google Chrome" --args --remote-debugging-port=12306'
        os.system(cmd)
        # Enable performance log capture in the driver capabilities.
        ca = DesiredCapabilities.CHROME
        ca["goog:loggingPrefs"] = {"performance": "ALL"}
        # Select the chromedriver binary per environment (hard-coded local paths).
        if env == "dev":
            chromedriver = "/Users/wangkun/Downloads/chromedriver/chromedriver_v114/chromedriver"
        else:
            # chromedriver = "/usr/bin/chromedriver"
            chromedriver = "/Users/kanyikan/Downloads/chromedriver/chromedriver_v114/chromedriver"
        # # IP proxy setup (disabled)
        # proxy = Proxy()
        # proxy.proxy_type = ProxyType.MANUAL
        # proxy.http_proxy = Common.tunnel_proxies()["http"]  # proxy IP address and port
        # Attach to the already-running Chrome through the debugger address.
        browser = webdriver.ChromeOptions()
        # browser.add_argument(f'--proxy-server={Common.tunnel_proxies()}')  # proxy IP address and port
        browser.add_experimental_option("debuggerAddress", "127.0.0.1:12306")
        # Driver init.
        driver = webdriver.Chrome(desired_capabilities=ca, options=browser, service=Service(chromedriver))
        driver.implicitly_wait(10)
        Common.logger(log_type, crawler).info("打开西瓜推荐页")
        Common.logging(log_type, crawler, env, "打开西瓜推荐页")
        driver.get(f"https://www.ixigua.com/")
        time.sleep(2)
        # Login check: the header avatar element is only present for a live session.
        if len(driver.find_elements(By.XPATH, '//*[@class="BU-Component-Header-Avatar__image"]')) == 0:
            Common.logger(log_type, crawler).info("登录失效")
            Common.logging(log_type, crawler, env, "登录失效")
            driver.get_screenshot_as_file(f"./{crawler}/photos/logon_err.png")
            # # Login expired — alert (disabled)
            # if 20 >= datetime.datetime.now().hour >= 10:
            #     Feishu.bot(log_type, crawler, "西瓜推荐,登录失效")
            return
        videoList_elements = driver.find_elements(By.XPATH, '//*[@class="HorizontalFeedCard HorizontalChannelBlockList__item"]')
        if len(videoList_elements) == 0:
            Common.logger(log_type, crawler).info("到底啦~~~~~~~~~~\n")
            Common.logging(log_type, crawler, env, "到底啦~~~~~~~~~~\n")
            cls.quit(log_type, crawler, env, driver)
            return
        for i, video_element in enumerate(videoList_elements):
            Common.logger(log_type, crawler).info(f"正在抓取第{i+1}条视频")
            Common.logging(log_type, crawler, env, f"正在抓取第{i+1}条视频")
            # NOTE(review): this XPath is document-relative, so it re-queries
            # ALL cover links and selects by index i rather than searching
            # inside video_element — relies on both lists being parallel.
            item_id = video_element.find_elements(By.XPATH, '//*[@class="HorizontalFeedCard__coverWrapper disableZoomAnimation"]')[i].get_attribute("href")
            # Strip the site prefix (and a stray "?&") to leave the bare item id.
            item_id = item_id.replace("https://www.ixigua.com/", "").replace("?&", "")
            Common.logger(log_type, crawler).info(f"item_id:{item_id}")
            video_dict = cls.get_video_info(log_type, crawler, item_id)
            if video_dict is None:
                Common.logger(log_type, crawler).info("无效视频\n")
                Common.logging(log_type, crawler, env, "无效视频\n")
                continue
            for k, v in video_dict.items():
                Common.logger(log_type, crawler).info(f"{k}:{v}")
            Common.logging(log_type, crawler, env, f"{video_dict}")
            if download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                Common.logger(log_type, crawler).info("不满足抓取规则\n")
                Common.logging(log_type, crawler, env, "不满足抓取规则\n")
            # Drop the item if its title contains any configured filter word.
            elif any(str(word) if str(word) in video_dict["video_title"] else False
                     for word in get_config_from_mysql(log_type=log_type,
                                                       source=crawler,
                                                       env=env,
                                                       text="filter",
                                                       action="")) is True:
                Common.logger(log_type, crawler).info('已中过滤词\n')
                Common.logging(log_type, crawler, env, '已中过滤词\n')
            elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                Common.logger(log_type, crawler).info('视频已下载\n')
                Common.logging(log_type, crawler, env, '视频已下载\n')
            else:
                # Common.logger(log_type, crawler).info("满足下载规则\n")
                # Remap fields to the ETL message schema, then enqueue.
                video_dict["out_user_id"] = video_dict["user_id"]
                video_dict["platform"] = crawler
                video_dict["strategy"] = log_type
                video_dict["out_video_id"] = video_dict["video_id"]
                video_dict["width"] = video_dict["video_width"]
                video_dict["height"] = video_dict["video_height"]
                video_dict["crawler_rule"] = json.dumps(rule_dict)
                video_dict["user_id"] = our_uid
                video_dict["publish_time"] = video_dict["publish_time_str"]
                video_dict["strategy_type"] = log_type
                mq.send_msg(video_dict)
        # Tear down the browser after the whole card list has been processed.
        cls.quit(log_type, crawler, env, driver)
if __name__ == "__main__":
    # XiguaRecommend.get_videoList("recommend", "xigua", "dev")
    # Debug tail: prints the CompletedProcess repr of `crontab -l`.
    print(subprocess.run(['crontab', '-l']))
    # NOTE(review): `crontab -e` opens an interactive editor and will block
    # (or fail) in a non-interactive run — confirm this is intentional.
    print(subprocess.run(['crontab', '-e']))
    pass