# zfsh_recommend.py

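"""Recommend-feed crawler for the 祝福生活 ("Blessed Life") WeChat mini program.

Pages through mini.vvuiiu.cn/article/getArticleList, resolves each item's playable
URL via /article/detail, drops invalid / rule-violating / filtered / duplicate items,
and publishes the rest to the crawler ETL message queue.
"""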
import json
import random
import time
import uuid

import requests

from common.common import Common
from common.scheduling_db import MysqlHelper
from common.mq import MQ
from common.aliyun_log import AliyunLogger
from common.public import download_rule, get_config_from_mysql
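
# Not in the original file: every request below passes verify=False, which makes
# urllib3 emit an InsecureRequestWarning per call; silencing it keeps the crawler
# logs readable (urllib3 ships as a dependency of requests).
import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)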

# No proxy: go straight to the origin even if the environment defines one.
proxies = {"http": None, "https": None}

# Request headers mimicking the WeChat mini-program webview this API expects.
headers = {
    'Host': 'mini.vvuiiu.cn',
    'accept': '*/*',
    'content-type': 'application/json',
    'accept-language': 'zh-cn',
    'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E217 MicroMessenger/6.8.0(0x16080000) NetType/WIFI Language/en Branch/Br_trunk MiniProgramEnv/Mac',
    'referer': 'https://servicewechat.com/wxa903dc775e85eb5b/11/page-frame.html'
}
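
# Optional, not in the original: the paginated loop below makes many POSTs to the
# same host, so a shared requests.Session would reuse connections; a sketch:
#   session = requests.Session()
#   session.headers.update(headers)
#   session.proxies.update(proxies)
#   session.verify = False
#   r = session.post("https://mini.vvuiiu.cn/article/getArticleList", data=payload)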


class ZfshRecommend:
    platform = "祝福生活"
    download_cnt = 0   # videos accepted and sent to ETL in the current run
    element_list = []  # reset alongside the counters; not otherwise used here
    i = 0              # videos scanned in the current run
    @classmethod
    def get_video_url(cls, article_id, uuid1, log_type, crawler, env):
        """Fetch the article detail and return its playable video URL (None on failure)."""
        try:
            url = "https://mini.vvuiiu.cn/article/detail"
            payload = json.dumps({
                "id": article_id,
                "uuid": uuid1,
                "platform": 35,
                "appid": "wxa903dc775e85eb5b"
            })
            r = requests.post(url=url, headers=headers, data=payload, proxies=proxies, verify=False)
            if "data" not in r.text or r.status_code != 200:
                Common.logger(log_type, crawler).warning(f"get_video_url:{r.text}\n")
                Common.logging(log_type, crawler, env, f"get_video_url:{r.text}\n")
                return
            elif "data" not in r.json():
                Common.logger(log_type, crawler).info(f"get_video_url:{r.json()}\n")
                Common.logging(log_type, crawler, env, f"get_video_url:{r.json()}\n")
                return
            elif len(r.json()["data"]) == 0:
                Common.logger(log_type, crawler).warning(f"get_video_url:{r.json()['data']}\n")
                Common.logging(log_type, crawler, env, f"get_video_url:{r.json()['data']}\n")
                return
            else:
                # Extract the playable video URL from the detail payload
                video_url = r.json()["data"].get("video_url")
                return video_url
        except Exception as e:
            Common.logger(log_type, crawler).error(f"Exception while fetching video URL: {e}\n")
            Common.logging(log_type, crawler, env, f"Exception while fetching video URL: {e}\n")

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        """Count existing crawler_video rows for this out_video_id (0 means not yet crawled)."""
        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and create_time>='2023-06-26' and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)
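
    # The SQL above is built with f-string interpolation. A sketch of the same lookup
    # with parameter binding, written against PyMySQL directly (hypothetical connection
    # parameters; this repo's MysqlHelper wrapper may or may not expose binding):
    #
    #   import pymysql
    #   conn = pymysql.connect(host="127.0.0.1", user="crawler", password="...", database="crawler")
    #   with conn.cursor() as cur:
    #       cur.execute(
    #           "select 1 from crawler_video where platform in (%s, %s) "
    #           "and create_time >= %s and out_video_id = %s",
    #           (crawler, cls.platform, "2023-06-26", video_id),
    #       )
    #       already_seen = cur.fetchone() is not None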

    @classmethod
    def get_videoList(cls, log_type, crawler, our_uid, rule_dict, env):
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        uuid1 = str(uuid.uuid1())
        for page in range(1, 101):
            try:
                Common.logger(log_type, crawler).info(f"Crawling page {page}")
                Common.logging(log_type, crawler, env, f"Crawling page {page}")
                url = "https://mini.vvuiiu.cn/article/getArticleList"
                payload = json.dumps({
                    "page": page,
                    "size": 5,
                    "category_id": "393774",  # "daily recommend" feed id
                    "from_type": 1,
                    "uuid": uuid1,
                    # "openid": "oY5tI5FgvH9Jmc8cntj81t5Ugsds",
                    "platform": 35,
                    "appid": "wxa903dc775e85eb5b"
                })
                r = requests.post(url=url, headers=headers, data=payload, proxies=proxies, verify=False)
                if "data" not in r.text or r.status_code != 200:
                    Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
                    Common.logging(log_type, crawler, env, f"get_videoList:{r.text}\n")
                    return
                elif "data" not in r.json():
                    Common.logger(log_type, crawler).info(f"get_videoList:{r.json()}\n")
                    Common.logging(log_type, crawler, env, f"get_videoList:{r.json()}\n")
                    return
                elif "list" not in r.json()["data"]:
                    Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']}\n")
                    Common.logging(log_type, crawler, env, f"get_videoList:{r.json()['data']}\n")
                    return
                elif len(r.json()["data"]["list"]) == 0:
                    Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']['list']}\n")
                    Common.logging(log_type, crawler, env, f"get_videoList:{r.json()['data']['list']}\n")
                    return
                else:
                    # Video list for this page
                    feeds = r.json()["data"]["list"]
                    for i in range(len(feeds)):
                        try:
                            # Stop once the per-run quota (videos_cnt.min) is reached
                            if cls.download_cnt >= int(rule_dict.get("videos_cnt", {}).get("min", 10)):
                                cls.i = 0
                                cls.download_cnt = 0
                                cls.element_list = []
                                return
                            trace_id = crawler + str(uuid.uuid1())
                            AliyunLogger.logging(
                                code="1001",
                                platform=crawler,
                                mode=log_type,
                                env=env,
                                data=feeds[i],
                                message="Scanned one video"
                            )
                            cls.i += 1
                            # Strip characters that are unsafe in titles/filenames
                            video_title = feeds[i].get("title", "").strip().replace("\n", "") \
                                .replace("/", "").replace("\\", "").replace("\r", "") \
                                .replace(":", "").replace("*", "").replace("?", "") \
                                .replace("?", "").replace('"', "").replace("<", "") \
                                .replace(">", "").replace("|", "").replace(" ", "") \
                                .replace("&NBSP", "").replace(".", "。").replace(" ", "") \
                                .replace("'", "").replace("#", "").replace("Merge", "")
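                            # One-pass alternative to the replace() chain above, as a sketch
                            # (kept as a comment since the chain above is the behavior of
                            # record; "re" would be imported at the top of the module):
                            #   _UNSAFE = re.compile(r'[\n\r/\\:*?"<>| \'#]')
                            #   video_title = _UNSAFE.sub("", feeds[i].get("title", "")) \
                            #       .replace("&NBSP", "").replace(".", "。").replace("Merge", "")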
                            # article_id doubles as the publish timestamp here
                            publish_time_stamp = int(feeds[i].get("article_id", 0))
                            publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                            article_id = feeds[i]["id"]
                            # Resolve the playable video URL via the detail endpoint
                            video_url = cls.get_video_url(article_id, uuid1, log_type, crawler, env)
                            video_dict = {
                                "video_title": video_title,
                                "video_id": feeds[i]["id"],  # video id
                                "publish_time_stamp": publish_time_stamp,
                                "publish_time_str": publish_time_str,
                                "is_video": int(feeds[i].get("is_video", 0)),  # content type
                                "category_id": int(feeds[i].get("category_id", 0)),  # source feed (recommend)
                                "cover_url": feeds[i].get("image_path", ""),  # cover image
                                "video_url": video_url,
                                "click": int(feeds[i].get("click", 0)),  # click count
                                "video_width": int(feeds[i].get("vw", 0)),
                                "video_height": int(feeds[i].get("vh", 0)),
                                "user_name": feeds[i].get("source", "").strip().replace("\n", ""),
                                "user_id": feeds[i].get("openid", ""),
                                # Engagement counters are not taken from this endpoint, so they are zeroed
                                "play_cnt": 0,
                                "like_cnt": 0,
                                "comment_cnt": 0,
                                "share_cnt": 0,
                                # "duration": feeds[i].get("mediaDuration", 0),
                                "session": ""
                            }
                            for k, v in video_dict.items():
                                Common.logger(log_type, crawler).info(f"{k}:{v}")
                            Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")
                            # video_url may be None if the detail call failed, hence the falsiness check
                            if not video_dict["video_id"] or not video_dict["video_title"] or not video_dict["video_url"]:
                                Common.logger(log_type, crawler).info("Invalid video\n")
                                Common.logging(log_type, crawler, env, "Invalid video\n")
                            elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict,
                                               rule_dict=rule_dict) is False:
                                Common.logger(log_type, crawler).info("Does not satisfy the crawl rules\n")
                                Common.logging(log_type, crawler, env, "Does not satisfy the crawl rules\n")
                            elif any(str(word) in video_dict["video_title"]
                                     for word in get_config_from_mysql(log_type=log_type,
                                                                       source=crawler,
                                                                       env=env,
                                                                       text="filter",
                                                                       action="")):
                                Common.logger(log_type, crawler).info("Title hit a filter word\n")
                                Common.logging(log_type, crawler, env, "Title hit a filter word\n")
                            elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                                Common.logger(log_type, crawler).info("Video already downloaded\n")
                                Common.logging(log_type, crawler, env, "Video already downloaded\n")
                                AliyunLogger.logging(
                                    code="2002",
                                    platform=crawler,
                                    mode=log_type,
                                    message="Duplicate video",
                                    data=video_dict,
                                    trace_id=trace_id,
                                    env=env
                                )
                            else:
                                # Map to the ETL message schema and publish
                                video_dict["out_user_id"] = video_dict["user_id"]
                                video_dict["platform"] = crawler
                                video_dict["strategy"] = log_type
                                video_dict["out_video_id"] = video_dict["video_id"]
                                video_dict["width"] = video_dict["video_width"]
                                video_dict["height"] = video_dict["video_height"]
                                video_dict["crawler_rule"] = json.dumps(rule_dict)
                                video_dict["user_id"] = our_uid
                                video_dict["publish_time"] = video_dict["publish_time_str"]
                                mq.send_msg(video_dict)
                                AliyunLogger.logging(
                                    code="1002",
                                    platform=crawler,
                                    mode=log_type,
                                    message="Successfully sent to ETL",
                                    data=video_dict,
                                    trace_id=trace_id,
                                    env=env
                                )
                                cls.download_cnt += 1
                                # Politeness delay between accepted items
                                interval = random.randrange(5, 11)
                                time.sleep(interval)
                        except Exception as e:
                            Common.logger(log_type, crawler).error(f"Exception while crawling a single video: {e}\n")
                            Common.logging(log_type, crawler, env, f"Exception while crawling a single video: {e}\n")
                            AliyunLogger.logging(
                                code="3000",
                                platform=crawler,
                                mode=log_type,
                                message=f"Exception while crawling a single video: {e}\n",
                                env=env
                            )
            except Exception as e:
                Common.logger(log_type, crawler).error(f"Exception while crawling page {page}: {e}\n")
                Common.logging(log_type, crawler, env, f"Exception while crawling page {page}: {e}\n")
                AliyunLogger.logging(
                    code="3000",
                    platform=crawler,
                    mode=log_type,
                    message=f"Exception while crawling page {page}: {e}\n",
                    env=env
                )


if __name__ == "__main__":
    # Manual dev run: strategy "recommend", crawler key "zhufushenghuo", target user "16QspO"
    rule_dict1 = {"period": {"min": 365, "max": 365},
                  "duration": {"min": 30, "max": 1800},
                  "favorite_cnt": {"min": 0, "max": 0},
                  "videos_cnt": {"min": 10, "max": 20},
                  "share_cnt": {"min": 0, "max": 0}}
    ZfshRecommend.get_videoList("recommend", "zhufushenghuo", "16QspO", rule_dict1, "dev")