wangkun 2 年之前
父節點
當前提交
8da79af928
共有 3 個文件被更改,包括 303 次插入 和 12 次删除
  1. +14 -7
      main/demo.py
  2. +288 -3
      main/gzh_recommend.py
  3. +1 -2
      main/run_gzh_recommend.py

File diff suppressed because it is too large
+ 14 - 7
main/demo.py


+ 288 - 3
main/gzh_recommend.py

@@ -2,13 +2,16 @@
 # @Author: wangkun
 # @Time: 2022/8/1
 # import time
-# import base64
+import base64
 import json
 import os
+import sys
 import time
 # import urllib.parse
 import requests
 import urllib3
+
+sys.path.append(os.getcwd())
 from crawler_gzh.main.common import Common
 from crawler_gzh.main.feishu_lib import Feishu
 from crawler_gzh.main.publish import Publish
@@ -44,6 +47,7 @@ class Recommend:
 
                 with open(charles_file_dir + new_file[0] + ".txt", encoding='utf-8-sig', errors='ignore') as f:
                     contents = json.load(f, strict=False)
+                Common.logger(log_type).info("chlsfile:{}", new_file)
                 for content in contents:
                     if "mp.weixin.qq.com" in content['host']:
                         if content["path"] == r"/mp/getappmsgext":
@@ -96,6 +100,58 @@ class Recommend:
                 time.sleep(30)
                 cls.get_token(log_type)
 
+    @classmethod
+    def get_token_v2(cls, log_type):
+        # charles 抓包文件保存目录
+        charles_file_dir = "./crawler-kanyikan-recommend/chlsfiles/"
+        # charles_file_dir = "../chlsfiles/"
+
+        if int(len(os.listdir(charles_file_dir))) == 1:
+            Common.logger(log_type).info("未找到chlsfile文件,等待60s")
+            time.sleep(60)
+        else:
+            try:
+                # 目标文件夹下所有文件
+                all_file = sorted(os.listdir(charles_file_dir))
+
+                # 获取到目标文件
+                old_file = all_file[-1]
+
+                # 分离文件名与扩展名
+                new_file = os.path.splitext(old_file)
+
+                # 重命名文件后缀
+                os.rename(os.path.join(charles_file_dir, old_file),
+                          os.path.join(charles_file_dir, new_file[0] + ".txt"))
+
+                with open(charles_file_dir + new_file[0] + ".txt", encoding='utf-8-sig', errors='ignore') as f:
+                    contents = json.load(f, strict=False)
+                # Common.logger(log_type).info("chlsfile:{}\n", new_file)
+                for content in contents:
+                    if content["host"] == "mp.weixin.qq.com" and content["path"] == r"/mp/getappmsgext":
+                        # query
+                        query = content["query"]
+                        Feishu.update_values("recommend", "gzh", "VzrN7E", "B9:B9", [[query]])
+                        Common.logger(log_type).info("保存query成功\n")
+
+                        headers = content["request"]["header"]["headers"]
+
+                        # body
+                        body = content["request"]["body"]["text"]
+                        # Common.logger(log_type).info("body:{}", body)
+                        Feishu.update_values("recommend", "gzh", "VzrN7E", "B8:B8", [[body]])
+                        Common.logger(log_type).info("保存body成功\n")
+
+                        # x-wechat-key
+                        for header in headers:
+                            if header["name"] == "x-wechat-key":
+                                x_wechat_key = header["value"]
+                                Feishu.update_values("recommend", "gzh", "VzrN7E", "B10:B10", [[x_wechat_key]])
+                                Common.logger(log_type).info("保存x_wechat_key成功\n")
+                                return True
+            except Exception as e:
+                Common.logger(log_type).error("get_token_v2异常:{}", e)
+
     # 获取推荐列表
     @classmethod
     def get_recommend(cls, log_type):
@@ -280,6 +336,235 @@ class Recommend:
         except Exception as e:
             Common.logger(log_type).error("get_recommend异常:{}", e)
 
+    # 使用 token 获取推荐列表
+    @classmethod
+    def get_recommend_by_token(cls, log_type):
+        try:
+            get_token = cls.get_token_v2(log_type)
+            if get_token is not True:
+                Common.logger(log_type).warning("未获取到token,10s后重试")
+                time.sleep(10)
+                cls.get_recommend_by_token(log_type)
+            else:
+                # 获取公众号token
+                token_sheet = Feishu.get_values_batch(log_type, "gzh", "VzrN7E")
+                body = token_sheet[7][1]
+                query = token_sheet[8][1]
+                x_wechat_key = token_sheet[9][1]
+
+                url = "https://mp.weixin.qq.com/mp/getappmsgext?"
+                headers = {
+                    "content-type": "application/x-www-form-urlencoded",
+                    "x-wechat-uin": "MjAxMDc0Nzg2MA%3D%3D",
+                    "accept": "*/*",
+                    "accept-encoding": "gzip, deflate, br",
+                    "x-wechat-key": x_wechat_key,
+                    "x-wechat-acctmode": "0",
+                    "exportkey": "ASgNaiqfqTTPeQ%2BQ7X3yqzA%3D",
+                    "user-agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) "
+                                  "AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 "
+                                  "MicroMessenger/8.0.26(0x18001a2b) NetType/WIFI Language/zh_CN",
+                    "accept-language": "zh-cn"
+                }
+                # query_string = {
+                #     "f": "json",
+                #     "mock": "",
+                #     "fasttmplajax": "1",
+                #     "uin": "",
+                #     "key": "",
+                #     "pass_ticket": "MPA2Yy1dnOo6JSfV1DNWyJcLBO9dwupvcgkQj6sXOo3puAQKD7t4Odst6kRxfmUc",
+                #     "wxtoken": "",
+                #     "devicetype": "iOS14.7.1",
+                #     "clientversion": "18001a2b",
+                #     "__biz": "MzkwMjM4OTYyMA==",
+                #     "enterid": "1659926777",
+                #     "appmsg_token": "",
+                #     "x5": "0",
+                #     "wx_header": "1"
+                # }
+                # form = {
+                #     "r": "0.2395852290889654",
+                #     "__biz": "MzkwMjM4OTYyMA==",
+                #     "appmsg_type": "9",
+                #     "mid": "2247483674",
+                #     "sn": "4719d4e269e8923f7cad6c8a1e43d14e",
+                #     "idx": "1",
+                #     "scene": "102",
+                #     "title": "%E4%B8%A4%E5%85%84%E5%BC%9F%E6%95%B4%E5%A4%A9%E5%A5%BD%E5%90%83%E6%87%92%E5%81%9A%EF%BC%8C%E6%97%A0%E6%89%80%E4%BA%8B%E4%BA%8B%E8%80%81%E6%83%B3%E7%9D%80%E4%B8%8D%E5%8A%B3%E8%80%8C%E8%8E%B7%EF%BC%8C%E5%A5%BD%E4%BA%86%E6%8A%A5%E5%BA%94%E6%9D%A5%E4%BA%86",
+                #     "ct": "1659803693",
+                #     "abtest_cookie": "",
+                #     "devicetype": "iOS14.7.1",
+                #     "version": "18001a2b",
+                #     "is_need_ticket": "0",
+                #     "is_need_ad": "1",
+                #     "comment_id": "0",
+                #     "is_need_reward": "0",
+                #     "both_ad": "0",
+                #     "reward_uin_count": "0",
+                #     "send_time": "",
+                #     "msg_daily_idx": "1",
+                #     "is_original": "0",
+                #     "is_only_read": "1",
+                #     "req_id": "",
+                #     "pass_ticket": "MPA2Yy1dnOo6JSfV1DNWyJcLBO9dwupvcgkQj6sXOo3puAQKD7t4Odst6kRxfmUc",
+                #     "is_temp_url": "0",
+                #     "item_show_type": "5",
+                #     "tmp_version": "1",
+                #     "more_read_type": "0",
+                #     "appmsg_like_type": "2",
+                #     "related_video_sn": "",
+                #     "related_video_num": "5",
+                #     "vid": "wxv_2520118281538846720",
+                #     "is_pay_subscribe": "0",
+                #     "pay_subscribe_uin_count": "0",
+                #     "has_red_packet_cover": "0",
+                #     "album_id": "1296223588617486300",
+                #     "album_video_num": "5",
+                #     "cur_album_id": "",
+                #     "is_public_related_video": "0",
+                #     "encode_info_by_base64": "1",
+                #     "exptype": ""
+                # }
+
+                urllib3.disable_warnings()
+                response = requests.post(url=url, headers=headers, params=query, data=body, proxies=proxies,
+                                         verify=False)
+                if "related_tag_video" not in response.json():
+                    Common.logger(log_type).warning("response:{}\n", response.text)
+                elif len(response.json()["related_tag_video"]) == 0:
+                    Common.logger(log_type).warning("response:{}\n", response.text)
+                    # time.sleep(10)
+                    # cls.get_recommend(log_type)
+                else:
+                    feeds = response.json()["related_tag_video"]
+                    for m in range(len(feeds)):
+                        # video_title
+                        if "title" not in feeds[m]:
+                            video_title = 0
+                        else:
+                            video_title = feeds[m]["title"]
+                            video_title = base64.b64decode(video_title).decode("utf-8")
+
+                        # video_id
+                        if "vid" not in feeds[m]:
+                            video_id = 0
+                        else:
+                            video_id = feeds[m]["vid"]
+
+                        # play_cnt
+                        if "read_num" not in feeds[m]:
+                            play_cnt = 0
+                        else:
+                            play_cnt = feeds[m]["read_num"]
+
+                        # like_cnt
+                        if "like_num" not in feeds[m]:
+                            like_cnt = 0
+                        else:
+                            like_cnt = feeds[m]["like_num"]
+
+                        # duration
+                        if "duration" not in feeds[m]:
+                            duration = 0
+                        else:
+                            duration = feeds[m]["duration"]
+
+                        # video_width / video_height
+                        if "videoWidth" not in feeds[m] or "videoHeight" not in feeds[m]:
+                            video_width = 0
+                            video_height = 0
+                        else:
+                            video_width = feeds[m]["videoWidth"]
+                            video_height = feeds[m]["videoHeight"]
+
+                        # send_time
+                        if "pubTime" not in feeds[m]:
+                            send_time = 0
+                        else:
+                            send_time = feeds[m]["pubTime"]
+
+                        # user_name
+                        if "srcDisplayName" not in feeds[m]:
+                            user_name = 0
+                        else:
+                            user_name = feeds[m]["srcDisplayName"]
+                            user_name = base64.b64decode(user_name).decode("utf-8")
+
+                        # user_id
+                        if "srcUserName" not in feeds[m]:
+                            user_id = 0
+                        else:
+                            user_id = feeds[m]["srcUserName"]
+
+                        # head_url
+                        if "head_img_url" not in feeds[m]:
+                            head_url = 0
+                        else:
+                            head_url = feeds[m]["head_img_url"]
+
+                        # cover_url
+                        if "cover" not in feeds[m]:
+                            cover_url = 0
+                        else:
+                            cover_url = feeds[m]["cover"]
+
+                        # video_url
+                        if "url" not in feeds[m]:
+                            video_url = 0
+                        else:
+                            video_url = feeds[m]["url"]
+
+                        # 下载链接
+                        download_url = cls.get_url(log_type, video_url)
+
+                        Common.logger(log_type).info("video_title:{}", video_title)
+                        Common.logger(log_type).info("video_id:{}", video_id)
+                        Common.logger(log_type).info("play_cnt:{}", play_cnt)
+                        Common.logger(log_type).info("like_cnt:{}", like_cnt)
+                        Common.logger(log_type).info("duration:{}", duration)
+                        Common.logger(log_type).info("video_width:{}", video_width)
+                        Common.logger(log_type).info("video_height:{}", video_height)
+                        Common.logger(log_type).info("send_time:{}", send_time)
+                        Common.logger(log_type).info("user_name:{}", user_name)
+                        Common.logger(log_type).info("user_id:{}", user_id)
+                        Common.logger(log_type).info("head_url:{}", head_url)
+                        Common.logger(log_type).info("cover_url:{}", cover_url)
+                        Common.logger(log_type).info("video_url:{}", video_url)
+                        Common.logger(log_type).info("download_url:{}", download_url)
+
+                        if video_id == 0 or video_title == 0 or duration == 0 or video_url == 0:
+                            Common.logger(log_type).info("无效视频\n")
+                        elif str(video_id) in [x for y in Feishu.get_values_batch(log_type, "gzh", "fCs3BT") for x in
+                                               y]:
+                            Common.logger(log_type).info("该视频已下载\n")
+                        elif str(video_id) in [x for y in Feishu.get_values_batch(log_type, "gzh", "zWKFGb") for x in
+                                               y]:
+                            Common.logger(log_type).info("该视频已在feeds中\n")
+                        else:
+                            Feishu.insert_columns(log_type, "gzh", "zWKFGb", "ROWS", 1, 2)
+                            get_feeds_time = int(time.time())
+                            values = [[time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(get_feeds_time)),
+                                       "推荐榜",
+                                       video_title,
+                                       str(video_id),
+                                       play_cnt,
+                                       like_cnt,
+                                       duration,
+                                       str(video_width) + "*" + str(video_height),
+                                       time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(send_time)),
+                                       user_name,
+                                       user_id,
+                                       head_url,
+                                       cover_url,
+                                       video_url,
+                                       download_url
+                                       ]]
+                            time.sleep(1)
+                            Feishu.update_values(log_type, "gzh", "zWKFGb", "D2:T2", values)
+                            Common.logger(log_type).info("添加至recommend_feeds成功\n")
+        except Exception as e:
+            Common.logger(log_type).error("get_recommend_by_token异常:{}", e)
+
     # 获取视频下载链接
     @classmethod
     def get_url(cls, log_type, url):
@@ -448,8 +733,8 @@ class Recommend:
 
 
 if __name__ == "__main__":
-    Recommend.get_recommend("recommend")
+    Recommend.get_recommend_by_token("recommend")
     # Recommend.download_publish("recommend")
     # Recommend.run_download_publish("recommend", "dev")
-    # token = Recommend.get_token("recommend")
+    # print(Recommend.get_token_v2("recommend"))
     # print(token)

+ 1 - 2
main/run_gzh_recommend.py

@@ -18,8 +18,7 @@ class Main:
         while True:
             if 21 >= datetime.datetime.now().hour >= 8:
                 # 获取列表
-                Recommend.get_token("recommend")
-                Recommend.get_recommend("recommend")
+                Recommend.get_recommend_by_token("recommend")
                 # 下载/上传
                 Recommend.run_download_publish("recommend", "prod")
                 # 清除日志

Some files were not shown because too many files changed in this diff