Browse Source

add weixinzhishu: search_key

wangkun 2 years ago
parent
commit
945ea4037d

BIN
.DS_Store


BIN
weixinzhishu/.DS_Store


File diff suppressed because it is too large
+ 0 - 0
weixinzhishu/weixinzhishu_chlsfiles/charles202302131147.txt


+ 43 - 0
weixinzhishu/weixinzhishu_main/demo.py

@@ -0,0 +1,43 @@
# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/2/13
import json
import os


class Demo:
    @classmethod
    def demo1(cls, chlsfile_path="../weixinzhishu_chlsfiles/"):
        """Extract ``search_key`` and ``openid`` from the newest Charles capture.

        Looks at *chlsfile_path* (the Charles packet-capture dump directory),
        takes the newest file (names embed a timestamp, so lexical sort order
        is age order), renames it to a ``.txt`` extension, parses it as JSON,
        and pulls ``search_key``/``openid`` out of the request that hit
        ``search.weixin.qq.com/cgi-bin/wxaweb/wxindexgetusergroup``.

        :param chlsfile_path: directory holding Charles capture files
            (default preserves the original hard-coded location).
        :return: ``(search_key, openid)`` tuple on success, the string
            ``"未找到search_key"`` when no matching request is present,
            or ``None`` when the directory is empty.
        """
        chlsfiles = sorted(os.listdir(chlsfile_path))
        if not chlsfiles:
            print("chlsfile文件夹为空")
            return None

        print(f"chlsfile_list:{chlsfiles}")
        # Newest capture file.
        chlsfile = chlsfiles[-1]
        # Split the name from its extension so we can rename the suffix to .txt.
        stem, _ext = os.path.splitext(chlsfile)
        txt_path = os.path.join(chlsfile_path, stem + ".txt")
        os.rename(os.path.join(chlsfile_path, chlsfile), txt_path)

        # utf-8-sig strips a BOM if Charles wrote one; strict=False tolerates
        # raw control characters inside JSON strings.
        with open(txt_path, encoding='utf-8-sig', errors='ignore') as f:
            contents = json.load(f, strict=False)

        for content in contents:
            # .get() guards against entries missing 'host'/'path' keys, which
            # a plain content['host'] lookup would turn into a KeyError.
            if (content.get("host") == "search.weixin.qq.com"
                    and content.get("path") == "/cgi-bin/wxaweb/wxindexgetusergroup"):
                print(f"content:{content}")
                # Parse the request body once and read both fields from it.
                body = json.loads(content['request']['body']['text'])
                return body['search_key'], body['openid']

        # Reached when no request matched host+path (the original silently
        # returned None when the host matched but the path never did).
        return "未找到search_key"


if __name__ == "__main__":
    print(Demo.demo1())

+ 8 - 4
weixinzhishu/weixinzhishu_main/search_key.py

@@ -36,8 +36,10 @@ class Searchkey:
             driver.implicitly_wait(10)
             # Common.logger(log_type, crawler).info('点击微信指数')
             driver.find_elements(By.NAME, '消息')[-1].click()
+            time.sleep(1)
+            driver.find_elements(By.NAME, '关闭')[-1].click()
             # Common.logger(log_type, crawler).info('休眠 3 秒,退出微信')
-            time.sleep(3)
+            time.sleep(1)
             driver.quit()
         except Exception as e:
             Common.logger(log_type, crawler).error(f'start_wechat异常:{e}\n')
@@ -71,9 +73,11 @@ class Searchkey:
                 else:
                     for content in contents:
                         if content["host"] == "search.weixin.qq.com" and content["path"] == "/cgi-bin/wxaweb/wxindexgetusergroup":
+                            print(f"content:{content}")
                             text = content['request']['body']['text']
                             search_key = json.loads(text)['search_key']
-                            return search_key
+                            openid = json.loads(text)['openid']
+                            return search_key, openid
         except Exception as e:
             Common.logger(log_type, crawler).exception(f"get_search_key异常:{e}\n")
             return None
@@ -118,11 +122,11 @@ class Searchkey:
                 cls.start_wechat(log_type, crawler)
                 cls.get_search_key(log_type, crawler)
             else:
-                Common.logger(log_type, crawler).info(f'已获取 search_key:{search_key}')
+                Common.logger(log_type, crawler).info(f'已获取 search_key,openid:{search_key}')
                 Feishu.insert_columns(log_type, crawler, 'sVL74k', 'ROWS', 1, 2)
                 time.sleep(1)
                 time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time())))
-                Feishu.update_values(log_type, crawler, 'sVL74k', 'A2:B2', [[time_str, search_key]])
+                Feishu.update_values(log_type, crawler, 'sVL74k', 'A2:Z2', [[time_str, search_key[0], search_key[-1]]])
                 cls.del_search_key_from_feishu(log_type, crawler)
                 Common.logger(log_type, crawler).info(f"search_key:{search_key}写入飞书表成功\n")
                 return

+ 2 - 2
weixinzhishu/weixinzhishu_main/weixinzhishu.py

@@ -10,8 +10,8 @@ class Weixinzhishu:
     def weixinzhishu(cls, log_type, crawler, query):
         url = "https://search.weixin.qq.com/cgi-bin/wxaweb/wxindex"
         payload = json.dumps({
-            "openid": "ov4ns0OAM_om-YOT7idMCe5gxoeQ",
-            "search_key": "1676279841643011_944075427",
+            "openid": "ov4ns0KAQ7u9QlaFViTsUk7aTwU8",
+            "search_key": "1676287183675843_2172943316",
             "cgi_name": "GetDefaultIndex",
             "start_ymd": "20230206",
             "end_ymd": "20230210",

Some files were not shown because too many files changed in this diff