wangkun 2 years ago
parent
commit
a828bd3aa4

BIN
.DS_Store


+ 66 - 80
common/users.py

@@ -10,8 +10,7 @@ import sys
 import requests
 sys.path.append(os.getcwd())
 from common.common import Common
-from common.feishu import Feishu
-
+from common.db import MysqlHelper
 
 class Users:
     @classmethod
@@ -56,7 +55,7 @@ class Users:
             Common.logger(log_type, crawler).error(f"create_user异常:{e}\n")
 
     @classmethod
-    def create_user(cls, log_type, crawler, sheetid, out_user_dict, env, machine):
+    def create_user(cls, log_type, crawler, out_user_dict, env, machine):
         """
         补全飞书用户表信息,并返回
         :param log_type: 日志
@@ -69,6 +68,8 @@ class Users:
         """
         try:
             # 获取站外账号信息
+            out_uid = out_user_dict['out_uid']  # 站外uid
+            user_name = out_user_dict['user_name']  # 站外用户名
             out_avatar_url = out_user_dict['out_avatar_url']  # 站外头像
             out_create_time = out_user_dict['out_create_time']  # 站外注册时间,格式: YYYY-MM-DD HH:MM:SS
             out_tag = out_user_dict['out_tag']  # 站外标签,例如:搞笑博主
@@ -79,84 +80,69 @@ class Users:
             out_like = out_user_dict['out_like']  # 站外点赞量
             platform = out_user_dict['platform']  # 抓取平台,例如:小年糕、西瓜视频
             tag = out_user_dict['tag']  # 站内身份标签,例如:小年糕爬虫,小时榜爬虫策略;好看爬虫,频道榜爬虫策略;youtube爬虫,定向爬虫策略
-            user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
-            user_list = []
-            for i in range(1, len(user_sheet)):
-                out_uid = user_sheet[i][2]
-                user_name = user_sheet[i][3]
-                our_uid = user_sheet[i][6]
-                Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息\n")
-                # 站内 UID 为空,且数据库中(youtube+out_user_id)返回数量 == 0,则创建新的站内账号
-                if our_uid is None:
-                    sql = f""" select * from crawler_user where platform="{platform}" and out_user_id="{out_uid}" """
-                    our_user_info = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
-                    # 数据库中(youtube + out_user_id)返回数量 == 0,则创建站内账号UID,并写入定向账号飞书表。并结合站外用户信息,一并写入爬虫账号数据库
-                    if our_user_info is None or len(our_user_info) == 0:
-                        # 创建站内账号
-                        create_user_dict = {
-                            'nickName': user_name,
-                            'avatarUrl': out_avatar_url,
-                            'tagName': tag,  # 例如 'youtube爬虫,定向爬虫策略'
-                        }
-                        our_uid = cls.create_uid(log_type, crawler, create_user_dict, env)
-                        Common.logger(log_type, crawler).info(f'新创建的站内UID:{our_uid}')
-                        if env == 'prod':
-                            our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
-                        else:
-                            our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
-                        Common.logger(log_type, crawler).info(f'站内用户主页链接:{our_user_link}')
-                        Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
-                                             [[our_uid, our_user_link]])
-                        Common.logger(log_type, crawler).info(f'站内用户信息写入飞书成功!')
-
-                        # 用户信息写入数据库
-                        sql = f""" insert into crawler_user(user_id, 
-                                                out_user_id, 
-                                                out_user_name, 
-                                                out_avatar_url, 
-                                                out_create_time, 
-                                                out_tag,
-                                                out_play_cnt, 
-                                                out_fans, 
-                                                out_follow, 
-                                                out_friend, 
-                                                out_like, 
-                                                platform, 
-                                                tag)
-                                                values({our_uid}, 
-                                                "{out_uid}", 
-                                                "{user_name}", 
-                                                "{out_avatar_url}", 
-                                                "{out_create_time}", 
-                                                "{out_tag}", 
-                                                {out_play_cnt}, 
-                                                {out_fans}, 
-                                                {out_follow}, 
-                                                {out_friend}, 
-                                                {out_like}, 
-                                                "{platform}",
-                                                "{tag}") """
-                        Common.logger(log_type, crawler).info(f'sql:{sql}')
-                        MysqlHelper.update_values(log_type, crawler, sql, env, machine)
-                        Common.logger(log_type, crawler).info('用户信息插入数据库成功!\n')
-                    # 数据库中(youtube + out_user_id)返回数量 != 0,则直接把数据库中的站内 UID 写入飞书
-                    else:
-                        our_uid = our_user_info[0][1]
-                        if 'env' == 'prod':
-                            our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
-                        else:
-                            our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
-                        Common.logger(log_type, crawler).info(f'站内用户主页链接:{our_user_link}')
-                        Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
-                                             [[our_uid, our_user_link]])
-                        Common.logger(log_type, crawler).info(f'站内用户信息写入飞书成功!\n')
-                user_dict = {
-                    'out_user_id': out_uid,
-                    'out_user_name': user_name,
-                    'our_user_id': our_uid,
+            sql = f""" select * from crawler_user where platform="{platform}" and out_user_id="{out_uid}" """
+            our_user_info = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
+            # 数据库中(youtube + out_user_id)返回数量 == 0,则创建站内账号UID,并写入定向账号飞书表。并结合站外用户信息,一并写入爬虫账号数据库
+            if our_user_info is None or len(our_user_info) == 0:
+                # 创建站内账号
+                create_user_dict = {
+                    'nickName': user_name,
+                    'avatarUrl': out_avatar_url,
+                    'tagName': tag,  # 例如 'youtube爬虫,定向爬虫策略'
                 }
-                user_list.append(user_dict)
-            return user_list
+                our_uid = cls.create_uid(log_type, crawler, create_user_dict, env)
+                Common.logger(log_type, crawler).info(f'新创建的站内UID:{our_uid}')
+                if env == 'prod':
+                    our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
+                else:
+                    our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
+                Common.logger(log_type, crawler).info(f'站内用户主页链接:{our_user_link}')
+
+                # 用户信息写入数据库
+                sql = f""" insert into crawler_user(user_id, 
+                                        out_user_id, 
+                                        out_user_name, 
+                                        out_avatar_url, 
+                                        out_create_time, 
+                                        out_tag,
+                                        out_play_cnt, 
+                                        out_fans, 
+                                        out_follow, 
+                                        out_friend, 
+                                        out_like, 
+                                        platform, 
+                                        tag)
+                                        values({our_uid}, 
+                                        "{out_uid}", 
+                                        "{user_name}", 
+                                        "{out_avatar_url}", 
+                                        "{out_create_time}", 
+                                        "{out_tag}", 
+                                        {out_play_cnt}, 
+                                        {out_fans}, 
+                                        {out_follow}, 
+                                        {out_friend}, 
+                                        {out_like}, 
+                                        "{platform}",
+                                        "{tag}") """
+                Common.logger(log_type, crawler).info(f'sql:{sql}')
+                MysqlHelper.update_values(log_type, crawler, sql, env, machine)
+                Common.logger(log_type, crawler).info('用户信息插入数据库成功!')
+                # 数据库中(youtube + out_user_id)返回数量 != 0,则直接把数据库中的站内 UID 写入飞书
+            else:
+                our_uid = our_user_info[0][1]
+                if env == 'prod':
+                    our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
+                else:
+                    our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
+                Common.logger(log_type, crawler).info(f'站内用户主页链接:{our_user_link}')
+            user_dict = {
+                'out_uid': out_uid,
+                'user_name': user_name,
+                'our_uid': our_uid,
+                'our_user_link': our_user_link,
+            }
+            return user_dict
         except Exception as e:
             Common.logger(log_type, crawler).error(f"create_user:{e}\n")
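
For reference, a minimal sketch of how a platform crawler is now expected to call the refactored create_user (field values here are illustrative; the real call lives in xigua_follow.py's get_user_list further down):

    from common.users import Users

    out_user_dict = {
        "out_uid": "95420624045",      # 站外uid (illustrative value)
        "user_name": "example_user",   # 站外用户名 (illustrative value)
        "out_avatar_url": "",
        "out_create_time": "",
        "out_tag": "",
        "out_play_cnt": 0,
        "out_fans": 0,
        "out_follow": 0,
        "out_friend": 0,
        "out_like": 0,
        "platform": "西瓜视频",
        "tag": "西瓜视频爬虫,定向爬虫策略",
    }
    our_user_dict = Users.create_user(log_type="follow", crawler="xigua",
                                      out_user_dict=out_user_dict, env="dev", machine="local")
    # our_user_dict carries out_uid, user_name, our_uid and our_user_link; the caller
    # (not create_user) is now responsible for writing them back to the Feishu sheet.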
 
 

+ 2 - 1
requirements.txt

@@ -9,4 +9,5 @@ selenium~=4.2.0
 urllib3==1.26.9
 emoji~=2.2.0
 Appium-Python-Client~=2.8.1
-atomac~=1.2.0
+atomac~=1.2.0
+lxml~=4.9.1
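
lxml is pulled in for the new profile-page scraping in xigua_follow.py (etree.HTML + XPath). A minimal sketch of the count normalization that get_out_user_info applies to the scraped follow/fans/like strings (the helper name is hypothetical; the committed code inlines this logic per field):

    def count_to_int(count_str: str) -> int:
        # "1.2万" -> 12000, "3,456" -> 3456
        if "万" in count_str:
            return int(float(count_str.split("万")[0]) * 10000)
        return int(count_str.replace(",", ""))

    assert count_to_int("1.2万") == 12000
    assert count_to_int("3,456") == 3456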

+ 9 - 14
weixinzhishu/weixinzhishu_main/get_weixinzhishu.py

@@ -79,6 +79,10 @@ class Weixinzhishu:
         try:
             while True:
                 wechat_key = cls.get_wechat_key(log_type, crawler)
+                if wechat_key is None:
+                    Common.logger(log_type, crawler).info(f"wechat_key:{wechat_key}")
+                    time.sleep(10)
+                    continue
                 search_key = wechat_key[0]
                 openid = wechat_key[-1]
                 start_ymd = (date.today() + timedelta(days=-7)).strftime("%Y%m%d")
@@ -110,13 +114,6 @@ class Weixinzhishu:
                     time.sleep(10)
                 elif response.json()['code'] == -10002:
                     Common.logger(log_type, crawler).info(f'{word}:该词暂未收录')
-                    # # 数据写入飞书
-                    # now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time())))
-                    # values = [[now, word, "该词暂未收录"]]
-                    # Feishu.insert_columns(log_type, crawler, "5011a2", "ROWS", 1, 2)
-                    # time.sleep(0.5)
-                    # Feishu.update_values(log_type, crawler, "5011a2", "F2:Z2", values)
-                    # Common.logger(log_type, crawler).info(f'热词"{word}"微信指数数据写入飞书成功\n')
                     return word_wechat_score_dict
                 elif response.json()['code'] != 0:
                     Common.logger(log_type, crawler).info(f'response:{response.text}\n')
@@ -129,13 +126,6 @@ class Weixinzhishu:
                         score = time_index[i]['score']
                         wechat_score_dict = {"score": score, "scoreDate": score_time_str}
                         wechat_score_list.append(wechat_score_dict)
-                        # # 数据写入飞书
-                        # now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time())))
-                        # values = [[now, word, score_time_str, score]]
-                        # Feishu.insert_columns(log_type, crawler, "5011a2", "ROWS", 1, 2)
-                        # time.sleep(0.5)
-                        # Feishu.update_values(log_type, crawler, "5011a2", "F2:Z2", values)
-                        # Common.logger(log_type, crawler).info(f'热词"{word}"微信指数数据写入飞书成功\n')
                     return word_wechat_score_dict
         except Exception as e:
             Common.logger(log_type, crawler).error(f"get_word_score异常:{e}\n")
@@ -150,11 +140,13 @@ class Weixinzhishu:
         :param host: 域名
         :return: 热词指数列表
         """
+        score_num = 0
         while True:
             word_list = cls.get_word(log_type, crawler, host)
             if len(word_list) == 0:
                 Common.logger(log_type, crawler).info(f"热词更新完毕\n")
                 cls.pageNum = 1
+                Common.logger(log_type, crawler).info(f"score_num: {score_num}")
                 return
             else:
                 wechat_score_data = []
@@ -167,9 +159,12 @@ class Weixinzhishu:
                     word_score_dict = cls.get_word_score(log_type, crawler, word_id, word)
                     Common.logger(log_type, crawler).info(f"word_score_dict:{word_score_dict}\n")
                     wechat_score_data.append(word_score_dict)
+                    if word_score_dict['wechatScores'] is not None:
+                        score_num += len(word_score_dict['wechatScores'])
                 Common.logger(log_type, crawler).info(f"wechat_score_data:{wechat_score_data}\n")
                 cls.update_wechat_score(log_type, crawler, wechat_score_data, host)
 
+
     # 更新微信指数
     @classmethod
     def update_wechat_score(cls, log_type, crawler, data, host):
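
The new guard above keeps the loop from indexing into a None wechat_key. A minimal sketch of the same retry factored into a standalone helper (hypothetical, not part of this commit):

    import time

    def retry_until_value(fetch, wait_seconds=10):
        # call fetch() until it returns something other than None
        while True:
            value = fetch()
            if value is not None:
                return value
            time.sleep(wait_seconds)

    # e.g. wechat_key = retry_until_value(lambda: Weixinzhishu.get_wechat_key(log_type, crawler))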

+ 137 - 30
xigua/xigua_follow/xigua_follow.py

@@ -1,23 +1,27 @@
 # -*- coding: utf-8 -*-
 # @Author: wangkun
 # @Time: 2023/2/17
-import base64
 import json
+
+from lxml import etree
+import base64
 import os
+import random
 import shutil
+import string
 import sys
 import time
-
 import requests
 import urllib3
+
 from selenium.webdriver import DesiredCapabilities
 from selenium.webdriver.chrome.service import Service
 from selenium.webdriver.common.by import By
 from seleniumwire import webdriver
 
-from common.db import MysqlHelper
-
 sys.path.append(os.getcwd())
+from common.db import MysqlHelper
+from common.users import Users
 from common.common import Common
 from common.feishu import Feishu
 from common.publish import Publish
@@ -29,6 +33,7 @@ class Follow:
     offset = 0
 
     platform = "西瓜视频"
+    tag = "西瓜视频爬虫,定向爬虫策略"
 
     # 下载规则
     @staticmethod
@@ -57,24 +62,120 @@ class Follow:
         except Exception as e:
             Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')
 
+    @classmethod
+    def get_out_user_info(cls, log_type, crawler, out_uid):
+        try:
+            headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
+                       'referer': f'https://www.ixigua.com/home/{out_uid}',
+                       'Cookie': f'ixigua-a-s=1; support_webp=true; support_avif=false; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; __ac_signature={cls.random_signature()}; MONITOR_WEB_ID=67cb5099-a022-4ec3-bb8e-c4de6ba51dd0; s_v_web_id=verify_lef4i99x_32SosrdH_Qrtk_4LJn_8S7q_fhu16xe3s8ZV; tt_scid=QLJjPuHf6wxVqu6IIq6gHiJXQpVrCwrdhjH2zpm7-E3ZniE1RXBcP6M8b41FJOdo41e1; ttwid=1%7CHHtv2QqpSGuSu8r-zXF1QoWsvjmNi1SJrqOrZzg-UCY%7C1677047013%7C5866a444e5ae10a9df8c11551db75010fb77b657f214ccf84e503fae8d313d09; msToken=PerXJcDdIsZ6zXkGITsftXX4mDaVaW21GuqtzSVdctH46oXXT2GcELIs9f0XW2hunRzP6KVHLZaYElRvNYflLKUXih7lC27XKxs3HjdZiXPK9NQaoKbLfA==; ixigua-a-s=1',}
+            url = f"https://www.ixigua.com/home/{out_uid}"
+
+            response = requests.get(url=url, headers=headers, proxies=proxies).text
+            html = etree.HTML(response)
+            out_follow_str = html.xpath('//div[@class="userDetailV3__header__detail2"]/*[1]/span')[0].text.encode('raw_unicode_escape').decode()
+            out_fans_str = html.xpath('//div[@class="userDetailV3__header__detail2"]/*[2]/span')[0].text.encode('raw_unicode_escape').decode()
+            out_like_str = html.xpath('//div[@class="userDetailV3__header__detail2"]/*[3]/span')[0].text.encode('raw_unicode_escape').decode()
+            out_avatar_url = f"""https:{html.xpath('//span[@class="component-avatar__inner"]//img/@src')[0]}"""
+            if "万" in out_follow_str:
+                out_follow = int(float(out_follow_str.split("万")[0])*10000)
+            else:
+                out_follow = int(out_follow_str.replace(",", ""))
+            if "万" in out_fans_str:
+                out_fans = int(float(out_fans_str.split("万")[0])*10000)
+            else:
+                out_fans = int(out_fans_str.replace(",", ""))
+            if "万" in out_like_str:
+                out_like = int(float(out_like_str.split("万")[0])*10000)
+            else:
+                out_like = int(out_like_str.replace(",", ""))
+            out_user_dict = {
+                "out_follow": out_follow,
+                "out_fans": out_fans,
+                "out_like": out_like,
+                "out_avatar_url": out_avatar_url,
+            }
+            # for k, v in out_user_dict.items():
+            #     print(f"{k}:{v}")
+            return out_user_dict
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_out_user_info:{e}\n")
+
+
     # 获取用户信息(字典格式). 注意:部分 user_id 字符类型是 int / str
     @classmethod
-    def get_user_info_from_feishu(cls, log_type, crawler):
+    def get_user_list(cls, log_type, crawler, sheetid, env, machine):
         try:
-            user_sheet = Feishu.get_values_batch(log_type, crawler, '5tlTYB')
-            user_dict = {}
+            user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
+            our_user_list = []
             for i in range(1, len(user_sheet)):
-                user_name = user_sheet[i][0]
-                out_id = user_sheet[i][1]
-                our_id = user_sheet[i][3]
-                if user_name is None or out_id is None or our_id is None:
-                    pass
+                out_uid = user_sheet[i][2]
+                user_name = user_sheet[i][3]
+                our_uid = user_sheet[i][6]
+                our_user_link = user_sheet[i][7]
+                if out_uid is None or user_name is None:
+                    Common.logger(log_type, crawler).info("空行\n")
                 else:
-                    user_dict[user_name] = str(out_id) + ',' + str(our_id)
-            return user_dict
+                    Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息\n")
+                    if our_uid is None:
+                        out_user_info = cls.get_out_user_info(log_type, crawler, out_uid)
+                        out_user_dict = {
+                            "out_uid": out_uid,
+                            "user_name": user_name,
+                            "out_avatar_url": out_user_info["out_avatar_url"],
+                            "out_create_time": '',
+                            "out_tag": '',
+                            "out_play_cnt": 0,
+                            "out_fans": out_user_info["out_fans"],
+                            "out_follow": out_user_info["out_follow"],
+                            "out_friend": 0,
+                            "out_like": out_user_info["out_like"],
+                            "platform": cls.platform,
+                            "tag": cls.tag,
+                        }
+                        our_user_dict = Users.create_user(log_type=log_type, crawler=crawler, out_user_dict=out_user_dict, env=env, machine=machine)
+                        our_uid = our_user_dict['our_uid']
+                        our_user_link = our_user_dict['our_user_link']
+                        Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}', [[our_uid, our_user_link]])
+                        Common.logger(log_type, crawler).info(f'站内用户信息写入飞书成功!\n')
+                        our_user_list.append(our_user_dict)
+                    else:
+                        our_user_dict = {
+                            'out_uid': out_uid,
+                            'user_name': user_name,
+                            'our_uid': our_uid,
+                            'our_user_link': our_user_link,
+                        }
+                        our_user_list.append(our_user_dict)
+            return our_user_list
         except Exception as e:
             Common.logger(log_type, crawler).error(f'get_user_id_from_feishu异常:{e}\n')
 
+    @classmethod
+    def random_signature(cls):
+        src_digits = string.digits  # string_数字
+        src_uppercase = string.ascii_uppercase  # string_大写字母
+        src_lowercase = string.ascii_lowercase  # string_小写字母
+        digits_num = random.randint(1, 6)
+        uppercase_num = random.randint(1, 26 - digits_num - 1)
+        lowercase_num = 26 - (digits_num + uppercase_num)
+        password = random.sample(src_digits, digits_num) + random.sample(src_uppercase, uppercase_num) + random.sample(
+            src_lowercase, lowercase_num)
+        random.shuffle(password)
+        new_password = 'AAAAAAAAAA' + ''.join(password)[10:-4] + 'AAAB'
+        new_password_start = new_password[0:18]
+        new_password_end = new_password[-7:]
+        if new_password[18] == '8':
+            new_password = new_password_start + 'w' + new_password_end
+        elif new_password[18] == '9':
+            new_password = new_password_start + 'x' + new_password_end
+        elif new_password[18] == '-':
+            new_password = new_password_start + 'y' + new_password_end
+        elif new_password[18] == '.':
+            new_password = new_password_start + 'z' + new_password_end
+        else:
+            new_password = new_password_start + 'y' + new_password_end
+        return new_password
+
     @classmethod
     def get_signature(cls, log_type, crawler, out_uid, machine):
         try:
@@ -104,8 +205,6 @@ class Follow:
             time.sleep(3)
             data_src = driver.find_elements(By.XPATH, '//img[@class="tt-img BU-MagicImage tt-img-loaded"]')[1].get_attribute("data-src")
             signature = data_src.split("x-signature=")[-1]
-            # print(f"data_src:{data_src}")
-            # print(f"signature:{signature}")
             return signature
         except Exception as e:
             Common.logger(log_type, crawler).error(f'get_signature异常:{e}\n')
@@ -507,7 +606,7 @@ class Follow:
 
     @classmethod
     def get_videolist(cls, log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine):
-        signature = cls.get_signature(log_type, crawler, out_uid, machine)
+        signature = cls.random_signature()
         while True:
             url = "https://www.ixigua.com/api/videov2/author/new_video_list?"
             params = {
@@ -683,14 +782,14 @@ class Follow:
                                       'session': signature}
                         for k, v in video_dict.items():
                             Common.logger(log_type, crawler).info(f"{k}:{v}")
-                        cls.download_publish(log_type=log_type,
-                                             crawler=crawler,
-                                             video_dict=video_dict,
-                                             strategy=strategy,
-                                             our_uid=our_uid,
-                                             oss_endpoint=oss_endpoint,
-                                             env=env,
-                                             machine=machine)
+                        # cls.download_publish(log_type=log_type,
+                        #                      crawler=crawler,
+                        #                      video_dict=video_dict,
+                        #                      strategy=strategy,
+                        #                      our_uid=our_uid,
+                        #                      oss_endpoint=oss_endpoint,
+                        #                      env=env,
+                        #                      machine=machine)
 
 
     # 下载 / 上传
     @classmethod
@@ -802,11 +901,19 @@ class Follow:
         #     Common.logger(log_type, crawler).error(f'download_publish异常:{e}\n')
 
 
-
-
 if __name__ == '__main__':
     # print(Follow.get_signature("follow", "xigua", "95420624045", "local"))
-    Follow.get_videolist("follow", "xigua", "95420624045", "local")
-
-
+    # Follow.get_videolist(log_type="follow",
+    #                      crawler="xigua",
+    #                      strategy="定向爬虫策略",
+    #                      our_uid="6267141",
+    #                      out_uid="95420624045",
+    #                      oss_endpoint="out",
+    #                      env="dev",
+    #                      machine="local")
+    # print(Follow.random_signature())
+    user_list = Follow.get_user_list(log_type="follow", crawler="xigua", sheetid="5tlTYB", env="dev", machine="local")
+    print(len(user_list))
+    for user in user_list:
+        print(user)
     pass
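
A minimal sketch of how the two new pieces are meant to fit together once download_publish is re-enabled (assumed flow; argument values mirror the commented-out __main__ block above and are illustrative):

    user_list = Follow.get_user_list(log_type="follow", crawler="xigua",
                                     sheetid="5tlTYB", env="dev", machine="local")
    for user in user_list:
        # each entry carries out_uid / user_name / our_uid / our_user_link
        Follow.get_videolist(log_type="follow", crawler="xigua", strategy="定向爬虫策略",
                             our_uid=user["our_uid"], out_uid=user["out_uid"],
                             oss_endpoint="out", env="dev", machine="local")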