
Add Douyin similar-account tracing

zhangyong, 7 months ago
Parent
Commit 33d1e3d95a
4 changed files, with 156 additions and 7 deletions
  1. common/sql_help.py (+2 −2)
  2. xssy_channel/dy_rdb_nrxs.py (+135 −1)
  3. xssy_channel/sph_jr_nrxs.py (+4 −4)
  4. xssy_main.py (+15 −0)

+ 2 - 2
common/sql_help.py

@@ -283,8 +283,8 @@ class sqlCollect():
 
     """相似溯源-视频号数据插入"""
     @classmethod
-    def insert_xssy_sph_info(cls, account_user: str, traceable_user: str, traceable_user_v2: str,  has_used: str, appid:Optional[str] = None, pq_id:Optional[str] = None):
-        insert_sql = f"""INSERT INTO xssy_sph (account_user, traceable_user, traceable_user_v2, pq_id, has_used, appid) values ("{account_user}", "{traceable_user}","{traceable_user_v2}","{pq_id}", {has_used},"{appid}")"""
+    def insert_xssy_sph_info(cls, account_user: str, traceable_user: str, channel: str, traceable_user_v2: str,  has_used: str, appid:Optional[str] = None, pq_id:Optional[str] = None):
+        insert_sql = f"""INSERT INTO xssy_sph (account_user, traceable_user, channel, traceable_user_v2, pq_id, has_used, appid) values ("{account_user}", "{traceable_user}","{channel}", "{traceable_user_v2}","{pq_id}", {has_used},"{appid}")"""
         res = MysqlHelper.update_values(
             sql=insert_sql
         )
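
One caveat on the new statement: interpolating values with an f-string breaks on any field that contains a double quote and leaves the insert open to SQL injection. A minimal parameterized sketch of the same insert, assuming direct access to a DB-API connection (pymysql-style %s placeholders; `conn` stands in for whatever MysqlHelper wraps internally):

    def insert_xssy_sph_info_safe(conn, account_user, traceable_user, channel,
                                  traceable_user_v2, has_used, appid=None, pq_id=None):
        # %s placeholders hand quoting/escaping to the driver.
        sql = ("INSERT INTO xssy_sph (account_user, traceable_user, channel, "
               "traceable_user_v2, pq_id, has_used, appid) "
               "VALUES (%s, %s, %s, %s, %s, %s, %s)")
        with conn.cursor() as cur:
            cur.execute(sql, (account_user, traceable_user, channel,
                              traceable_user_v2, pq_id, has_used, appid))
        conn.commit()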

+ 135 - 1
xssy_channel/dy_rdb_nrxs.py

@@ -1,7 +1,141 @@
+import json
+import re
+import time
+from datetime import datetime
 
+import requests
+from common import Feishu, Material, Common
+from common.sql_help import sqlCollect
+from xssy_channel.sph_jr_nrxs import SphNrxs
 
 
 class DyRdbNrxs:
+
     @classmethod
     def get_dy_rdb_nrxs(cls):
-        pass
+        user = sqlCollect.get_machine_making_reflux("抖音", "抖音历史", "相似溯源", "单点视频")
+        if user is None:
+            return
+        user = [item[0] for item in user]
+        # Feishu.bot("xinxin", '抖音溯源提醒', f'今日需溯源账号共{len(user)}条', 'xinxin')
+        for uid in user:
+            if uid.startswith("MS"):
+                # Feishu.bot("xinxin", '视频号溯源提醒', f'开始溯源账号名称{uid}', 'xinxin')
+                cls.get_nrxs_data(uid)
+
+
+    @classmethod
+    def get_nrxs_data(cls, uid):
+        cookie = Material.get_cookie_data("KsoMsyP2ghleM9tzBfmcEEXBnXg", "U1gySe", "热点包-cookie")
+
+        url = f"https://douhot.douyin.com/douhot/v1/author_analysis/fans_interest/similar_author?sec_uid={uid}"
+
+        payload = {}
+        headers = {
+            'accept': 'application/json, text/plain, */*',
+            'accept-language': 'zh-CN,zh;q=0.9',
+            'cookie': cookie,
+            'sec-ch-ua': '"Chromium";v="128", "Not;A=Brand";v="24", "Google Chrome";v="128"',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36'
+        }
+        try:
+            response = requests.request("GET", url, headers=headers, data=payload, timeout=30)  # don't let a hung request stall the scheduled run
+            response = response.json()
+            code = response['code']
+            if code == 0:
+                status = sqlCollect.select_crawler_uesr_v3(uid)
+                if status:
+                    pq_id = re.sub(r'[(),]', '', str(status))
+                else:
+                    pq_id = SphNrxs.insert_number(uid, '499')
+                    if pq_id is None:
+                        return
+                data_list = response['data']
+                if data_list:
+                    for data in data_list:
+                        user_id = data['user_id']
+                        nick_name = data['nick_name']
+                        has_used = cls.get_rdb_data(user_id, cookie)
+                        if has_used is not None:  # 0 ("usable") is falsy, so compare against None explicitly
+                            res = sqlCollect.insert_xssy_sph_info(uid, user_id, "抖音", nick_name, str(has_used), "", pq_id)
+                            if has_used == 0 and res == 1:
+                                current_time = datetime.now()
+                                formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S")
+                                values = [
+                                    [
+                                        "抖音",
+                                        user_id,
+                                        str(pq_id),
+                                        "5",
+                                        "通用-分享到群",
+                                        "AI片尾引导",
+                                        "",
+                                        "",
+                                        "AI标题",
+                                        "",
+                                        f"溯源账号:{uid}",
+                                        formatted_time
+                                    ]
+                                ]
+                                Feishu.insert_columns("WGIYsSDdxhItBwtJ0xgc0yE7nEg", '0701bd', "ROWS", 1, 2)
+                                time.sleep(0.5)
+                                Feishu.update_values("WGIYsSDdxhItBwtJ0xgc0yE7nEg", '0701bd', "B2:Z2",
+                                                     values)
+                                Feishu.bot("xinxin", '抖音溯源成功提示', f'原账号:{uid},溯源到的账号:{user_id},写入账号:{pq_id}', 'xinxin')
+                        else:
+                            sqlCollect.insert_xssy_sph_info(uid, user_id, "抖音", nick_name, "1")
+                sqlCollect.update_machine_making_reflux(uid)
+
+            else:
+                Feishu.bot("xinxin", '热点宝提醒', f'热点宝平台 cookie 失效了,请及时更换', 'xinxin')
+                return None
+        except Exception as e:
+            Feishu.bot("xinxin", '热点宝提醒', '抖音相似溯源请求异常,请检查热点宝 cookie 是否失效', 'xinxin')
+            Common.logger("dy_rdb_nrxs").error(f"用户名:{uid}抖音相似溯源异常:{e}\n")
+            return
+
+
+    @classmethod
+    def get_rdb_data(cls, user_id, cookie):
+        url = "http://8.217.190.241:8888/crawler/dou_yin/re_dian_bao/account_fans_portrait"
+
+        payload = json.dumps({
+            "account_id": user_id,
+            "cookie": cookie
+        })
+        headers = {
+            'Content-Type': 'application/json'
+        }
+
+        response = requests.request("POST", url, headers=headers, data=payload, timeout=30)
+        response = response.json()
+        code = response['code']
+        if code == 0:
+            data = response['data']['data']
+            posts = data['posts']
+            avg_like_count = int(posts['avg_like_count'])
+            avg_share_count = int(posts['avg_share_count'])
+            if avg_share_count == 0:
+                return 2
+            if avg_like_count != 0 and avg_share_count / avg_like_count < 0.02:
+                return 2
+            fans = data['fans']
+            fans_data = fans['age']['data']
+            if fans_data:
+                max_age_group = max(fans_data, key=lambda k: float(fans_data[k]["percentage"].strip('%')))
+                if max_age_group == "50-":
+                    return 0
+                else:
+                    return 3
+        else:
+            Feishu.bot("xinxin", '热点宝提醒', f'热点宝cookie 失效了,请及时更换', 'xinxin')
+            return None
+
+
+if __name__ == '__main__':
+    DyRdbNrxs.get_dy_rdb_nrxs()
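
get_rdb_data boils down to a three-way screening rule on the fan portrait: return 2 when average shares are zero or the share/like ratio is under 2%, 0 when the dominant fan age bucket is "50-", and 3 otherwise. A pure-function restatement that is easy to unit-test (the flattened age dict is illustrative; the real payload nests each bucket under a "percentage" key):

    def classify_portrait(avg_like_count, avg_share_count, age_percentages):
        # 2 = engagement too low, 0 = "50-" bucket dominates, 3 = another bucket dominates.
        if avg_share_count == 0:
            return 2
        if avg_like_count != 0 and avg_share_count / avg_like_count < 0.02:
            return 2
        if not age_percentages:
            return None  # mirrors the implicit fall-through in get_rdb_data
        top = max(age_percentages, key=lambda k: float(age_percentages[k].strip('%')))
        return 0 if top == "50-" else 3

    # classify_portrait(1000, 50, {"50-": "42%", "31-40": "30%"}) -> 0 (usable)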

+ 4 - 4
xssy_channel/sph_jr_nrxs.py

@@ -13,7 +13,7 @@ class SphNrxs:
 
     """创建票圈账号"""
     @classmethod
-    def insert_number(cls, mid):
+    def insert_number(cls, mid, tag_id):
         for i in range(3):
             url = "https://admin.piaoquantv.com/manager/crawler/v3/user/save"
             payload = {
@@ -24,7 +24,7 @@ class SphNrxs:
                 "recomStatus": -7,
                 "appRecomStatus": -7,
                 "autoAuditStatus": 0,
-                "tag": f"7592,452,8776,467",
+                "tag": f"7592,452,8776,{tag_id}",
                 "contentCategory": 0,
                 "link": str(mid)
             }
@@ -254,7 +254,7 @@ class SphNrxs:
                 if status:
                     pq_id = re.sub(r'[(),]', '', str(status))
                 else:
-                    pq_id = cls.insert_number(uid)
+                    pq_id = cls.insert_number(uid, '467')
                     if pq_id == None:
                         return
                 sqlCollect.update_machine_making_reflux(uid)
@@ -273,7 +273,7 @@ class SphNrxs:
                             #     has_used = cls.get_hx(appid)
                             #     if has_used:
                             #         if has_used == '0':
-                            res = sqlCollect.insert_xssy_sph_info(uid, nick_name, user_name, "0", "", pq_id)
+                            res = sqlCollect.insert_xssy_sph_info(uid, nick_name, "视频号", user_name, "0", "", pq_id)
                             if res == 1:
                                 current_time = datetime.now()
                                 formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S")
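
The new tag_id argument is what lets each channel stamp its own crawler tag onto the account it creates: this file passes '467' for Channels, while dy_rdb_nrxs.py passes '499' for Douyin. A small sketch of that mapping (the dict and wrapper are illustrative; only the tag values come from this diff):

    # Channel -> crawler tag, per the two call sites in this commit.
    CHANNEL_TAG = {"视频号": "467", "抖音": "499"}

    def insert_number_for(uid, channel):
        # Hypothetical convenience wrapper around SphNrxs.insert_number.
        tag_id = CHANNEL_TAG.get(channel)
        if tag_id is None:
            raise ValueError(f"unknown channel: {channel}")
        return SphNrxs.insert_number(uid, tag_id)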

+ 15 - 0
xssy_main.py

@@ -1,6 +1,7 @@
 import schedule
 import time
 
+from xssy_channel.dy_rdb_nrxs import DyRdbNrxs
 from xssy_channel.sph_jr_nrxs import SphNrxs
 
 def video_start_sph():
@@ -14,8 +15,22 @@ def video_start_sph():
     print("视频号任务结束")
 
 
+def video_start_dy():
+    print("抖音任务开始...")
+
+    try:
+        DyRdbNrxs.get_dy_rdb_nrxs()
+        print("抖音任务成功完成")
+    except Exception as e:
+        print(f"抖音任务执行过程中发生错误: {e}")
+    print("抖音任务结束")
+
+
 # Run the Channels job every day at 10:30
 schedule.every().day.at("10:30").do(video_start_sph)
+
+schedule.every().day.at("03:30").do(video_start_dy)
+
 # SphNrxs.sph_nrxs_data()
 while True:
     schedule.run_pending()
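
schedule executes jobs inline in this single loop thread, so the 03:30 Douyin job and the 10:30 Channels job can never overlap. For a one-off smoke test, every registered job can be fired immediately with schedule's own run_all (it bypasses the clock times; the 1-second delay between jobs here is arbitrary):

    import schedule

    # Trigger both registered jobs once, ignoring their scheduled times --
    # useful for verifying cookies, DB access, and Feishu wiring up front.
    schedule.run_all(delay_seconds=1)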