jihuaqiang 21 час назад
Родитель
Commit
1beefae2b8

+ 7 - 2
examples/content_finder/db/store_results.py

@@ -172,13 +172,14 @@ def update_content_plan_ids(
     aweme_ids: List[str],
     crawler_plan_id: str = "",
     produce_plan_id: str = "",
+    publish_plan_id: str = "",
 ) -> int:
     """
     更新 demand_find_content_result 中指定内容的计划字段。
 
     约定:
     - 通过 (trace_id, aweme_id) 定位内容行
-    - crawler_plan_id / produce_plan_id 可只传其一:仅更新非空字段
+    - crawler_plan_id / produce_plan_id / publish_plan_id 可只传其一:仅更新非空字段
     - 至少一个计划 id 非空时才执行 UPDATE
     - 内部自行获取并关闭数据库连接
     """
@@ -186,7 +187,8 @@ def update_content_plan_ids(
         return 0
     c = (crawler_plan_id or "").strip()
     p = (produce_plan_id or "").strip()
-    if not c and not p:
+    pub = (publish_plan_id or "").strip()
+    if not c and not p and not pub:
         return 0
 
     set_parts: List[str] = []
@@ -197,6 +199,9 @@ def update_content_plan_ids(
     if p:
         set_parts.append("produce_plan_id = %s")
         params.append(p)
+    if pub:
+        set_parts.append("publish_plan_id = %s")
+        params.append(pub)
 
     sql = f"""
     UPDATE demand_find_content_result

+ 1 - 1
examples/content_finder/server.py

@@ -69,7 +69,7 @@ task_semaphore = asyncio.Semaphore(MAX_CONCURRENT_TASKS)
 
 # 定时:轮询间隔(分钟)、单次任务超时(秒,默认 15 分钟)
 SCHEDULE_INTERVAL_MINUTES = int(os.getenv("SCHEDULE_INTERVAL_MINUTES", "2"))
-TASK_TIMEOUT_SECONDS = int(os.getenv("SCHEDULE_TASK_TIMEOUT_SECONDS", "900"))
+TASK_TIMEOUT_SECONDS = int(os.getenv("SCHEDULE_TASK_TIMEOUT_SECONDS", "1200"))
 
 # 统计信息
 stats = {

+ 7 - 3
examples/content_finder/tools/aigc_platform_api.py

@@ -20,12 +20,14 @@ logger = logging.getLogger(__name__)
 _LABEL_ACCOUNT = "工具调用:create_crawler_plan_by_douyin_account_id -> 按抖音账号创建爬取计划"
 _LABEL_CONTENT = "工具调用:create_crawler_plan_by_douyin_content_id -> 按抖音视频创建爬取计划"
 
+AIGC_DEMAND_DOUYIN_CONTENT_PUBLISH_PLAN_ID=20260320065232171836746
+
 
 def _log_aigc_return(label: str, params: Dict[str, Any], r: ToolResult) -> ToolResult:
     log_tool_call(label, params, format_tool_result_for_log(r))
     return r
 
-CAN_NOT_CREATE_PLAN = True
+CAN_NOT_CREATE_PLAN = False
 
 AIGC_BASE_URL = "https://aigc-api.aiddit.com"
 CRAWLER_PLAN_CREATE_URL = f"{AIGC_BASE_URL}/aigc/crawler/plan/save"
@@ -413,14 +415,16 @@ async def create_crawler_plan_by_douyin_content_id(
                     summary_lines.append(f"            绑定结果: {'绑定成功' if not produce_plan_info.get('msg') else '绑定失败'}")
                     summary_lines.append(f"            信息: {produce_plan_info.get('msg', '成功')}")
 
-        # 爬取计划 id 与生成计划 id 任一存在则写库(不依赖是否已配置 produce_plan_ids 去走绑定)
-        if (crawler_plan_id or "").strip() or env_produce_plan_id:
+        publish_plan_id_str = str(AIGC_DEMAND_DOUYIN_CONTENT_PUBLISH_PLAN_ID).strip()
+        # 爬取 / 生成 / 发布计划 id 任一存在则写库(不依赖是否已配置 produce_plan_ids 去走绑定)
+        if (crawler_plan_id or "").strip() or env_produce_plan_id or publish_plan_id_str:
             try:
                 db_updated_rows = update_content_plan_ids(
                     trace_id=trace_id,
                     aweme_ids=content_ids,
                     crawler_plan_id=crawler_plan_id or "",
                     produce_plan_id=env_produce_plan_id,
+                    publish_plan_id=publish_plan_id_str,
                 )
             except Exception as e:
                 logger.error(f"update content plan ids failed: {e}", exc_info=True)