""" 分类优化结果可视化工具 功能: 1. 读取优化后的聚类数据 (optimized_clustered_data_*.json) 2. 生成交互式HTML树形可视化 3. 支持查看帖子详情和所有点的信息 4. 区分显示原始分类、LLM抽象、LLM细分 """ import json import os from typing import Dict, Any, List, Optional from pathlib import Path from datetime import datetime class ClassificationTreeVisualizer: """分类树可视化工具""" def __init__(self): self.post_cache: Dict[str, Dict[str, Any]] = {} self.xuanti_point_map: Dict[str, Dict[str, Any]] = {} self.clustered_data: Dict[str, Any] = {} self.dimension_associations: Dict[str, Any] = {} self.intra_dimension_associations: Dict[str, Any] = {} def load_post_data(self, post_id: str, posts_dir: Path) -> Optional[Dict[str, Any]]: """加载帖子详细数据""" if post_id in self.post_cache: return self.post_cache[post_id] post_file = posts_dir / f"{post_id}.json" if not post_file.exists(): return None try: with open(post_file, 'r', encoding='utf-8') as f: post_data = json.load(f) self.post_cache[post_id] = post_data return post_data except Exception as e: print(f"加载帖子 {post_id} 失败: {e}") return None def generate_tree_node_html( self, node_name: str, node_data: Dict[str, Any], level: int, point_type: str, path: List[str] ) -> str: """递归生成树节点的HTML - 支持部分细分结构""" import html as html_module node_name_escaped = html_module.escape(node_name) current_path = path + [node_name] node_id = f"{point_type}_{'_'.join(current_path)}".replace('/', '_').replace(' ', '_') meta = node_data.get('_meta', {}) source = meta.get('分类来源', '') # 检查是否有保留的点 has_kept_points = '点列表' in node_data and len(node_data.get('点列表', [])) > 0 # 检查是否有子分类 has_children = False for key in node_data.keys(): if key not in ['_meta', '点列表', '帖子数', '点数', '帖子列表'] and isinstance(node_data[key], dict): has_children = True break # 确定节点样式 if source == 'LLM抽象' or source == 'LLM细分': node_class = f"tree-node tree-node-llm level-{level}" else: node_class = f"tree-node tree-node-original level-{level}" html = f'
\n' html += f'
\n' html += f' \n' html += f' {node_name_escaped}\n' # 只有存在下级分类节点时,才显示展开/收起全部按钮 if has_children: html += f' \n' html += f'
\n' html += f'
\n' html += '
\n' # 1. 先显示保留在原分类的点(如果有) if has_kept_points: point_name_field = "灵感点" if point_type == "灵感点列表" else ( "目的点" if point_type == "目的点" else "关键点" ) for point in node_data.get('点列表', []): point_name = point.get(point_name_field, '') if not point_name: continue point_name_escaped = html_module.escape(point_name) point_id = f"{node_id}_kept_{point_name}".replace('/', '_').replace(' ', '_') # 获取封面图 post_id = point.get('帖子id', '') thumbnail_html = '' if post_id and post_id in self.post_cache: post_data = self.post_cache[post_id] images = post_data.get('images', []) if images and len(images) > 0: first_image = html_module.escape(images[0]) thumbnail_html = f'封面图' html += f'
\n' html += f'
\n' html += f' 📄\n' html += f' {point_name_escaped}\n' html += f' {thumbnail_html}\n' html += f'
\n' html += '
\n' # 2. 再显示子分类(如果有) if has_children: for child_name, child_data in node_data.items(): if child_name in ['_meta', '点列表', '帖子数', '点数', '帖子列表']: continue if isinstance(child_data, dict): html += self.generate_tree_node_html( child_name, child_data, level + 1, point_type, current_path ) html += '
\n' html += '
\n' html += '
\n' return html def generate_html( self, clustered_data: Dict[str, Any], posts_dir: Path, xuanti_point_map: Dict[str, Dict[str, Any]], dimension_associations: Optional[Dict[str, Any]] = None, intra_dimension_associations: Optional[Dict[str, Any]] = None, expanded_orthogonal_combinations: Optional[Dict[str, Any]] = None, enriched_xuanti_point_map: Optional[Dict[str, Any]] = None ) -> str: """生成完整的HTML页面""" self.xuanti_point_map = xuanti_point_map self.clustered_data = clustered_data if dimension_associations: self.dimension_associations = dimension_associations if intra_dimension_associations: self.intra_dimension_associations = intra_dimension_associations if expanded_orthogonal_combinations: self.expanded_orthogonal_combinations = expanded_orthogonal_combinations if enriched_xuanti_point_map: self.enriched_xuanti_point_map = enriched_xuanti_point_map # 从clustered_data构建完整的帖子ID到特征对象的映射 self.post_to_features_map = self._build_post_to_features_map(clustered_data) # 预加载所有帖子数据 all_post_ids = set() for point_type_data in clustered_data.values(): self._collect_post_ids(point_type_data, all_post_ids) # 从xuanti_point_map中也加载帖子ID for post_id in xuanti_point_map.keys(): all_post_ids.add(post_id) for post_id in all_post_ids: self.load_post_data(post_id, posts_dir) # 生成HTML html = self._generate_html_head() html += '\n' # Tab切换结构 html += '
\n' html += '
\n' html += ' \n' html += ' \n' html += ' \n' html += ' \n' html += '
\n' html += '
\n' # Tab 1: 帖子视角 - 原始选题点 html += '
\n' html += self._generate_tab1_content() html += '
\n' # Tab 2: 特征视角 - 分类优化结果 html += '
\n' html += self._generate_tab2_content(clustered_data) html += '
\n' # Tab 3: 叶子分类组合聚类 html += '
\n' if self.intra_dimension_associations: html += self._generate_tab4_content() else: html += '
未加载叶子分类组合聚类数据
\n' html += '
\n' # Tab 4: 维度关联分析 html += '
\n' if self.dimension_associations: html += self._generate_tab3_content() else: html += '
未加载维度关联分析数据
\n' html += '
\n' # 添加弹窗容器 html += ''' ''' # 添加JavaScript html += self._generate_javascript(posts_dir) html += '\n\n' return html def _generate_tab1_content(self) -> str: """生成Tab1内容:帖子视角 - 原始选题点""" import html as html_module html = '
\n' # 按帖子ID排序 sorted_post_ids = sorted(self.xuanti_point_map.keys()) for post_id in sorted_post_ids: xuanti_point = self.xuanti_point_map[post_id] post_data = self.post_cache.get(post_id) if not post_data: continue # 帖子卡片 html += '
\n' # 左侧:帖子信息 html += '
\n' html += f'
{html_module.escape(post_data.get("title", "无标题"))}
\n' # 封面图 images = post_data.get('images', []) if images and len(images) > 0: first_image = html_module.escape(images[0]) html += f' 封面\n' html += f'
ID: {html_module.escape(post_id[:12])}...
\n' html += '
\n' # 右侧:选题点树状结构 html += '
\n' # 遍历三种点类型 for point_type in ["灵感点列表", "目的点", "关键点列表"]: points = xuanti_point.get(point_type, []) if not points: continue point_name_field = "灵感点" if point_type == "灵感点列表" else ( "目的点" if point_type == "目的点" else "关键点" ) html += f'
\n' html += f'
\n' html += f' {point_type} ({len(points)})\n' html += '
\n' html += '
\n' html += '
\n' # 遍历该类型下的所有点 for idx, point in enumerate(points): point_name = point.get(point_name_field, '') point_id = f"tab1_{post_id}_{point_type}_{idx}" html += f'
\n' html += f'
\n' html += f' \n' html += f' {html_module.escape(point_name)}\n' html += '
\n' # 点的内容:只显示特征列表(扁平化展示) html += f'
\n' # 显示提取的特征(扁平化展示为标签) features = point.get('提取的特征', []) if features: html += '
\n' for feature_idx, feature in enumerate(features): feature_name = feature.get('特征名称', '') feature_weight = feature.get('权重', 0) feature_level1 = feature.get('一级分类', '') feature_level2 = feature.get('二级分类', '') # 构建特征分类标签 feature_class_tag = '' if feature_level1: feature_class_tag = f'{html_module.escape(feature_level1)}' if feature_level2: feature_class_tag += f' / {html_module.escape(feature_level2)}' # 扁平化的特征标签样式 html += f'
\n' html += f' 🔖 {html_module.escape(feature_name)}\n' html += f' ({feature_weight})\n' if feature_class_tag: html += f' {feature_class_tag}\n' html += '
\n' html += '
\n' html += '
\n' html += '
\n' # 在点之间添加分割线(最后一个点不添加) if idx < len(points) - 1: html += '
\n' html += '
\n' html += '
\n' html += '
\n' html += '
\n' html += '
\n' html += '
\n'
        return html

    def _generate_tab2_content(self, clustered_data: Dict[str, Any]) -> str:
        """Generate the content of Tab 2: feature view - classification optimization results."""
        html = '
\n' # 为每种点类型生成树 for point_type in ["灵感点列表", "目的点", "关键点列表"]: type_data = clustered_data.get(point_type, {}) if type_data: html += f'
\n' html += f'
{point_type}
\n' html += '
\n' for node_name, node_data in type_data.items(): html += self.generate_feature_tree_node_html( node_name, node_data, 0, point_type, [] ) html += '
\n' html += '
\n' html += '
\n'
        return html

    def _generate_tab3_content(self) -> str:
        """Generate the content of Tab 3: dimension association analysis - interactive classification tree."""
        html = '
\n' html += '

维度关联分析

\n' # 模式切换按钮 html += '
\n' html += '
\n' html += ' \n' html += ' \n' html += '
\n' html += '
\n' html += ' 点击一个维度的分类,查看它与其他维度的关联关系\n' html += ' \n' html += '
\n' html += '
\n' # 单维度关联模式:三列布局 html += '
\n' dimensions = [ ("灵感点列表", "灵感点"), ("目的点", "目的点"), ("关键点列表", "关键点") ] for point_type, display_name in dimensions: html += f'
\n' html += f'

{display_name}

\n' html += f'
\n' # 生成分类树 type_data = self.clustered_data.get(point_type, {}) if type_data: for node_name, node_data in type_data.items(): html += self._generate_tab3_tree_node( node_name, node_data, 0, point_type, [] ) html += '
\n' html += '
\n' html += '
\n' # 三维正交关联模式:全屏平铺布局 html += '\n' html += '
\n'
        return html

    def _generate_tab3_tree_node(
        self,
        node_name: str,
        node_data: Dict[str, Any],
        level: int,
        point_type: str,
        path: List[str]
    ) -> str:
        """Recursively generate the HTML of a Tab 3 tree node, storing association info in data attributes."""
        import html as html_module

        node_name_escaped = html_module.escape(node_name)
        current_path = path + [node_name]
        path_str = '/'.join(current_path)
        node_id = f"tab3_{point_type}_{'_'.join(current_path)}".replace('/', '_').replace(' ', '_').replace('(', '').replace(')', '')

        meta = node_data.get('_meta', {})
        source = meta.get('分类来源', '')

        # Check whether this node has sub-categories
        has_children = any(
            key not in ['_meta', '特征列表', '点列表', '帖子数', '特征数', '点数', '帖子列表'] and isinstance(node_data[key], dict)
            for key in node_data.keys()
        )

        # Determine the node style: no green here; a light gray background marks LLM-generated categories
        if source in ['LLM抽象', 'LLM细分']:
            bg_color = '#f0f0f0'
            text_color = '#333'
            border_style = '1px solid #d0d0d0'
        else:
            bg_color = '#fafafa'
            text_color = '#333'
            border_style = '1px solid #e0e0e0'

        # Node container
        html = f'
\n' # 节点头部 - 使用data属性存储分类路径和维度信息以及原始样式 padding_left = level * 16 html += f'
▼\n' else: html += ' \n' html += f' {node_name_escaped}\n' html += '
\n' # 封面图容器 - 用于显示该分类下的帖子封面图 html += f' \n' # 子节点容器 - 默认展开 if has_children: html += f'
\n' for child_name, child_data in node_data.items(): if child_name not in ['_meta', '特征列表', '点列表', '帖子数', '特征数', '点数', '帖子列表'] and isinstance(child_data, dict): html += self._generate_tab3_tree_node( child_name, child_data, level + 1, point_type, current_path ) html += '
\n' html += '
\n'
        return html

    def _generate_leaf_color(self, index: int, total: int) -> str:
        """Generate a distinct color for each leaf category (using the HSL color wheel)."""
        # Distribute hues evenly over the 0-360 degree HSL color wheel.
        # Lower the lightness to 45% and raise the saturation to 85% so that white text stands out.
        hue = int((index * 360) / max(total, 1))
        return f'hsl({hue}, 85%, 45%)'

    def _highlight_features_in_point_name(self, point_name: str, features: list, leaf_color_map: dict) -> str:
        """Highlight feature terms inside a point name, using the color of the corresponding leaf category."""
        import html as html_module

        # Build a mapping from feature name to color
        feature_colors = {}
        for feature in features:
            feature_name = feature.get('特征名称', '')
            leaf_class = feature.get('叶子分类', '')
            if feature_name and leaf_class in leaf_color_map:
                feature_colors[feature_name] = leaf_color_map[leaf_class]

        # Sort feature names by length, longest first, so that replacing a short
        # name does not prevent a longer one from matching.
        sorted_features = sorted(feature_colors.items(), key=lambda x: len(x[0]), reverse=True)

        # Escape the whole string first
        highlighted_name = html_module.escape(point_name)

        # Replace each feature term with a colored span
        for feature_name, color in sorted_features:
            escaped_name = html_module.escape(feature_name)
            if escaped_name in highlighted_name:
                highlighted_name = highlighted_name.replace(
                    escaped_name,
                    f'{escaped_name}'
                )

        return highlighted_name

    def _generate_tab4_content(self) -> str:
        """Generate the content of Tab 4: leaf-category combination clustering."""
        import html as html_module

        html = '
\n' html += '

叶子分类组合聚类分析

\n' # 获取聚类数据 clustering_data = self.intra_dimension_associations.get('叶子分类组合聚类', {}) dimensions = [ ("灵感点", "#9C27B0"), ("目的点", "#E91E63"), ("关键点", "#FF9800") ] for dimension_name, dimension_color in dimensions: dimension_clusters = clustering_data.get(dimension_name, {}) # 按点数倒序排列 sorted_clusters = sorted( dimension_clusters.items(), key=lambda x: x[1]['点数'], reverse=True ) html += f'
\n' html += f'

\n' html += f' {dimension_name} ({len(sorted_clusters)} 个聚类)\n' html += '

\n' if not sorted_clusters: html += '

暂无聚类数据

\n' else: html += '
\n' for cluster_key, cluster_data in sorted_clusters: leaf_classifications = cluster_data.get('叶子分类组合', []) point_count = cluster_data.get('点数', 0) points_details = cluster_data.get('点详情列表', []) html += '
\n' # 建立叶子分类到颜色的映射 total_classifications = len(leaf_classifications) leaf_color_map = {} for idx, leaf_class in enumerate(leaf_classifications): leaf_color_map[leaf_class] = self._generate_leaf_color(idx, total_classifications) # 标题:叶子分类标签 + 点数徽章 html += '
\n' # 叶子分类标签 - 每个分类使用不同颜色 for leaf_class in leaf_classifications: leaf_color = leaf_color_map[leaf_class] html += f'{html_module.escape(leaf_class)}\n' # 点数徽章 html += f'{point_count} 个点\n' html += '
\n' # 点详情列表 html += '
\n' for idx, point_detail in enumerate(points_details[:10]): # 最多显示10个点 point_name = point_detail.get('点名称', '') point_desc = point_detail.get('点描述', '') features = point_detail.get('特征列表', []) # 点卡片 html += f'
\n' # 点名称(高亮显示特征词) highlighted_point_name = self._highlight_features_in_point_name(point_name, features, leaf_color_map) html += f'
{highlighted_point_name}
\n' # 特征列表 if features: html += '
\n' for feature in features: feature_name = feature.get('特征名称', '') full_path = feature.get('完整路径', '') weight = feature.get('权重', 0) # 特征容器(分两行显示) html += '
\n' # 第一行:特征名称 + 权重 html += '
\n' html += f'\n' html += f'{html_module.escape(feature_name)}\n' html += f'权重 {weight}\n' html += '
\n' # 第二行:完整分类路径(缩进),使用灰色背景 html += '
\n' html += f'{html_module.escape(full_path)}\n' html += '
\n' html += '
\n' html += '
\n' html += '
\n' if len(points_details) > 10: html += f'
... 还有 {len(points_details) - 10} 个点
\n' html += '
\n' html += '
\n' html += '
\n' html += '
\n' html += '
\n'
        return html

    def generate_feature_tree_node_html(
        self,
        node_name: str,
        node_data: Dict[str, Any],
        level: int,
        point_type: str,
        path: List[str]
    ) -> str:
        """Recursively generate the HTML of a feature tree node."""
        import html as html_module

        node_name_escaped = html_module.escape(node_name)
        current_path = path + [node_name]
        node_id = f"tab2_{point_type}_{'_'.join(current_path)}".replace('/', '_').replace(' ', '_')

        meta = node_data.get('_meta', {})
        source = meta.get('分类来源', '')

        # Check whether this node keeps any features of its own
        has_kept_features = '特征列表' in node_data and len(node_data.get('特征列表', [])) > 0

        # Check whether this node has sub-categories
        has_children = False
        for key in node_data.keys():
            if key not in ['_meta', '特征列表', '帖子数', '特征数', '帖子列表'] and isinstance(node_data[key], dict):
                has_children = True
                break

        # Determine the node style
        if source == 'LLM抽象' or source == 'LLM细分':
            node_class = f"tree-node tree-node-llm level-{level}"
        else:
            node_class = f"tree-node tree-node-original level-{level}"

        html = f'
\n' html += f'
\n' html += f' \n' html += f' {node_name_escaped}\n' if has_children: html += f' \n' html += f'
\n' html += f'
\n' html += '
\n' # 1. 先显示保留在原分类的特征(如果有) if has_kept_features: features_list = node_data.get('特征列表', []) # 去重特征名称用于显示 unique_features = {} for feature in features_list: feature_name = feature.get("特征名称", "") if feature_name not in unique_features: unique_features[feature_name] = [] unique_features[feature_name].append(feature) for feature_name, feature_instances in unique_features.items(): feature_name_escaped = html_module.escape(feature_name) feature_id = f"{node_id}_kept_{feature_name}".replace('/', '_').replace(' ', '_') count = len(feature_instances) # 计算权重平均值 weights = [f.get('权重', 0) for f in feature_instances] avg_weight = sum(weights) / len(weights) if weights else 0 avg_weight_str = f"{avg_weight:.2f}" html += f'
\n' html += f'
\n' html += f'
\n' html += f' 🔖\n' html += f' {feature_name_escaped}\n' html += f' ×{count}\n' html += f'
\n' html += f' ⚖️ {avg_weight_str}\n' html += f'
\n' html += '
\n' # 2. 再显示子分类(如果有) if has_children: for child_name, child_data in node_data.items(): if child_name in ['_meta', '特征列表', '帖子数', '特征数', '帖子列表']: continue if isinstance(child_data, dict): html += self.generate_feature_tree_node_html( child_name, child_data, level + 1, point_type, current_path ) html += '
\n' html += '
\n' html += '
\n' return html def _build_post_to_features_map(self, clustered_data: Dict[str, Any]) -> Dict[str, Dict[str, list]]: """从clustered_data构建帖子ID到完整特征对象的映射""" post_to_features = {} def collect_features(node: Any, point_type: str): """递归收集所有特征""" if not isinstance(node, dict): return # 如果有特征列表,收集所有特征 if '特征列表' in node: for feature in node['特征列表']: post_id = feature.get('帖子id') if post_id: if post_id not in post_to_features: post_to_features[post_id] = { '灵感点列表': [], '目的点': [], '关键点列表': [] } post_to_features[post_id][point_type].append(feature) # 递归处理子节点 for key, value in node.items(): if key != '_meta' and isinstance(value, dict): collect_features(value, point_type) # 收集三种类型的特征 for point_type in ['灵感点列表', '目的点', '关键点列表']: if point_type in clustered_data: collect_features(clustered_data[point_type], point_type) return post_to_features def _collect_post_ids(self, data: Any, post_ids: set): """递归收集所有帖子ID""" if isinstance(data, dict): if '帖子列表' in data: post_ids.update(data['帖子列表']) for value in data.values(): self._collect_post_ids(value, post_ids) def _generate_html_head(self) -> str: """生成HTML头部""" return ''' 选题点分析可视化 ''' def _generate_javascript(self, posts_dir: Path) -> str: """生成JavaScript代码""" import json as json_module post_cache_json = json_module.dumps(self.post_cache, ensure_ascii=True) xuanti_point_map_json = json_module.dumps(self.xuanti_point_map, ensure_ascii=True) clustered_data_json = json_module.dumps(self.clustered_data, ensure_ascii=True) post_to_features_map_json = json_module.dumps(self.post_to_features_map, ensure_ascii=True) js_code = ''' ''' return js_code def visualize_classification_tree( optimized_data_path: str, posts_dir: str, xuanti_point_map: Dict[str, Dict[str, Any]], output_path: str = None, dimension_associations_path: str = None, intra_dimension_associations_path: str = None, expanded_orthogonal_combinations_path: str = None, enriched_xuanti_point_map_path: str = None ) -> str: """可视化分类树""" with open(optimized_data_path, 'r', encoding='utf-8') as f: clustered_data = json.load(f) # 加载跨维度关联分析数据(如果提供了路径) dimension_associations = None if dimension_associations_path and os.path.exists(dimension_associations_path): try: with open(dimension_associations_path, 'r', encoding='utf-8') as f: dimension_associations = json.load(f) print(f"✅ 已加载跨维度关联分析数据: {dimension_associations_path}") except Exception as e: print(f"⚠️ 加载跨维度关联分析数据失败: {e}") # 加载维度内部关联分析数据(如果提供了路径) intra_dimension_associations = None if intra_dimension_associations_path and os.path.exists(intra_dimension_associations_path): try: with open(intra_dimension_associations_path, 'r', encoding='utf-8') as f: intra_dimension_associations = json.load(f) print(f"✅ 已加载维度内部关联分析数据: {intra_dimension_associations_path}") except Exception as e: print(f"⚠️ 加载维度内部关联分析数据失败: {e}") # 加载扩展正交组合数据(新增) expanded_orthogonal_combinations = None if expanded_orthogonal_combinations_path and os.path.exists(expanded_orthogonal_combinations_path): try: with open(expanded_orthogonal_combinations_path, 'r', encoding='utf-8') as f: expanded_orthogonal_combinations = json.load(f) print(f"✅ 已加载扩展正交组合数据: {expanded_orthogonal_combinations_path}") except Exception as e: print(f"⚠️ 加载扩展正交组合数据失败: {e}") # 加载丰富选题点映射数据(新增) enriched_xuanti_point_map = None if enriched_xuanti_point_map_path and os.path.exists(enriched_xuanti_point_map_path): try: with open(enriched_xuanti_point_map_path, 'r', encoding='utf-8') as f: enriched_xuanti_point_map = json.load(f) print(f"✅ 已加载丰富选题点映射数据: {enriched_xuanti_point_map_path}") except Exception as e: print(f"⚠️ 
加载丰富选题点映射数据失败: {e}")

    visualizer = ClassificationTreeVisualizer()
    html_content = visualizer.generate_html(
        clustered_data,
        Path(posts_dir),
        xuanti_point_map,
        dimension_associations,
        intra_dimension_associations,
        expanded_orthogonal_combinations,
        enriched_xuanti_point_map
    )

    if output_path is None:
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        base_dir = os.path.dirname(optimized_data_path)
        output_path = os.path.join(base_dir, f"visualization/classification_tree_visualization_{timestamp}.html")

    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    with open(output_path, 'w', encoding='utf-8') as f:
        f.write(html_content)

    print(f"✅ 可视化文件已生成: {output_path}")
    return output_path


if __name__ == "__main__":
    account_name = "阿里多多酱"
    base_dir = "/Users/nieqi/Documents/workspace/python/image_article_comprehension/aiddit/pattern/pattern_from_xuanti_point_label"
    mode = "detail"

    optimized_data_path = f"{base_dir}/result/{account_name}/optimization/{mode}/optimized_clustered_data_gemini-3-pro-preview.json"
    posts_dir = f"/Users/nieqi/Documents/workspace/python/image_article_comprehension/aigc_data/{account_name}"
    dimension_associations_path = f"{base_dir}/result/{account_name}/optimization/{mode}/dimension_associations_analysis.json"
    intra_dimension_associations_path = f"{base_dir}/result/{account_name}/optimization/{mode}/intra_dimension_associations_analysis.json"
    expanded_orthogonal_combinations_path = f"{base_dir}/result/{account_name}/optimization/detail/orthogonal_combinations_clustering.json"
    enriched_xuanti_point_map_path = f"{base_dir}/result/{account_name}/optimization/{mode}/enriched_xuanti_point_map.json"

    from aiddit.pattern.pattern_from_xuanti_point_label import pattern_utils

    xuanti_point_map = pattern_utils.get_xuanti_point__map(account_name)

    output_path = visualize_classification_tree(
        optimized_data_path=optimized_data_path,
        posts_dir=posts_dir,
        xuanti_point_map=xuanti_point_map,
        dimension_associations_path=dimension_associations_path,
        intra_dimension_associations_path=intra_dimension_associations_path,
        expanded_orthogonal_combinations_path=expanded_orthogonal_combinations_path,
        enriched_xuanti_point_map_path=enriched_xuanti_point_map_path
    )

    print(f"🎉 可视化完成!请在浏览器中打开: {output_path}")
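

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original pipeline): the clustered data
# consumed above is a nested dict whose keys are category names, with special
# keys such as '_meta', '特征列表' / '点列表' and '帖子列表' holding payloads
# instead of sub-categories. The hypothetical helper below mirrors the
# traversal pattern used by _build_post_to_features_map / _collect_post_ids
# on a toy structure, assuming leaf features carry a '帖子id' field as in the
# real data. The category names and post ids in the toy tree are made up.
def _demo_collect_post_ids_sketch() -> set:
    """Walk a toy clustered-data tree and collect every referenced post id."""
    toy_tree = {
        "灵感点列表": {
            "生活方式": {
                "_meta": {"分类来源": "LLM抽象"},
                "特征列表": [
                    {"特征名称": "露营", "权重": 0.8, "帖子id": "post_001"},
                ],
                "户外活动": {
                    "特征列表": [
                        {"特征名称": "徒步", "权重": 0.6, "帖子id": "post_002"},
                    ],
                },
            }
        }
    }

    post_ids: set = set()

    def walk(node):
        # Leaf payloads live under '特征列表'; every other dict value is a sub-category.
        if not isinstance(node, dict):
            return
        for feature in node.get("特征列表", []):
            if feature.get("帖子id"):
                post_ids.add(feature["帖子id"])
        for key, value in node.items():
            if key != "_meta" and isinstance(value, dict):
                walk(value)

    walk(toy_tree)
    return post_ids  # expected: {'post_001', 'post_002'}


# Example (commented out so the module's __main__ behaviour is unchanged):
# print(_demo_collect_post_ids_sketch())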