{
    "5 to 10 seconds of reference audio, useful for specifying speaker.": "5 到 10 秒的参考音频,适用于指定音色。",
    "A text-to-speech model based on VQ-GAN and Llama developed by [Fish Audio](https://fish.audio).": "由 [Fish Audio](https://fish.audio) 研发的基于 VQ-GAN 和 Llama 的多语种语音合成.",
    "Accumulate Gradient Batches": "梯度累积批次",
    "Add to Processing Area": "加入处理区",
    "Added path successfully!": "添加路径成功!",
    "Advanced Config": "高级参数",
    "Base LLAMA Model": "基础 LLAMA 模型",
    "Batch Size": "批次大小",
    "Chinese": "中文",
    "Compile Model": "编译模型",
    "Compile the model can significantly reduce the inference time, but will increase cold start time": "编译模型可以显著减少推理时间,但会增加冷启动时间",
    "Copy": "复制",
    "Data Preprocessing": "数据预处理",
    "Data Preprocessing Path": "数据预处理路径",
    "Data Source": "数据源",
    "Decoder Model Config": "解码器模型配置",
    "Decoder Model Path": "解码器模型路径",
    "Disabled": "禁用",
    "Enable Reference Audio": "启用参考音频",
    "English": "英文",
    "Error Message": "错误信息",
    "File Preprocessing": "文件预处理",
    "Generate": "生成",
    "Generated Audio": "音频",
    "If there is no corresponding text for the audio, apply ASR for assistance, support .txt or .lab format": "如果音频没有对应的文本,可以应用 ASR 辅助,支持 .txt 或 .lab 格式",
    "Infer interface is closed": "推理界面已关闭",
    "Inference Configuration": "推理配置",
    "Inference Server Configuration": "推理服务器配置",
    "Inference Server Error": "推理服务器错误",
    "Inferring interface is launched at {}": "推理界面已在 {} 上启动",
    "Initial Learning Rate": "初始学习率",
    "Input Audio & Source Path for Transcription": "输入音频和转录源路径",
    "Input Text": "输入文本",
    "Invalid path: {}": "无效路径: {}",
    "It is recommended to use CUDA, if you have low configuration, use CPU": "建议使用 CUDA,如果配置较低,使用 CPU",
    "Iterative Prompt Length, 0 means off": "迭代提示长度,0 表示关闭",
    "Japanese": "日文",
    "LLAMA Configuration": "LLAMA 配置",
    "LLAMA Model Config": "LLAMA 模型配置",
    "LLAMA Model Path": "LLAMA 模型路径",
    "Labeling Device": "标注加速设备",
    "LoRA Model to be merged": "要合并的 LoRA 模型",
    "Maximum Audio Duration": "最大音频时长",
    "Maximum Length per Sample": "每个样本的最大长度",
    "Maximum Training Steps": "最大训练步数",
    "Maximum tokens per batch, 0 means no limit": "每批最大令牌数,0 表示无限制",
    "Merge": "合并",
    "Merge LoRA": "合并 LoRA",
    "Merge successfully": "合并成功",
    "Minimum Audio Duration": "最小音频时长",
    "Model Output Path": "模型输出路径",
    "Model Size": "模型规模",
    "Move": "移动",
    "Move files successfully": "移动文件成功",
    "No audio generated, please check the input text.": "没有生成音频,请检查输入文本.",
    "No selected options": "没有选择的选项",
    "Number of Workers": "数据加载进程数",
    "Open Inference Server": "打开推理服务器",
    "Open Labeler WebUI": "打开标注工具",
    "Open Tensorboard": "打开 Tensorboard",
    "Opened labeler in browser": "在浏览器中打开标注工具",
    "Optional Label Language": "[可选] 标注语言",
    "Optional online ver": "[可选] 使用在线版",
    "Output Path": "输出路径",
    "Path error, please check the model file exists in the corresponding path": "路径错误,请检查模型文件是否存在于相应路径",
    "Precision": "精度",
    "Probability of applying Speaker Condition": "应用说话人条件的概率",
    "Put your text here.": "在此处输入文本.",
    "Reference Audio": "参考音频",
    "Reference Text": "参考文本",
    "Related code are released under BSD-3-Clause License, and weights are released under CC BY-NC-SA 4.0 License.": "相关代码使用 BSD-3-Clause 许可证发布,权重使用 CC BY-NC-SA 4.0 许可证发布.",
    "Remove Selected Data": "移除选中数据",
    "Removed path successfully!": "移除路径成功!",
    "Repetition Penalty": "重复惩罚",
    "Save model every n steps": "每 n 步保存模型",
    "Select LLAMA ckpt": "选择 LLAMA 检查点",
    "Select VITS ckpt": "选择 VITS 检查点",
    "Select VQGAN ckpt": "选择 VQGAN 检查点",
    "Select source file processing method": "选择源文件处理方法",
    "Select the model to be trained": "选择要训练的模型",
    "Selected: {}": "已选择: {}",
    "Speaker": "说话人",
    "Speaker is identified by the folder name": "自动根据父目录名称识别说话人",
    "Start Training": "开始训练",
    "Streaming Audio": "流式音频",
    "Streaming Generate": "流式合成",
    "Tensorboard Host": "Tensorboard 监听地址",
    "Tensorboard Log Path": "Tensorboard 日志路径",
    "Tensorboard Port": "Tensorboard 端口",
    "Tensorboard interface is closed": "Tensorboard 界面已关闭",
    "Tensorboard interface is launched at {}": "Tensorboard 界面已在 {} 上启动",
    "Text is too long, please keep it under {} characters.": "文本太长,请保持在 {} 个字符以内.",
    "The path of the input folder on the left or the filelist. Whether checked or not, it will be used for subsequent training in this list.": "左侧输入文件夹的路径或文件列表。无论是否选中,都将在此列表中用于后续训练.",
    "Training Configuration": "训练配置",
    "Training Error": "训练错误",
    "Training stopped": "训练已停止",
    "Type name of the speaker": "输入说话人的名称",
    "Type the path or select from the dropdown": "输入路径或从下拉菜单中选择",
    "Use LoRA": "使用 LoRA",
    "Use LoRA can save GPU memory, but may reduce the quality of the model": "使用 LoRA 可以节省 GPU 内存,但可能会降低模型质量",
    "Use filelist": "使用文件列表",
    "Use large for 10G+ GPU, medium for 5G, small for 2G": "10G+ GPU 使用 large, 5G 使用 medium, 2G 使用 small",
    "VITS Configuration": "VITS 配置",
    "VQGAN Configuration": "VQGAN 配置",
    "Validation Batch Size": "验证批次大小",
    "View the status of the preprocessing folder (use the slider to control the depth of the tree)": "查看预处理文件夹的状态 (使用滑块控制树的深度)",
    "We are not responsible for any misuse of the model, please consider your local laws and regulations before using it.": "我们不对模型的任何滥用负责,请在使用之前考虑您当地的法律法规.",
    "WebUI Host": "WebUI 监听地址",
    "WebUI Port": "WebUI 端口",
    "Whisper Model": "Whisper 模型",
    "You can find the source code [here](https://github.com/fishaudio/fish-speech) and models [here](https://huggingface.co/fishaudio/fish-speech-1).": "你可以在 [这里](https://github.com/fishaudio/fish-speech) 找到源代码和 [这里](https://huggingface.co/fishaudio/fish-speech-1) 找到模型.",
    "bf16-true is recommended for 30+ series GPU, 16-mixed is recommended for 10+ series GPU": "30+ 系列 GPU 建议使用 bf16-true, 10+ 系列 GPU 建议使用 16-mixed",
    "latest": "最近的检查点",
    "new": "创建新的检查点"
}