Просмотр исходного кода

"Ensure the version of checkpoints" (#752)

PoTaTo 1 год назад
Родитель
Commit
f54c50fe62
6 измененных файлов с 8 добавлено и 8 удалено
  1. 2 2
      API_FLAGS.txt
  2. 1 1
      dockerfile
  3. 1 1
      docs/pt/inference.md
  4. 1 1
      tools/llama/generate.py
  5. 2 2
      tools/vqgan/extract_vq.py
  6. 1 1
      tools/vqgan/inference.py

+ 2 - 2
API_FLAGS.txt

@@ -1,6 +1,6 @@
 # --infer
 --api
 --listen 0.0.0.0:8080 \
---llama-checkpoint-path "checkpoints/fish-speech-1.4" \
---decoder-checkpoint-path "checkpoints/fish-speech-1.4/firefly-gan-vq-fsq-8x1024-21hz-generator.pth" \
+--llama-checkpoint-path "checkpoints/fish-speech-1.5" \
+--decoder-checkpoint-path "checkpoints/fish-speech-1.5/firefly-gan-vq-fsq-8x1024-21hz-generator.pth" \
 --decoder-config-name firefly_gan_vq

+ 1 - 1
dockerfile

@@ -1,7 +1,7 @@
 FROM python:3.12-slim-bookworm AS stage-1
 ARG TARGETARCH
 
-ARG HUGGINGFACE_MODEL=fish-speech-1.4
+ARG HUGGINGFACE_MODEL=fish-speech-1.5
 ARG HF_ENDPOINT=https://huggingface.co
 
 WORKDIR /opt/fish-speech

+ 1 - 1
docs/pt/inference.md

@@ -70,7 +70,7 @@ Fornecemos uma API HTTP para inferência. O seguinte comando pode ser usado para
 python -m tools.api_server \
     --listen 0.0.0.0:8080 \
     --llama-checkpoint-path "checkpoints/fish-speech-1.5" \
-    --decoder-checkpoint-path "checkpoints/fish-speech-1.4/firefly-gan-vq-fsq-8x1024-21hz-generator.pth" \
+    --decoder-checkpoint-path "checkpoints/fish-speech-1.5/firefly-gan-vq-fsq-8x1024-21hz-generator.pth" \
     --decoder-config-name firefly_gan_vq
 ```
 

+ 1 - 1
tools/llama/generate.py

@@ -1019,7 +1019,7 @@ def launch_thread_safe_queue_agent(
 @click.option(
     "--checkpoint-path",
     type=click.Path(path_type=Path, exists=True),
-    default="checkpoints/fish-speech-1.4",
+    default="checkpoints/fish-speech-1.5",
 )
 @click.option("--device", type=str, default="cuda")
 @click.option("--compile/--no-compile", default=False)

+ 2 - 2
tools/vqgan/extract_vq.py

@@ -48,7 +48,7 @@ logger.add(sys.stderr, format=logger_format)
 @lru_cache(maxsize=1)
 def get_model(
     config_name: str = "firefly_gan_vq",
-    checkpoint_path: str = "checkpoints/fish-speech-1.4/firefly-gan-vq-fsq-8x1024-21hz-generator.pth",
+    checkpoint_path: str = "checkpoints/fish-speech-1.5/firefly-gan-vq-fsq-8x1024-21hz-generator.pth",
     device: str | torch.device = "cuda",
 ):
     with initialize(version_base="1.3", config_path="../../fish_speech/configs"):
@@ -139,7 +139,7 @@ def process_batch(files: list[Path], model) -> float:
 @click.option("--config-name", default="firefly_gan_vq")
 @click.option(
     "--checkpoint-path",
-    default="checkpoints/fish-speech-1.4/firefly-gan-vq-fsq-8x1024-21hz-generator.pth",
+    default="checkpoints/fish-speech-1.5/firefly-gan-vq-fsq-8x1024-21hz-generator.pth",
 )
 @click.option("--batch-size", default=64)
 @click.option("--filelist", default=None, type=Path)

+ 1 - 1
tools/vqgan/inference.py

@@ -58,7 +58,7 @@ def load_model(config_name, checkpoint_path, device="cuda"):
 @click.option("--config-name", default="firefly_gan_vq")
 @click.option(
     "--checkpoint-path",
-    default="checkpoints/fish-speech-1.4/firefly-gan-vq-fsq-8x1024-21hz-generator.pth",
+    default="checkpoints/fish-speech-1.5/firefly-gan-vq-fsq-8x1024-21hz-generator.pth",
 )
 @click.option(
     "--device",