#!/bin/bash
# Multi-worker deployment script — starts the Fish-Speech API service on a
# single machine, pinned to one GPU.
# Usage: ./deploy_multi_worker.sh [num_workers] [port] [gpu_id]
set -euo pipefail

# Configuration (positional args with defaults)
NUM_WORKERS=${1:-2}   # default: 2 workers
PORT=${2:-8080}       # default: port 8080
GPU_ID=${3:-0}        # default: GPU 0
readonly DECODER_CONFIG_NAME="modded_dac_vq"
readonly LLAMA_CHECKPOINT="/root/fish-checkpoints/s2-pro"
readonly DECODER_CHECKPOINT="/root/fish-checkpoints/s2-pro/codec.pth"

# Environment: allow the CUDA allocator to grow segments, and restrict the
# process to the selected GPU.
export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
export CUDA_VISIBLE_DEVICES=${GPU_ID}

echo "========================================="
echo "Fish-Speech Multi-Worker Deployment"
echo "========================================="
echo "Workers: ${NUM_WORKERS}"
echo "Port: ${PORT}"
echo "GPU: ${GPU_ID}"
echo "========================================="

# Launch the API server. NOTE: the original script had an unclosed quote on
# the --decoder-config-name line, which made bash fail with "unexpected EOF";
# fixed here, and all expansions are quoted defensively.
python tools/api_server.py \
  --listen "0.0.0.0:${PORT}" \
  --llama-checkpoint-path "${LLAMA_CHECKPOINT}" \
  --decoder-checkpoint-path "${DECODER_CHECKPOINT}" \
  --decoder-config-name "${DECODER_CONFIG_NAME}" \
  --half \
  --workers 1 \
  --num-workers "${NUM_WORKERS}"