#!/bin/bash
# Multi-worker deployment script — start the Fish-Speech API service on a
# single machine.
# Usage: ./deploy_multi_worker.sh [num_workers] [port] [gpu_id]
set -euo pipefail

# Positional parameters with defaults.
NUM_WORKERS=${1:-2}   # default: 2 workers
PORT=${2:-8080}       # default: port 8080
GPU_ID=${3:-0}        # default: GPU 0

# Fixed checkpoint locations for this deployment.
readonly DECODER_CONFIG_NAME="modded_dac_vq"
readonly LLAMA_CHECKPOINT="/root/fish-checkpoints/s2-pro"
readonly DECODER_CHECKPOINT="/root/fish-checkpoints/s2-pro/codec.pth"

# Environment: let the CUDA allocator grow segments instead of fragmenting,
# and pin the process to the selected GPU.
export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
export CUDA_VISIBLE_DEVICES="${GPU_ID}"

echo "========================================="
echo "Fish-Speech Multi-Worker Deployment"
echo "========================================="
echo "Workers: ${NUM_WORKERS}"
echo "Port: ${PORT}"
echo "GPU: ${GPU_ID}"
echo "========================================="

# Start the API service.
# FIX: the original line had an unterminated quote on --decoder-config-name,
# which swallowed --half/--workers/--num-workers into that argument.
# NOTE(review): both --workers 1 and --num-workers are passed — confirm which
# flag the server actually honors for worker count.
python tools/api_server.py \
  --listen "0.0.0.0:${PORT}" \
  --llama-checkpoint-path "${LLAMA_CHECKPOINT}" \
  --decoder-checkpoint-path "${DECODER_CHECKPOINT}" \
  --decoder-config-name "${DECODER_CONFIG_NAME}" \
  --half \
  --workers 1 \
  --num-workers "${NUM_WORKERS}"