# deploy_multi_worker.sh
  1. #!/bin/bash
  2. # 多worker部署脚本 - 在单台机器上启动API服务
  3. # 使用方法: ./deploy_multi_worker.sh [num_workers] [port]
  4. set -e
  5. # 配置参数
  6. NUM_WORKERS=${1:-2} # 默认2个worker
  7. PORT=${2:-8080} # 默认端口8080
  8. GPU_ID=${3:-0} # 默认GPU 0
  9. LLAMA_CHECKPOINT="checkpoints/s2-pro"
  10. DECODER_CHECKPOINT="checkpoints/s2-pro/codec.pth"
  11. # 设置环境变量
  12. export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
  13. export CUDA_VISIBLE_DEVICES=${GPU_ID}
  14. echo "========================================="
  15. echo "Fish-Speech Multi-Worker Deployment"
  16. echo "========================================="
  17. echo "Workers: ${NUM_WORKERS}"
  18. echo "Port: ${PORT}"
  19. echo "GPU: ${GPU_ID}"
  20. echo "========================================="
  21. # 启动API服务
  22. python tools/api_server.py \
  23. --listen 0.0.0.0:${PORT} \
  24. --llama-checkpoint-path ${LLAMA_CHECKPOINT} \
  25. --decoder-checkpoint-path ${DECODER_CHECKPOINT} \
  26. --half \
  27. --workers 1 \
  28. --num-workers ${NUM_WORKERS}