import os

import torch
from transformers import AutoConfig, AutoModel, AutoTokenizer, CLIPImageProcessor

# Model configuration module: loads EVA-CLIP-8B once at import time and
# shares the instances through get_model().
MODEL_NAME = "BAAI/EVA-CLIP-8B"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
# fp16 only when a GPU is available; CPU inference stays in fp32 because
# half precision on CPU is slow and numerically fragile.
DTYPE = torch.float16 if DEVICE == "cuda" else torch.float32
# Maximum number of images per inference batch, overridable via env var.
MAX_BATCH = int(os.getenv("MAX_BATCH", "32"))

print(f"[model_config] Loading {MODEL_NAME} on {DEVICE} dtype={DTYPE} ...")
config = AutoConfig.from_pretrained(MODEL_NAME, trust_remote_code=True)
# trust_remote_code is required: EVA-CLIP ships custom modeling code on the
# Hub. NOTE(review): this executes repo-provided code — acceptable only if
# the model source is trusted.
model = (
    AutoModel.from_pretrained(MODEL_NAME, config=config, trust_remote_code=True)
    .to(dtype=DTYPE, device=DEVICE)
    .eval()  # inference-only: disable dropout / batch-norm updates
)

# Image preprocessing only (resize / normalize to CLIP's expected input).
image_processor = CLIPImageProcessor.from_pretrained(MODEL_NAME)
# Tokenizer kept available in case text embeddings are needed later.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
def get_model():
    """Return the shared model bundle loaded at import time.

    Returns:
        tuple: ``(model, image_processor, tokenizer, DEVICE, DTYPE, MAX_BATCH)``
        — the module-level singletons, so every caller reuses the same
        loaded weights instead of re-loading them.
    """
    return model, image_processor, tokenizer, DEVICE, DTYPE, MAX_BATCH