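"""Inference script for the fish-speech text2semantic model.

Turns text (optionally conditioned on reference prompt text and VQ prompt
tokens) into semantic VQ codes with a dual-AR transformer, either through the
CLI entry point below or through the thread-safe queue launchers intended for
use from other threads.
"""
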
import os
import queue
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from pathlib import Path
from typing import Literal, Optional, Tuple

import click
import numpy as np
import torch
import torch._dynamo.config
import torch._inductor.config
from loguru import logger
from tqdm import tqdm
from transformers import AutoTokenizer

from fish_speech.content_sequence import (
    ContentSequence,
    TextPart,
    VQPart,
)
from fish_speech.models.text2semantic.llama import BaseModelArgs
from fish_speech.text import split_text
from fish_speech.tokenizer import IM_END_TOKEN

os.environ["TOKENIZERS_PARALLELISM"] = "false"
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.triton.unique_kernel_names = True

if hasattr(torch._inductor.config, "fx_graph_cache"):
    # Experimental feature to reduce compilation times; will be on by default in a future release
    torch._inductor.config.fx_graph_cache = True

from torch.nn.attention import SDPBackend, sdpa_kernel

from fish_speech.models.text2semantic.llama import (
    DualARTransformer,
    NaiveTransformer,
)

def multinomial_sample_one_no_sync(
    probs_sort,
):  # Does multinomial sampling without a cuda synchronization
    q = torch.empty_like(probs_sort).exponential_(1)
    return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)

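# Why this works: with q_i ~ Exp(1), argmax_i(p_i / q_i) = argmin_i(q_i / p_i),
# and q_i / p_i ~ Exp(rate=p_i), so index i wins the race with probability
# p_i / sum_j p_j. This draws from the categorical distribution defined by
# `probs_sort` without the host-device sync that torch.multinomial incurs.
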
def logits_to_probs(
    logits,
    previous_tokens: Optional[torch.Tensor] = None,
    temperature: torch.Tensor = 1.0,
    top_p: torch.Tensor = 1.0,
    repetition_penalty: torch.Tensor = 1.0,
) -> torch.Tensor:
    # Apply repetition penalty
    if previous_tokens is not None:
        previous_tokens = previous_tokens.long()
        score = torch.gather(logits, dim=0, index=previous_tokens)
        score = torch.where(
            score < 0, score * repetition_penalty, score / repetition_penalty
        )
        logits.scatter_(dim=0, index=previous_tokens, src=score)

    # Apply top-p (nucleus) sampling
    sorted_logits, sorted_indices = torch.sort(logits, descending=True)
    cum_probs = torch.cumsum(torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1)
    sorted_indices_to_remove = cum_probs > top_p
    sorted_indices_to_remove[0] = False  # keep at least one option
    indices_to_remove = sorted_indices_to_remove.scatter(
        dim=0, index=sorted_indices, src=sorted_indices_to_remove
    )
    logits = logits.masked_fill(indices_to_remove, -float("Inf"))

    # Apply temperature and normalize
    logits = logits / max(temperature, 1e-5)
    probs = torch.nn.functional.softmax(logits, dim=-1)
    return probs

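# Illustrative use (names and shapes here are hypothetical, not from this module):
#
#   logits = torch.randn(vocab_size)           # 1D logits for a single position
#   probs = logits_to_probs(
#       logits,
#       previous_tokens=torch.tensor([5, 9]),  # recently sampled token ids
#       temperature=torch.tensor(0.7),
#       top_p=torch.tensor(0.8),
#       repetition_penalty=torch.tensor(1.1),
#   )
#   next_id = multinomial_sample_one_no_sync(probs)
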
def sample(
    logits,
    previous_tokens: Optional[torch.Tensor] = None,
    **sampling_kwargs,
) -> Tuple[torch.Tensor, torch.Tensor]:
    probs = logits_to_probs(
        logits=logits[0, -1], previous_tokens=previous_tokens, **sampling_kwargs
    )
    idx_next = multinomial_sample_one_no_sync(probs)
    return idx_next, probs

def decode_one_token_ar(
    model: DualARTransformer,
    x: torch.Tensor,
    input_pos: torch.Tensor,
    semantic_ids: list,
    previous_tokens: torch.Tensor = None,
    **sampling_kwargs,
) -> torch.Tensor:
    # One step of the slow (main) transformer over the input positions
    x = model.forward_generate(x, input_pos)

    sampling_kwargs_main = sampling_kwargs.copy()
    # sampling_kwargs_main["temperature"] = 0.1
    # sampling_kwargs_main["top_p"] = 0.1
    # sampling_kwargs_main["repetition_penalty"] = 1.0

    codebooks = [
        sample(
            x.logits,
            previous_tokens=(
                previous_tokens[0] if previous_tokens is not None else None
            ),  # Repetition penalty over the slow-transformer token stream
            **sampling_kwargs_main,
        )[0]
    ]

    hidden_states = x.hidden_states

    # Clear the fast transformer's KV cache before decoding this step's codebooks
    for layer in model.fast_layers:
        layer.attention.kv_cache.k_cache.fill_(0)
        layer.attention.kv_cache.v_cache.fill_(0)

    input_pos = torch.tensor([0], device=hidden_states.device, dtype=torch.long)
    model.forward_generate_fast(hidden_states, input_pos)

    # Shift the sampled token into codebook space; clamp special tokens to 0
    a = codebooks[0] - model.tokenizer.semantic_begin_id
    a[a < 0] = 0
    hidden_states = model.fast_embeddings(a)
    codebooks.append(a)

    # Fast (codebook) transformer: decode the remaining codebooks one by one
    for codebook_idx in range(1, model.config.num_codebooks):
        input_pos = torch.tensor(
            [codebook_idx], device=hidden_states.device, dtype=torch.long
        )
        logits = model.forward_generate_fast(hidden_states, input_pos)
        chunked_logits = logits[..., :1024]
        a = sample(
            chunked_logits,
            previous_tokens=(
                previous_tokens[codebook_idx + 1]
                if previous_tokens is not None
                else None
            ),
            **sampling_kwargs,
        )[0]
        hidden_states = model.fast_embeddings(a)
        codebooks.append(a)

    return torch.stack(codebooks, dim=0)

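# Shape note: the stacked result is (1 + num_codebooks, 1). Row 0 is the raw
# slow-transformer token id (vocabulary space); row 1 is that same token shifted
# by semantic_begin_id into codebook-0 space; the remaining rows are the codes
# sampled from the fast transformer for the other codebooks.
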
def decode_n_tokens(
    model: NaiveTransformer,
    cur_token: torch.Tensor,
    input_pos: torch.Tensor,
    num_new_tokens: int,
    semantic_ids: list,
    decode_one_token=decode_one_token_ar,
    **sampling_kwargs,
):
    previous_tokens = torch.zeros(
        (model.config.num_codebooks + 1, model.config.max_seq_len),
        dtype=torch.int,
        device=cur_token.device,
    )

    for i in tqdm(range(num_new_tokens)):
        # Repetition penalty is computed over a sliding window of recent tokens
        win_size = 16
        if i < win_size:
            window = previous_tokens[:, :win_size]
        else:
            window = previous_tokens[:, i - win_size : i]

        with (
            sdpa_kernel(SDPBackend.MATH)
            if torch.cuda.is_available()
            else nullcontext()
        ):  # Actually better for Inductor to codegen attention here
            next_token = decode_one_token(
                model=model,
                x=cur_token,
                input_pos=input_pos,
                previous_tokens=window,
                semantic_ids=semantic_ids,
                **sampling_kwargs,
            )

        input_pos += 1
        cur_token = next_token.view(1, model.config.num_codebooks + 1, -1)
        previous_tokens[:, i : i + 1] = next_token.view(
            model.config.num_codebooks + 1, -1
        )

        if cur_token[0, 0, -1] == model.tokenizer.get_token_id(IM_END_TOKEN):
            break

    return previous_tokens[:, : i + 1]

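# Note: previous_tokens is preallocated to max_seq_len and returned truncated to
# the i + 1 steps actually generated. During the first win_size steps the window
# still contains trailing zeros, so token id 0 absorbs part of the repetition
# penalty early on.
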
@torch.inference_mode()
def generate(
    *,
    model: NaiveTransformer,
    prompt: torch.Tensor,
    max_new_tokens: int,
    decode_one_token=decode_one_token_ar,
    **sampling_kwargs,
) -> torch.Tensor:
    """
    Takes a conditioning sequence (prompt) as input and continues to generate
    as many tokens as requested.
    """
    T = prompt.size(1)
    semantic_ids = [
        model.tokenizer.get_token_id(f"<|semantic:{i}|>") for i in range(1024)
    ]

    if max_new_tokens:
        if T + max_new_tokens > model.config.max_seq_len:
            max_new_tokens = model.config.max_seq_len - T
            logger.info(f"Truncating max_new_tokens to {max_new_tokens}")
        T_new = T + max_new_tokens
    else:
        T_new = model.config.max_seq_len
        max_new_tokens = T_new - T

    device, dtype = prompt.device, prompt.dtype

    codebook_dim = 1 + model.config.num_codebooks
    # Create an empty tensor of the expected final shape and fill in the prompt
    empty = torch.empty(
        (codebook_dim, model.config.max_seq_len), dtype=dtype, device=device
    )
    empty[:, :T] = prompt
    seq = empty
    input_pos = torch.arange(0, T, device=device)

    # Prefill with the non-compiled version to avoid compilation overhead
    prefill_decode = decode_one_token_ar
    next_token = prefill_decode(
        model,
        prompt.view(1, codebook_dim, -1),
        input_pos,
        semantic_ids=semantic_ids,
        **sampling_kwargs,
    )
    seq[:, T : T + 1] = next_token

    input_pos = torch.tensor([T], device=device, dtype=torch.int)
    x = decode_n_tokens(
        model,
        next_token.view(1, codebook_dim, -1),
        input_pos,
        max_new_tokens - 1,
        decode_one_token=decode_one_token,
        semantic_ids=semantic_ids,
        **sampling_kwargs,
    )
    seq = seq[:, : T + 1 + x.size(1)]
    seq[:, T + 1 :] = x
    return seq

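# Layout of the returned sequence (columns are positions, rows are 1 + codebooks):
#   seq[:, :T]      -> the conditioning prompt, copied through unchanged
#   seq[:, T]       -> the first generated token (prefill step above)
#   seq[:, T + 1 :] -> tokens from decode_n_tokens (at most max_new_tokens - 1)
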
def load_model(checkpoint_path, device, precision, compile=False, is_agent: bool = False):
    # NOTE: is_agent is accepted for API parity with the agent queue below; the
    # agent-specific decode path is defined elsewhere in the package.
    model = DualARTransformer.from_pretrained(checkpoint_path, load_weights=True)
    model = model.to(device=device, dtype=precision)
    logger.info("Restored model from checkpoint")

    if isinstance(model, DualARTransformer):
        decode_one_token = decode_one_token_ar
        logger.info("Using DualARTransformer")
    else:
        raise ValueError("Model is not a DualARTransformer")

    if compile:
        logger.info("Compiling function...")
        decode_one_token = torch.compile(
            decode_one_token,
            fullgraph=True,
            backend="inductor" if torch.cuda.is_available() else "aot_eager",
            mode="reduce-overhead" if torch.cuda.is_available() else None,
        )

    return model.eval(), decode_one_token

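# Typical setup (illustrative; paths and devices depend on your environment,
# and this mirrors what main() below does):
#
#   model, decode_one_token = load_model(
#       "checkpoints/openaudio-s1-mini", "cuda", torch.bfloat16, compile=True
#   )
#   with torch.device("cuda"):
#       model.setup_caches(
#           max_batch_size=1,
#           max_seq_len=model.config.max_seq_len,
#           dtype=next(model.parameters()).dtype,
#       )
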
@dataclass
class GenerateResponse:
    action: Literal["sample", "next"]
    codes: Optional[torch.Tensor] = None
    text: Optional[str] = None

def generate_long(
    *,
    model,
    device: str | torch.device,
    decode_one_token: callable,
    text: str,
    num_samples: int = 1,
    max_new_tokens: int = 0,
    top_p: float = 0.8,
    repetition_penalty: float = 1.1,
    temperature: float = 0.8,
    compile: bool = False,
    iterative_prompt: bool = True,
    chunk_length: int = 150,
    prompt_text: Optional[str | list[str]] = None,
    prompt_tokens: Optional[torch.Tensor | list[torch.Tensor]] = None,
):
    assert 0 < top_p <= 1, "top_p must be in (0, 1]"
    assert 0 < repetition_penalty < 2, "repetition_penalty must be in (0, 2)"
    assert 0 < temperature < 2, "temperature must be in (0, 2)"

    use_prompt = prompt_text is not None and prompt_tokens is not None
    if use_prompt and isinstance(prompt_text, str):
        prompt_text = [prompt_text]
        prompt_tokens = [prompt_tokens]

    assert use_prompt is False or len(prompt_text) == len(
        prompt_tokens
    ), "Prompt text and tokens must have the same length"

    if use_prompt:
        prompt_tokens = [i.cpu() for i in prompt_tokens]
    model_size = sum(p.numel() for p in model.parameters() if p.requires_grad)
    tokenizer = model.tokenizer

    base_content_sequence = ContentSequence(modality="interleave")

    texts = split_text(text, chunk_length) if iterative_prompt else [text]
    max_length = model.config.max_seq_len

    if use_prompt:
        for t, c in zip(prompt_text, prompt_tokens):
            base_content_sequence.append(
                [
                    TextPart(text=t),
                    VQPart(codes=c),
                ],
                add_end=True,
            )

    encoded_prompts = base_content_sequence.encode_for_inference(
        tokenizer, num_codebooks=model.config.num_codebooks
    )
    if encoded_prompts.size(1) > max_length - 2048:
        raise ValueError(
            f"Prompt is too long: {encoded_prompts.size(1)} > {max_length - 2048}"
        )

    encoded = []
    for text in texts:
        content_sequence = ContentSequence(modality=None)
        content_sequence.append(TextPart(text=text))
        encoded.append(
            content_sequence.encode_for_inference(
                tokenizer, num_codebooks=model.config.num_codebooks
            )
        )
        logger.info(f"Encoded text: {text}")

    # Move temperature, top_p, and repetition_penalty to the target device so that
    # changing these parameters does not trigger a recompile of decode_one_token.
    temperature = torch.tensor(temperature, device=device, dtype=torch.float)
    top_p = torch.tensor(top_p, device=device, dtype=torch.float)
    repetition_penalty = torch.tensor(
        repetition_penalty, device=device, dtype=torch.float
    )
    for sample_idx in range(num_samples):
        if torch.cuda.is_available():
            torch.cuda.synchronize()

        global_encoded = []
        seg_idx = 0

        while seg_idx < len(encoded):
            logger.info(
                f"Generating sentence {seg_idx + 1}/{len(encoded)} of sample {sample_idx + 1}/{num_samples}"
            )

            seg = encoded[seg_idx]
            global_encoded.append(seg)
            # Previous segments are not used as rolling context for now; instead,
            # only the first two generated segments are kept as context (below)
            # to anchor the voice and avoid drift.
            if len(base_content_sequence.parts) <= 1 and len(global_encoded) >= 2:
                cat_encoded = torch.cat(
                    [encoded_prompts, global_encoded[0], global_encoded[1], seg], dim=1
                )
            else:
                cat_encoded = torch.cat([encoded_prompts, seg], dim=1)

            cat_encoded = cat_encoded.to(device=device)
            prompt_length = cat_encoded.size(1)

            t0 = time.perf_counter()
            y = generate(
                model=model,
                prompt=cat_encoded,
                max_new_tokens=max_new_tokens,
                decode_one_token=decode_one_token,
                temperature=temperature,
                top_p=top_p,
                repetition_penalty=repetition_penalty,
            )

            if sample_idx == 0 and seg_idx == 0 and compile:
                logger.info(f"Compilation time: {time.perf_counter() - t0:.2f} seconds")

            if torch.cuda.is_available():
                torch.cuda.synchronize()

            t = time.perf_counter() - t0
            tokens_generated = y.size(1) - prompt_length
            tokens_sec = tokens_generated / t
            logger.info(
                f"Generated {tokens_generated} tokens in {t:.02f} seconds, {tokens_sec:.02f} tokens/sec"
            )
            logger.info(
                f"Bandwidth achieved: {model_size * tokens_sec / 1e9:.02f} GB/s"
            )

            if torch.cuda.is_available():
                logger.info(
                    f"GPU Memory used: {torch.cuda.max_memory_reserved() / 1e9:.02f} GB"
                )
            # The sequence ends with <|im_end|>, so drop that token from the codes
            codes = y[1:, prompt_length:-1].clone()
            assert (codes >= 0).all(), f"Negative code found: {codes}"

            # For the running context, keep the <|im_end|> token
            decoded = y[:, prompt_length:].clone()
            global_encoded.append(decoded.cpu())

            yield GenerateResponse(action="sample", codes=codes, text=texts[seg_idx])
            seg_idx += 1

        # This indicates the end of the current sample
        yield GenerateResponse(action="next")

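# Consuming the generator (this mirrors main() below): each "sample" response
# carries the VQ codes for one text chunk; a "next" response closes the sample.
#
#   for response in generate_long(model=model, device=device, ...):
#       if response.action == "sample":
#           all_codes.append(response.codes)
#       elif response.action == "next":
#           flush(all_codes)  # `flush` is a hypothetical consumer
#           all_codes = []
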
@dataclass
class WrappedGenerateResponse:
    status: Literal["success", "error"]
    response: Optional[GenerateResponse | Exception] = None


@dataclass
class GenerateRequest:
    request: dict
    response_queue: queue.Queue

def launch_thread_safe_queue(
    checkpoint_path,
    device,
    precision,
    compile: bool = False,
):
    input_queue = queue.Queue()
    init_event = threading.Event()

    def worker():
        model, decode_one_token = load_model(
            checkpoint_path, device, precision, compile=compile
        )
        with torch.device(device):
            model.setup_caches(
                max_batch_size=1,
                max_seq_len=model.config.max_seq_len,
                dtype=next(model.parameters()).dtype,
            )
        init_event.set()

        while True:
            item: GenerateRequest | None = input_queue.get()
            if item is None:
                break

            kwargs = item.request
            response_queue = item.response_queue

            try:
                for chunk in generate_long(
                    model=model, decode_one_token=decode_one_token, **kwargs
                ):
                    response_queue.put(
                        WrappedGenerateResponse(status="success", response=chunk)
                    )
            except Exception as e:
                response_queue.put(WrappedGenerateResponse(status="error", response=e))

    threading.Thread(target=worker, daemon=True).start()
    init_event.wait()

    return input_queue

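# Illustrative round-trip through the queue (names here are hypothetical; the
# request dict holds keyword arguments for generate_long):
#
#   input_queue = launch_thread_safe_queue(ckpt_path, "cuda", torch.bfloat16)
#   response_queue: queue.Queue = queue.Queue()
#   input_queue.put(GenerateRequest(
#       request={"text": "Hello", "device": "cuda"},
#       response_queue=response_queue,
#   ))
#   wrapped = response_queue.get()
#   if wrapped.status == "success":
#       handle(wrapped.response)  # a GenerateResponse; `handle` is hypothetical
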
def launch_thread_safe_queue_agent(
    checkpoint_path,
    device,
    precision,
    compile: bool = False,
):
    input_queue = queue.Queue()
    init_event = threading.Event()

    tokenizer = AutoTokenizer.from_pretrained(checkpoint_path)
    config = BaseModelArgs.from_pretrained(checkpoint_path)

    def worker():
        model, decode_one_token = load_model(
            checkpoint_path, device, precision, compile=compile, is_agent=True
        )

        with torch.device(device):
            model.setup_caches(
                max_batch_size=1,
                max_seq_len=model.config.max_seq_len,
                dtype=next(model.parameters()).dtype,
            )
        init_event.set()

        while True:
            item: GenerateRequest | None = input_queue.get()
            if item is None:
                break

            kwargs = item.request
            response_queue = item.response_queue

            try:
                # generate_agent is the agent-mode counterpart of generate_long,
                # defined elsewhere in this package.
                for token in generate_agent(
                    model=model,
                    decode_one_token=decode_one_token,
                    **kwargs,
                ):
                    response_queue.put(token)

                response_queue.put("stop")
            except Exception as e:
                import traceback

                logger.exception(f"Error in worker: {traceback.format_exc()}")
                response_queue.put("error")

    threading.Thread(target=worker, daemon=True).start()
    init_event.wait()

    return input_queue, tokenizer, config

@click.command()
@click.option(
    "--text",
    type=str,
    # Default demo sentence (Chinese): "You're right, but Genshin Impact is an
    # open-world game independently developed by miHoYo."
    default="你说的对, 但是原神是一款由米哈游自主研发的开放世界手游.",
)
@click.option("--prompt-text", type=str, default=None, multiple=True)
@click.option(
    "--prompt-tokens",
    type=click.Path(path_type=Path, exists=True),
    default=None,
    multiple=True,
)
@click.option("--num-samples", type=int, default=1)
@click.option("--max-new-tokens", type=int, default=0)
@click.option("--top-p", type=float, default=0.8)
@click.option("--repetition-penalty", type=float, default=1.1)
@click.option("--temperature", type=float, default=0.8)
@click.option(
    "--checkpoint-path",
    type=click.Path(path_type=Path, exists=True),
    default="checkpoints/openaudio-s1-mini",
)
@click.option("--device", type=str, default="cuda")
@click.option("--compile/--no-compile", default=False)
@click.option("--seed", type=int, default=42)
@click.option("--half/--no-half", default=False)
@click.option("--iterative-prompt/--no-iterative-prompt", default=True)
@click.option("--chunk-length", type=int, default=300)
@click.option("--output-dir", type=Path, default="temp")
def main(
    text: str,
    prompt_text: Optional[list[str]],
    prompt_tokens: Optional[list[Path]],
    num_samples: int,
    max_new_tokens: int,
    top_p: float,
    repetition_penalty: float,
    temperature: float,
    checkpoint_path: Path,
    device: str,
    compile: bool,
    seed: int,
    half: bool,
    iterative_prompt: bool,
    chunk_length: int,
    output_dir: Path,
) -> None:
    os.makedirs(output_dir, exist_ok=True)
    precision = torch.half if half else torch.bfloat16

    if prompt_text is not None and len(prompt_text) != len(prompt_tokens):
        raise ValueError(
            f"Number of prompt texts ({len(prompt_text)}) and prompt tokens ({len(prompt_tokens)}) should be the same"
        )

    logger.info("Loading model ...")
    t0 = time.time()
    model, decode_one_token = load_model(
        checkpoint_path, device, precision, compile=compile
    )
    with torch.device(device):
        model.setup_caches(
            max_batch_size=1,
            max_seq_len=model.config.max_seq_len,
            dtype=next(model.parameters()).dtype,
        )
    if torch.cuda.is_available():
        torch.cuda.synchronize()

    logger.info(f"Time to load model: {time.time() - t0:.02f} seconds")

    if prompt_tokens is not None:
        prompt_tokens = [torch.from_numpy(np.load(p)) for p in prompt_tokens]

    torch.manual_seed(seed)

    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

    generator = generate_long(
        model=model,
        device=device,
        decode_one_token=decode_one_token,
        text=text,
        num_samples=num_samples,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        temperature=temperature,
        compile=compile,
        iterative_prompt=iterative_prompt,
        chunk_length=chunk_length,
        prompt_text=prompt_text,
        prompt_tokens=prompt_tokens,
    )

    idx = 0
    codes = []

    for response in generator:
        if response.action == "sample":
            codes.append(response.codes)
            logger.info(f"Sampled text: {response.text}")
        elif response.action == "next":
            if codes:
                codes_npy_path = os.path.join(output_dir, f"codes_{idx}.npy")
                np.save(codes_npy_path, torch.cat(codes, dim=1).cpu().numpy())
                logger.info(f"Saved codes to {codes_npy_path}")
            logger.info("Next sample")
            codes = []
            idx += 1
        else:
            logger.error(f"Error: {response}")


if __name__ == "__main__":
    main()
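
# Example invocation (module path and file locations are illustrative; see the
# click options above for the full set of flags and their defaults):
#   python -m fish_speech.models.text2semantic.inference \
#       --text "Hello world" \
#       --checkpoint-path checkpoints/openaudio-s1-mini \
#       --num-samples 2 --compile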