Browse Source

Reuse cache for different generation size

Lengyue 2 years ago
parent
commit
0dafcf373d
1 changed file with 1 addition and 1 deletion
  1. 1 1
      tools/llama/generate.py

+ 1 - 1
tools/llama/generate.py

@@ -163,7 +163,7 @@ def decode_n_tokens(
     **sampling_kwargs,
 ):
     previous_tokens = torch.zeros(
-        (model.config.num_codebooks + 1, num_new_tokens),
+        (model.config.num_codebooks + 1, model.config.max_seq_len),
         dtype=torch.int,
         device=cur_token.device,
     )