TeleAI-AI-Flow committed on
Commit 9c777d5 · verified · 1 Parent(s): 0e9a77e

Upload 3 files


add vllm script and Ministral-3 results

Files changed (3)
  1. README.md +1 -1
  2. assets/ic_mixed.png +2 -2
  3. likelihood_vllm.py +137 -0
README.md CHANGED
@@ -19,7 +19,7 @@ Compared to existing metrics on LLM efficiency, a key difference of information
 An effective tokenizer can represent a given text with fewer tokens, thus reducing both the input and output token counts.
 This reduction not only lowers computational costs and inference delay but also facilitates long-context memory and in-depth reasoning.
 Tokenizer efficiency exhibits growing significance in light of the exploding input length and the widespread usage of test-time scaling, but is often **neglected** in LLM evaluations.
- We assess the information capacity of 49 models across 5 heterogeneous datasets and find consistent evidence regarding the influences of tokenizer efficiency, pretraining data, and the mixture-of-experts (MoE) architecture.
+ We assess the information capacity of 52 models across 5 heterogeneous datasets and find consistent evidence regarding the influences of tokenizer efficiency, pretraining data, and the mixture-of-experts (MoE) architecture.

 ## Data
 
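The README's point that a more efficient tokenizer encodes the same text with fewer tokens can be checked directly. Below is a minimal sketch (not part of this commit) that compares token counts with the Hugging Face transformers API; the model names and the sample string are illustrative placeholders.

# Illustrative sketch -- not part of this commit; model names and text are placeholders.
from transformers import AutoTokenizer

sample = "Tokenizer efficiency measures how compactly a given text is represented as tokens."

for name in ["gpt2", "Qwen/Qwen2.5-7B"]:  # hypothetical tokenizer choices
    tok = AutoTokenizer.from_pretrained(name)
    n_tokens = len(tok.encode(sample, add_special_tokens=False))
    # Fewer tokens for the same text => higher tokenizer efficiency on this text.
    print(f"{name}: {n_tokens} tokens, {len(sample) / n_tokens:.2f} characters per token")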
assets/ic_mixed.png CHANGED

Git LFS Details (old)

  • SHA256: ac56cb53fd0a9ca5276febbd459b5b4e506138c42434dda3583e40a855defed2
  • Pointer size: 131 Bytes
  • Size of remote file: 212 kB

Git LFS Details (new)

  • SHA256: 05cddd6b1afb1a02d9c8deb5fe7e35707bf7a742750a0454671e3c7887ef2855
  • Pointer size: 131 Bytes
  • Size of remote file: 213 kB
likelihood_vllm.py ADDED
@@ -0,0 +1,137 @@
import json
import torch
import numpy as np
import math
from typing import Iterator, List, Dict
from itertools import islice, chain
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams
from tqdm import tqdm

def batched(iterable, n):
    """Batch data into lists of length n. The last batch may be shorter."""
    it = iter(iterable)
    while True:
        batch = list(islice(it, n))
        if not batch:
            return
        yield batch

def calculate_negative_log_likelihood(
    model_path: str,
    jsonl_path: str,
    target_token_length: int,
    batch_size: int = 1,
    tensor_parallel_size: int = 1,
    chunk_size: int = 1000,  # Number of samples to hold in RAM before sending to vLLM
    num_samples: int = 200000,
    device: str = "cuda"
) -> torch.Tensor:
    """
    Calculates NLL using streaming data loading and vLLM.
    Input is read lazily from disk, preventing OOM on the inputs.
    """

    # 1. Initialize Tokenizer and vLLM
    # We use the HF tokenizer to handle truncation explicitly before vLLM
    print(f"Initializing model: {model_path}")
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

    llm = LLM(
        model=model_path,
        trust_remote_code=True,
        tensor_parallel_size=tensor_parallel_size,
        dtype="auto",
        enforce_eager=False,
        gpu_memory_utilization=0.8,
        max_model_len=target_token_length + 1,
    )

    # prompt_logprobs=1 returns the logprob of the token that was actually matched/generated
    sampling_params = SamplingParams(max_tokens=1, prompt_logprobs=1, detokenize=False)

    # constant for log conversion
    ln_2 = np.log(2)

    # 2. Define the data generator
    def data_generator() -> Iterator[List[int]]:
        with open(jsonl_path, "r", encoding="utf-8") as f:
            for line in f:
                data = json.loads(line)
                # Tokenize and Truncate immediately to save RAM
                token_ids = tokenizer.encode(
                    data["text"],
                    truncation=True,
                    max_length=target_token_length,
                    add_special_tokens=True
                )
                yield token_ids

    # 3. Process in Chunks
    # If the output tensor is too large for CPU RAM, you should write results to disk
    # inside this loop instead of appending to 'all_results'.
    all_results = []

    # Create the iterator
    token_iter = data_generator()

    print(f"Starting streaming inference with chunk size {chunk_size}...")

    # Loop over batches of the dataset
    for batch_idx, batch_token_ids in enumerate(tqdm(batched(token_iter, chunk_size), total=math.ceil(num_samples / chunk_size) if num_samples is not None else None,
                                                     desc=f"Calculating Entropy for {model_path.split('/')[-1]}")):

        # Run vLLM on this chunk
        # vLLM handles internal batching for GPU throughput, but this loop manages CPU RAM.
        request_outputs = llm.generate(
            prompt_token_ids=batch_token_ids,
            sampling_params=sampling_params,
            use_tqdm=True
        )

        # Process results for this chunk
        chunk_entropies = []

        for i, output in enumerate(request_outputs):
            seq_logprobs = output.prompt_logprobs
            token_ids = batch_token_ids[i]

            # Extract logprobs for prediction (tokens 1 to N)
            # seq_logprobs[j] corresponds to the token at index j
            current_nlls = []

            # We predict token[j] given token[0...j-1]
            # prompt_logprobs list aligns with input tokens.
            # Entry 0 is None. Entry 1 is logprob of token 1 given token 0.
            for j in range(1, len(seq_logprobs)):
                token_at_j = token_ids[j]
                step_logprobs = seq_logprobs[j]

                if step_logprobs is not None and token_at_j in step_logprobs:
                    log_prob = step_logprobs[token_at_j].logprob
                    # Convert ln to log2 and negate
                    current_nlls.append(-(log_prob / ln_2))
                else:
                    current_nlls.append(float('nan'))

            chunk_entropies.append(current_nlls)

        # Convert chunk to tensor
        # Create tensor filled with NaN
        chunk_tensor = torch.full((len(chunk_entropies), target_token_length - 1), float('nan'))

        for k, nll_list in enumerate(chunk_entropies):
            # Fill valid data
            valid_len = min(len(nll_list), target_token_length - 1)
            chunk_tensor[k, :valid_len] = torch.tensor(nll_list[:valid_len])

        all_results.append(chunk_tensor)

        # Optional: Explicit garbage collection if memory is extremely tight
        del request_outputs, batch_token_ids

    # 4. Concatenate all chunks
    if not all_results:
        return torch.empty(0)

    return torch.cat(all_results, dim=0)
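
For reference, a minimal driver for the function above might look like the following. The model path, data path, and sequence length are placeholders, and averaging the non-NaN per-token values into bits per token is one plausible way to summarize the returned tensor, not necessarily the aggregation used elsewhere in this repository.

# Illustrative usage -- paths and parameters below are placeholders, not taken from the commit.
if __name__ == "__main__":
    nll_bits = calculate_negative_log_likelihood(
        model_path="your-org/your-model",   # placeholder HF model id or local path
        jsonl_path="data/eval.jsonl",       # placeholder JSONL with a "text" field per line
        target_token_length=2048,
        tensor_parallel_size=1,
        chunk_size=1000,
        num_samples=200000,
    )
    # Shape: (num_texts, target_token_length - 1); NaN marks positions with no prediction.
    valid = ~torch.isnan(nll_bits)
    mean_bits = nll_bits[valid].mean().item()
    print(f"Mean NLL: {mean_bits:.3f} bits/token over {int(valid.sum())} predicted tokens")
    torch.save(nll_bits, "nll_bits.pt")  # keep per-token values for downstream analysis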