ariG23498 (HF Staff) committed
Commit 6ad3ca7 (verified) · Parent(s): c062dcf

Create benchmark-fa3.py

Files changed (1):
  1. benchmark-fa3.py (+137, -0)
benchmark-fa3.py ADDED
@@ -0,0 +1,137 @@
import torch
from torch.utils import benchmark
from transformers import AutoTokenizer, AutoModelForCausalLM, Mxfp4Config

# ============ CONFIGURATION ============
MODEL_ID = "openai/gpt-oss-20b"
MAX_NEW_TOKENS = 256
BENCHMARK_RUNS = 5

SOURCE_FILES = [
    "/fsx/aritra/git-repos/transformers/src/transformers/models/gpt_oss/modeling_gpt_oss.py",
    "/fsx/aritra/git-repos/transformers/src/transformers/models/gpt_oss/modular_gpt_oss.py",
]
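# The two prompts are large source files from a local transformers checkout, so the
# benchmark exercises long prefills (roughly 7.4k and 4.2k tokens, per the results at the
# bottom of this file), which is where a faster attention kernel should matter most.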

# ============ MODEL LOADING ============
def load_model(model_id: str, use_attn_kernels: bool):
    """Load model with optional attention kernel optimization."""
    quantization_config = Mxfp4Config(dequantize=True)

    kwargs = {
        "dtype": "auto",
        "device_map": "cuda:0",
        "use_kernels": False,
        "quantization_config": quantization_config,
    }

    if use_attn_kernels:
        kwargs["attn_implementation"] = "kernels-community/vllm-flash-attn3"

    return AutoModelForCausalLM.from_pretrained(model_id, **kwargs).eval()


def unload_model(model):
    """Move model to CPU and free GPU memory."""
    model.to("cpu")
    del model
    torch.cuda.empty_cache()
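# Note (assumption): pointing `attn_implementation` at "kernels-community/vllm-flash-attn3"
# uses transformers' kernels-hub integration, which downloads a pre-built FlashAttention-3
# kernel via the `kernels` package, so that package needs to be installed for the
# use_attn_kernels=True runs.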

# ============ GENERATION ============
def generate(model, model_inputs: dict, max_new_tokens: int):
    """Run inference without sampling."""
    with torch.inference_mode():
        model.generate(
            **model_inputs,
            do_sample=False,
            temperature=None,
            max_new_tokens=max_new_tokens,
            eos_token_id=-1,
            disable_compile=True,
        )
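# Greedy decoding (do_sample=False) together with eos_token_id=-1 means no run can stop
# early at an end-of-sequence token, so every timed call decodes exactly MAX_NEW_TOKENS
# tokens and the two attention configurations are compared on identical workloads.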
55
+ # ============ DATA PREPARATION ============
56
+ def load_prompts(filepaths: list[str]) -> list[str]:
57
+ """Read source files and create summarization prompts."""
58
+ prompts = []
59
+ for filepath in filepaths:
60
+ with open(filepath, "r") as f:
61
+ prompts.append(f"{f.read()}\nSummarize this for me.")
62
+ return prompts
63
+
64
+
65
+ def tokenize_prompts(tokenizer, prompts: list[str]) -> list[tuple[dict, int]]:
66
+ """Tokenize prompts and return inputs with their prefill sizes."""
67
+ tokenizer.padding_side = "left"
68
+ tokenized = []
69
+
70
+ for prompt in prompts:
71
+ message = [{"role": "user", "content": prompt}]
72
+ text = tokenizer.apply_chat_template(
73
+ message, # Fixed: was `m`
74
+ add_generation_prompt=True,
75
+ tokenize=False,
76
+ reasoning_effort="low",
77
+ )
78
+ inputs = tokenizer(text, return_tensors="pt", padding=True)
79
+ prefill_size = inputs.input_ids.size(1) # Fixed: was `input` and `.size[1]`
80
+ tokenized.append((inputs, prefill_size))
81
+
82
+ return tokenized
83
+
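# Since each prompt is tokenized on its own (batch size 1), left padding and padding=True
# have no effect on these inputs; they would only matter if several prompts were batched
# together into a single padded call.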

# ============ BENCHMARKING ============
def run_benchmarks(model, tokenized_inputs: list[tuple], use_attn_kernels: bool) -> list:
    """Run timing benchmarks for each input."""
    results = []

    for inputs, prefill_size in tokenized_inputs:
        timer = benchmark.Timer(
            stmt="generate(model, model_inputs, max_new_tokens)",
            setup="from __main__ import generate",
            globals={
                "model": model,
                "model_inputs": inputs.to(model.device),
                "max_new_tokens": MAX_NEW_TOKENS,
            },
            num_threads=torch.get_num_threads(),
            label=f"Time to generate {MAX_NEW_TOKENS} tokens",
            sub_label=f"prefill_size={prefill_size}",
            description=f"attn_kernels={use_attn_kernels}",
        )
        results.append(timer.timeit(BENCHMARK_RUNS))

    return results
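# torch.utils.benchmark.Timer is used instead of a hand-rolled time.time() loop because it
# handles CUDA synchronization around the timed statement; timer.timeit(BENCHMARK_RUNS)
# returns a Measurement whose reported time is the mean per call over BENCHMARK_RUNS runs.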

# ============ MAIN ============
def main():
    prompts = load_prompts(SOURCE_FILES)

    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
    tokenized_inputs = tokenize_prompts(tokenizer, prompts)

    all_results = []
    for use_attn_kernels in [True, False]:
        print(f"\nBenchmarking with attn_kernels={use_attn_kernels}...")

        model = load_model(MODEL_ID, use_attn_kernels)
        results = run_benchmarks(model, tokenized_inputs, use_attn_kernels)
        all_results.extend(results)
        unload_model(model)

    benchmark.Compare(all_results).print()


if __name__ == "__main__":
    main()

# [------------------ Time to generate 256 tokens -------------------]
#                        |  attn_kernels=True  |  attn_kernels=False
# 12 threads: --------------------------------------------------------
#    prefill_size=7353   |         8.3         |        10.2
#    prefill_size=4225   |         8.3         |         9.0
#
# Times are in seconds (s).
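# To reproduce (assumptions: a single CUDA GPU with enough memory for the dequantized 20B
# checkpoint, and the `kernels` package installed for the hub-provided FA3 kernel), run:
#
#   python benchmark-fa3.py
#
# SOURCE_FILES points at a local transformers checkout, so adjust those paths to any long
# text files available on your machine.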