ariG23498 (HF Staff) committed on
Commit c062dcf · verified · 1 Parent(s): 49d3710

Create benchmark-kernels-attn.py

Files changed (1)
  1. benchmark-kernels-attn.py +100 -0
benchmark-kernels-attn.py ADDED
@@ -0,0 +1,100 @@
import os
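# Only GPU 3 is visible to this process, so it is addressed as cuda:0 below.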
os.environ["CUDA_VISIBLE_DEVICES"] = "3"

import torch
from torch.utils import benchmark
from transformers import AutoTokenizer, AutoModelForCausalLM, Mxfp4Config


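# Load the model with its MXFP4 weights dequantized. Hub kernels for the model are
# toggled by use_kernels; use_attn_kernels swaps in the kernels-community
# vLLM FlashAttention-3 attention implementation.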
def load_model(use_kernels, use_attn_kernels, model_id):
    quantization_config = Mxfp4Config(dequantize=True)
    kwargs = {
        "dtype": "auto",
        "device_map": "cuda:0",
        "use_kernels": use_kernels,
        "quantization_config": quantization_config,
    }
    if use_attn_kernels:
        kwargs["attn_implementation"] = "kernels-community/vllm-flash-attn3"

    return AutoModelForCausalLM.from_pretrained(model_id, **kwargs).eval()


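# Greedy decoding; eos_token_id=-1 never matches a generated token, so every call
# produces exactly max_new_tokens tokens.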
def generate(model, model_inputs, max_new_tokens):
    with torch.inference_mode():
        model.generate(
            **model_inputs,
            do_sample=False,
            temperature=None,
            max_new_tokens=max_new_tokens,
            eos_token_id=-1,
            disable_compile=True,
        )


if __name__ == "__main__":
    model_id = "openai/gpt-oss-20b"
    max_new_tokens = 256
    batch_sizes = [32, 64, 128]

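    # The prompts are cycled below to fill batches of 32, 64, and 128 requests.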
    base_prompts = [
        "What is Tensor Parallelism?",
        "Explain machine learning fundamentals.",
        "How do neural networks work?",
        "What are the benefits of distributed computing?",
        "Describe the attention mechanism in transformers.",
        "What is gradient descent?",
        "How does backpropagation work?",
        "Explain the concept of overfitting.",
    ]

    # ============ PRE-TOKENIZE ALL BATCHES ============
    tokenizer = AutoTokenizer.from_pretrained(model_id)

    pre_tokenized = {}
    for batch_size in batch_sizes:
        messages = [
            [{"role": "user", "content": base_prompts[i % len(base_prompts)]}]
            for i in range(batch_size)
        ]
        texts = [
            tokenizer.apply_chat_template(m, add_generation_prompt=True, tokenize=False, reasoning_effort="low")
            for m in messages
        ]
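        # Left padding keeps prompt tokens at the end of each row, as decoder-only generation requires.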
        pre_tokenized[batch_size] = tokenizer(
            texts,
            return_tensors="pt",
            padding=True,
            padding_side="left",
        )

    # ============ BENCHMARK LOOP ============
    results = []

    for use_attn_kernels in [True, False]:
        for use_kernels in [True, False]:
            model = load_model(use_kernels, use_attn_kernels, model_id)

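            # Each Timer runs generate() 5 times and records the mean latency for this kernel configuration.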
            for batch_size in batch_sizes:
                results.append(
                    benchmark.Timer(
                        stmt="generate(model, model_inputs, max_new_tokens)",
                        setup="from __main__ import generate",
                        globals={
                            "model": model,
                            "model_inputs": pre_tokenized[batch_size].to(model.device),
                            "max_new_tokens": max_new_tokens,
                        },
                        num_threads=torch.get_num_threads(),
                        label="Time to generate 256 tokens",
                        sub_label=f"batch_size={batch_size}",
                        description=f"kernels={use_kernels}, attn_kernels={use_attn_kernels}",
                    ).timeit(5)
                )

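            # Release GPU memory before loading the next kernel configuration.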
            model.to("cpu")
            del model
            torch.cuda.empty_cache()

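    # Collate every measurement into a single comparison table.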
    compare = benchmark.Compare(results)
    compare.print()