import time

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
|
|
DEVICE = "cuda:1"  # run the benchmark on the second GPU
|
|
# Load Llama-2-7B in fp16; low_cpu_mem_usage avoids materializing an extra
# full-precision copy of the weights in host RAM during loading.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-hf", torch_dtype=torch.float16, low_cpu_mem_usage=True
)
model.to(DEVICE)
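
# Warm-up sketch (an addition, not part of the original script): the first
# forward pass pays one-time costs (CUDA context creation, kernel selection,
# allocator growth), so a throwaway call keeps them out of the timings below.
with torch.no_grad():
    model(torch.ones((1, 16), dtype=torch.long, device=DEVICE))
torch.cuda.synchronize(DEVICE)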
|
|
|
|
| print("Forward benchmarks") |
| print(50 * "=") |
|
|
for batch_size in (1, 4, 16):
    for input_seq in (4, 16, 256):
        input_ids = torch.ones((batch_size, input_seq), dtype=torch.long, device=DEVICE)
        attention_mask = torch.ones_like(input_ids)
        attention_mask[0, 3] = 0  # zero out one position so the mask is not trivially all-ones

        times = []
        for _ in range(3):
            start_time = time.time()
            with torch.no_grad():
                logits = model(input_ids=input_ids, attention_mask=attention_mask).logits
            torch.cuda.synchronize(DEVICE)  # CUDA launches are async; wait before stopping the clock
            times.append(time.time() - start_time)

        # best of three runs, to discount first-run warm-up effects
        result = min(times)

        print(f"Forward bsz={batch_size}, input_seq={input_seq}: {result:.3f}s")
|
|
|
|
| print("Generate benchmarks") |
| print(50 * "=") |
|
|
for batch_size in (1, 16):
    for input_seq in (4, 256):
        input_ids = torch.ones((batch_size, input_seq), dtype=torch.long, device=DEVICE)
        attention_mask = torch.ones_like(input_ids)
        attention_mask[0, 3] = 0

        times = []
        for _ in range(3):
            start_time = time.time()
            # pass the mask explicitly (it was otherwise unused here) and pin
            # pad_token_id, since Llama's tokenizer defines no pad token
            out = model.generate(
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_new_tokens=256,
                do_sample=False,
                pad_token_id=tokenizer.eos_token_id,
            )
            torch.cuda.synchronize(DEVICE)
            times.append(time.time() - start_time)

        result = min(times)

        print(f"Generate bsz={batch_size}, input_seq={input_seq}: {result:.3f}s")
|
|