#!/usr/bin/env python3
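"""Benchmark forward-pass and greedy-generation latency of Llama-2-7b.

For each batch-size / sequence-length combination, the model is run three
times and the fastest wall-clock time is reported.
"""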
import time

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

DEVICE = "cuda:1"

# The tokenizer is only needed for the eos token id passed to generate() below.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-hf", torch_dtype=torch.float16, low_cpu_mem_usage=True
)
model.to(DEVICE)
# Forward-pass latency
print("Forward benchmarks")
print(50 * "=")
for batch_size in (1, 4, 16):
    for input_seq in (4, 16, 256):
        # Dummy all-ones inputs; the token values don't matter for timing.
        input_ids = torch.ones((batch_size, input_seq), dtype=torch.long, device=DEVICE)
        attention_mask = torch.ones_like(input_ids)
        # Mask out one position so the non-trivial attention-mask path is exercised.
        attention_mask[0, 3] = 0
        times = []
        for _ in range(3):
            torch.cuda.synchronize(DEVICE)  # start timing from an idle GPU
            start_time = time.time()
            with torch.no_grad():
                logits = model(input_ids=input_ids, attention_mask=attention_mask).logits
            torch.cuda.synchronize(DEVICE)  # CUDA launches are async; wait for completion
            times.append(time.time() - start_time)
        result = min(times)
        print(f"Forward bsz={batch_size}, input_seq={input_seq}: {result:.3f}s")
# Greedy-generation latency
print("Generate benchmarks")
print(50 * "=")
for batch_size in (1, 16):
    for input_seq in (4, 256):
        input_ids = torch.ones((batch_size, input_seq), dtype=torch.long, device=DEVICE)
        attention_mask = torch.ones_like(input_ids)
        attention_mask[0, 3] = 0
        times = []
        for _ in range(3):
            torch.cuda.synchronize(DEVICE)
            start_time = time.time()
            # Pass the mask explicitly and set pad_token_id so Llama doesn't warn.
            out = model.generate(
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_new_tokens=256,
                do_sample=False,
                pad_token_id=tokenizer.eos_token_id,
            )
            torch.cuda.synchronize(DEVICE)
            times.append(time.time() - start_time)
        result = min(times)
        print(f"Generate bsz={batch_size}, input_seq={input_seq}: {result:.3f}s")