Upload folder using huggingface_hub (#2)
- cad3df85b7b2334b2857c03565330c39ce1c874a0d2ad8241e264f1475f925f1 (5278bc78862e43f0b2524b7fc61aeb8302f5916a)
- README.md +10 -10
- base_results.json +18 -0
- plots.png +0 -0
- smashed_results.json +18 -0
README.md CHANGED
@@ -59,16 +59,16 @@ You can run the smashed model with these steps:
 ```
 2. Load & run the model.
 ```python
-
-
-
-
-
-
-
-
-
-
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from awq import AutoAWQForCausalLM
+
+model = AutoAWQForCausalLM.from_quantized("PrunaAI/OpenLLM-France-Lucie-7B-Instruct-AWQ-4bit-smashed", trust_remote_code=True, device_map='auto')
+tokenizer = AutoTokenizer.from_pretrained("OpenLLM-France/Lucie-7B-Instruct")
+
+input_ids = tokenizer("What is the color of prunes?,", return_tensors='pt').to(model.device)["input_ids"]
+
+outputs = model.generate(input_ids, max_new_tokens=216)
+tokenizer.decode(outputs[0])
 ```
 
 ## Configurations
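The snippet added in this diff loads the AWQ-quantized model, generates up to 216 new tokens, and decodes the result but never prints it. As a minimal follow-up sketch, assuming the `model`, `tokenizer`, and `input_ids` objects created in the snippet above, the generated text can be printed with special tokens stripped:

```python
# Assumes `model`, `tokenizer`, and `input_ids` as created in the README snippet above.
outputs = model.generate(input_ids, max_new_tokens=216)

# skip_special_tokens drops markers such as <s> and </s> from the decoded string.
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```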
base_results.json ADDED
@@ -0,0 +1,18 @@
+{
+    "current_gpu_type": "NVIDIA L40S",
+    "current_gpu_total_memory": 45372.6875,
+    "memory_inference_first": 25590.0,
+    "memory_inference": 25590.0,
+    "token_generation_latency_sync": 30.846586608886717,
+    "token_generation_latency_async": 30.805427208542824,
+    "token_generation_throughput_sync": 0.03241849779618423,
+    "token_generation_throughput_async": 0.03246181243422862,
+    "token_generation_CO2_emissions": 1.7413158175960427e-06,
+    "token_generation_energy_consumption": 0.0012653706804628014,
+    "inference_latency_sync": 25.427967834472657,
+    "inference_latency_async": 24.5699405670166,
+    "inference_throughput_sync": 0.039326776190282166,
+    "inference_throughput_async": 0.04070013915061842,
+    "inference_CO2_emissions": 2.974451769021189e-06,
+    "inference_energy_consumption": 1.0356497508141638e-05
+}
plots.png ADDED
smashed_results.json ADDED
@@ -0,0 +1,18 @@
+{
+    "current_gpu_type": "NVIDIA L40S",
+    "current_gpu_total_memory": 45372.6875,
+    "memory_inference_first": 4312.0,
+    "memory_inference": 4208.0,
+    "token_generation_latency_sync": 47.82877426147461,
+    "token_generation_latency_async": 48.09337295591831,
+    "token_generation_throughput_sync": 0.020907916112027268,
+    "token_generation_throughput_async": 0.02079288555861086,
+    "token_generation_CO2_emissions": 3.164995601682604e-06,
+    "token_generation_energy_consumption": 0.00230724002830658,
+    "inference_latency_sync": 41.39498291015625,
+    "inference_latency_async": 40.357160568237305,
+    "inference_throughput_sync": 0.02415751691866625,
+    "inference_throughput_async": 0.024778750187569934,
+    "inference_CO2_emissions": 3.351062981628799e-06,
+    "inference_energy_consumption": 1.1458303099082523e-05
+}
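Both results files share the same schema, so the effect of smashing can be read off by dividing matching fields (for example, memory_inference drops from 25590.0 to 4208.0). A minimal comparison sketch, assuming the two JSON files sit in the current working directory; the file paths and printed format are assumptions, not part of the commit:

```python
import json

# Load the two benchmark files committed above.
with open("base_results.json") as f:
    base = json.load(f)
with open("smashed_results.json") as f:
    smashed = json.load(f)

# Print each numeric metric side by side with the smashed/base ratio.
for key, base_value in base.items():
    if not isinstance(base_value, (int, float)):
        continue  # skip non-numeric fields such as "current_gpu_type"
    smashed_value = smashed[key]
    ratio = smashed_value / base_value
    print(f"{key}: base={base_value} smashed={smashed_value} (ratio={ratio:.3f})")
```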