Spaces: Running on A10G
osanseviero committed
Commit 667b3c7 · 1 Parent(s): 813f7d3
Update app.py
app.py CHANGED
```diff
@@ -3,6 +3,10 @@ import gradio as gr
 import numpy as np
 from transformers import pipeline
 
+import torch
+print(f"Is CUDA available: {torch.cuda.is_available()}")
+print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
+
 pipe_flan = pipeline("text2text-generation", model="google/flan-t5-large", device="cuda")
 pipe_vanilla = pipeline("text2text-generation", model="t5-large", device="cuda")
 
@@ -24,7 +28,7 @@ description = "Demo that compares [T5-large](https://huggingface.co/t5-large) an
 def inference(text):
     output_flan = pipe_flan(text)[0]["generated_text"]
     output_vanilla = pipe_vanilla(text)[0]["generated_text"]
-    return [output_flan,
+    return [output_flan, output_vanilla]
 
 io = gr.Interface(
     inference,
```
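For context, a minimal sketch of the pattern app.py follows after this commit, assembled from the hunks above. The CPU fallback and the example prompt are illustrative additions, not part of the commit; the actual Space runs on an A10G and hard-codes `device="cuda"`.

```python
import torch
from transformers import pipeline

# Startup diagnostics added by this commit: confirm the GPU is visible
# before the pipelines try to load onto it.
print(f"Is CUDA available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")

# The commit uses device="cuda" unconditionally; the fallback here is an
# assumption added so the sketch also runs on a CPU-only machine.
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe_flan = pipeline("text2text-generation", model="google/flan-t5-large", device=device)
pipe_vanilla = pipeline("text2text-generation", model="t5-large", device=device)

def inference(text):
    # Each pipeline call returns a list of dicts: [{"generated_text": "..."}]
    output_flan = pipe_flan(text)[0]["generated_text"]
    output_vanilla = pipe_vanilla(text)[0]["generated_text"]
    return [output_flan, output_vanilla]

# Hypothetical prompt for illustration only.
print(inference("Answer the following question: what is the capital of France?"))
```

Printing `torch.cuda.is_available()` and the device name at startup is a quick way to confirm from the Space logs that the A10G was actually attached before the pipelines attempt to load onto it.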