Spaces:
Paused
Paused
rodrigomasini
committed on
Commit
•
10d87a5
1
Parent(s):
b23a956
Update app_v3.py
Browse files
app_v3.py
CHANGED
@@ -35,7 +35,8 @@ use_triton = False
|
|
35 |
|
36 |
if torch.cuda.is_available():
|
37 |
torch.cuda.empty_cache()
|
38 |
-
|
|
|
39 |
#tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True, legacy=False)
|
40 |
tokenizer = AutoTokenizer.from_pretrained(pretrained_model_dir, use_fast=True)
|
41 |
device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
@@ -50,6 +51,17 @@ model = AutoGPTQForCausalLM.from_quantized(
|
|
50 |
viz = torch.cuda.memory_summary()
|
51 |
st.write(viz)
|
52 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
53 |
user_input = st.text_input("Input a phrase")
|
54 |
|
55 |
prompt_template = f'USER: {user_input}\nASSISTANT:'
|
|
|
35 |
|
36 |
if torch.cuda.is_available():
|
37 |
torch.cuda.empty_cache()
|
38 |
+
|
39 |
+
|
40 |
#tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True, legacy=False)
|
41 |
tokenizer = AutoTokenizer.from_pretrained(pretrained_model_dir, use_fast=True)
|
42 |
device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
|
|
51 |
viz = torch.cuda.memory_summary()
|
52 |
st.write(viz)
|
53 |
|
54 |
+
def run():
    """Return a timestamped system snapshot for display in the Streamlit UI.

    Tries ``nvidia-smi`` to report GPU status; when the binary is absent
    (CPU-only host) or exits non-zero, falls back to a directory listing
    so the page still renders something useful.

    Returns:
        str: ``"# <YYYY-MM-DD HH:MM:SS>\n\n<command output>"``.

    Raises:
        subprocess.CalledProcessError: if the fallback ``ls`` command
            itself fails (unlikely on a POSIX host).
    """
    try:
        # Preferred: GPU utilisation / memory report.
        output = subprocess.check_output(["nvidia-smi"], text=True)
    except (FileNotFoundError, subprocess.CalledProcessError):
        # No nvidia-smi on the host, or it ran but failed (e.g. driver
        # error): fall back to a cheap health signal instead of crashing
        # the page. The original caught only FileNotFoundError.
        output = subprocess.check_output(["ls", "-alh"], text=True)
    # Second-resolution local timestamp, e.g. "2024-01-31 12:34:56".
    comment = (
        datetime.datetime.now().replace(microsecond=0).isoformat().replace("T", " ")
    )
    return f"# {comment}\n\n{output}"
|
64 |
+
st.write(run())
|
65 |
user_input = st.text_input("Input a phrase")
|
66 |
|
67 |
prompt_template = f'USER: {user_input}\nASSISTANT:'
|