Update app.py
app.py
CHANGED
@@ -1,20 +1,54 @@
 from openai import OpenAI
+import streamlit as st
+import os
+from datetime import datetime
 
 client = OpenAI(
     base_url = "https://integrate.api.nvidia.com/v1",
     api_key = "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"
 )
 
-completion = client.chat.completions.create(
-    model="nvidia/nemotron-4-340b-instruct",
-    messages=[{"role":"user","content":"Write a limerick about the wonders of GPU computing."}],
-    temperature=0.2,
-    top_p=0.7,
-    max_tokens=1024,
-    stream=True
-)
 
-
-
-
+
+st.title("Nemotron 4 340B")
+
+with st.sidebar:
+    st.markdown("This is a basic chatbot. Ask anything. The app is supported by Nazmul Hasan Nihal")
+    if st.button("Clear Session"):
+        st.session_state.clear()
+    st.write(f"Copyright 2023-{datetime.now().year} Present Nazmul Hasan Nihal")
+
+if "openai_model" not in st.session_state:
+    st.session_state["openai_model"] = "nvidia/nemotron-4-340b-instruct"
+
+if "messages" not in st.session_state:
+    st.session_state.messages = [{"role": "system", "content": "you are a helpful assistant"}]
+
+for message in st.session_state.messages:
+    with st.chat_message(message["role"]):
+        st.markdown(message["content"])
+
+if prompt := st.chat_input("What is up"):
+    st.session_state.messages.append({"role": "user", "content": prompt})
+    with st.chat_message("user"):
+        st.markdown(prompt)
+
+    with st.chat_message("assistant"):
+        with st.spinner("The assistant is thinking... Please wait."):
+            stream = client.chat.completions.create(
+                model=st.session_state["openai_model"],
+                messages=st.session_state.messages,
+                temperature=0.5,
+                top_p=0.7,
+                max_tokens=1024,
+                stream=True,
+            )
+            response_chunks = []
+            for chunk in stream:
+                if chunk.choices[0].delta.content is not None:
+                    response_chunks.append(chunk.choices[0].delta.content)
+            response = "".join(response_chunks)
+            st.markdown(response)
+
+    st.session_state.messages.append({"role": "assistant", "content": response})
 
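A side note on the change: the new version adds "import os" but never uses it, and the NVIDIA API key is committed in plain text. A minimal sketch of loading the key from the environment instead, assuming it is exported under a hypothetical NVIDIA_API_KEY variable (that name is this note's assumption, not part of the commit):

import os
from openai import OpenAI

# Hypothetical variable name for this sketch; export it before launching, e.g.
#   export NVIDIA_API_KEY="nvapi-..."
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.environ["NVIDIA_API_KEY"],
)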
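The chunk-collecting loop only renders the reply after the stream has finished. A sketch of an incremental alternative, assuming Streamlit 1.31 or newer (which added st.write_stream); it draws tokens as they arrive and returns the concatenated text, so the manual loop is no longer needed:

with st.chat_message("assistant"):
    with st.spinner("The assistant is thinking... Please wait."):
        stream = client.chat.completions.create(
            model=st.session_state["openai_model"],
            messages=st.session_state.messages,
            temperature=0.5,
            top_p=0.7,
            max_tokens=1024,
            stream=True,
        )
        # st.write_stream renders each chunk as it arrives and
        # returns the full response as a string.
        response = st.write_stream(stream)
st.session_state.messages.append({"role": "assistant", "content": response})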
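To try the revised app locally, install the two dependencies and launch it with Streamlit's runner:

pip install streamlit openai
streamlit run app.py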