Gokulnath2003 committed on
Commit eb4a24d
1 Parent(s): 8ff67d3

Update app.py

Files changed (1)
  1. app.py +21 -39
app.py CHANGED
@@ -2,11 +2,10 @@ import numpy as np
 import streamlit as st
 from openai import OpenAI
 import os
-import sys
-from dotenv import load_dotenv, dotenv_values
+from dotenv import load_dotenv
 load_dotenv()

-# initialize the client
+# Initialize the OpenAI client
 client = OpenAI(
     base_url="https://api-inference.huggingface.co/v1",
     api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN')  # Replace with your token
@@ -20,42 +19,33 @@ model_links = {
 # Pull info about the model to display
 model_info = {
     "Meta-Llama-3-8B": {
-        'description': """The Llama (3) model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n
-        \nIt was created by the [**Meta's AI**](https://llama.meta.com/) team and has over **8 billion parameters.** \n""",
-        'logo': 'Llama_logo.png'
+        'description': """The Llama (3) model is a **Large Language Model (LLM)** designed to assist with question and answer interactions.\n
+        \nThis model was created by Meta's AI team and has over 8 billion parameters.\n
+        **Training**: The model was fine-tuned on science textbooks from the NCERT curriculum using Docker AutoTrain to ensure it can provide relevant and accurate responses in the education domain.\n
+        **Purpose**: This version of Llama has been trained specifically for educational purposes, focusing on answering science-related queries in a clear and simple manner to help students and teachers alike.\n"""
     }
 }

-# Random dog images for error message
-random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg", "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg"]
-
+# Reset the conversation
 def reset_conversation():
-    '''Resets Conversation'''
     st.session_state.conversation = []
     st.session_state.messages = []
     return None

-# Define the available models
-models = [key for key in model_links.keys()]
-
-# Create the sidebar with the dropdown for model selection
-selected_model = st.sidebar.selectbox("Select Model", models)
-
-# Custom description for SciMom
-st.sidebar.write("Built for my mom, with love. This model is pretrained with textbooks of Science NCERT.")
-st.sidebar.write("Model used: Meta Llama, trained using: Docker AutoTrain.")
+# App title and description
+st.title("Sci-Mom 👩‍🏫 ")
+st.subheader("AI chatbot for Solving your doubts 📚 :)")

-# Create a temperature slider
-temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
+# Custom description for SciMom in the sidebar
+st.sidebar.write("Built for my mom, with love ❤️. This model is pretrained with textbooks of Science NCERT.")
+st.sidebar.write("Base-Model used: Meta Llama, trained using: Docker AutoTrain.")

-# Add reset button to clear conversation
-st.sidebar.button('Reset Chat', on_click=reset_conversation)
+# Add technical details in the sidebar
+st.sidebar.markdown(model_info["Meta-Llama-3-8B"]['description'])
+st.sidebar.markdown("*By Gokulnath ♔ *")

-# Create model description
-st.sidebar.write(f"You're now chatting with **{selected_model}**")
-st.sidebar.markdown(model_info[selected_model]['description'])
-st.sidebar.image(model_info[selected_model]['logo'])
-st.sidebar.markdown("*Generated content may be inaccurate or false.*")
+# If model selection was needed (now removed)
+selected_model = "Meta-Llama-3-8B"  # Only one model remains

 if "prev_option" not in st.session_state:
     st.session_state.prev_option = selected_model
@@ -68,12 +58,6 @@ if st.session_state.prev_option != selected_model:
 # Pull in the model we want to use
 repo_id = model_links[selected_model]

-st.subheader(f'AI - {selected_model}')
-
-# Set a default model
-if selected_model not in st.session_state:
-    st.session_state[selected_model] = model_links[selected_model]
-
 # Initialize chat history
 if "messages" not in st.session_state:
     st.session_state.messages = []
@@ -84,7 +68,7 @@ for message in st.session_state.messages:
         st.markdown(message["content"])

 # Accept user input
-if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
+if prompt := st.chat_input("Ask Scimom!"):
     # Display user message in chat message container
     with st.chat_message("user"):
         st.markdown(prompt)
@@ -99,17 +83,15 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
                 {"role": m["role"], "content": m["content"]}
                 for m in st.session_state.messages
             ],
-            temperature=temp_values,
+            temperature=0.5,  # Default temperature setting
             stream=True,
             max_tokens=3000,
         )
         response = st.write_stream(stream)

     except Exception as e:
-        response = "😵‍💫 Looks like something went wrong! Here's a random pic of a 🐶:"
+        response = "😵‍💫 Something went wrong. Please try again later."
         st.write(response)
-        random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
-        st.image(random_dog_pick)
         st.write("This was the error message:")
         st.write(e)

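Note for reviewers: the updated app.py combines two APIs, the Hugging Face Inference API's OpenAI-compatible endpoint and Streamlit's st.write_stream for incremental rendering. Below is a minimal, self-contained sketch of that pattern under stated assumptions: the HUGGINGFACEHUB_API_TOKEN environment variable is set, and the model repo id is a hypothetical placeholder, since the actual model_links values are outside this diff.

```python
# Minimal sketch of the streaming pattern in the updated app.py.
# Assumptions: HUGGINGFACEHUB_API_TOKEN is set in the environment, and the
# model repo id below is a hypothetical placeholder -- the real values live
# in model_links, which this diff does not show.
import os

import streamlit as st
from openai import OpenAI

client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
)

if prompt := st.chat_input("Ask a question"):
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        try:
            # stream=True makes the client return an iterator of chunks;
            # st.write_stream renders them as they arrive and returns the
            # full concatenated text.
            stream = client.chat.completions.create(
                model="meta-llama/Meta-Llama-3-8B-Instruct",  # hypothetical repo id
                messages=[{"role": "user", "content": prompt}],
                temperature=0.5,
                stream=True,
                max_tokens=3000,
            )
            response = st.write_stream(stream)
        except Exception as e:
            st.write("Something went wrong. This was the error message:")
            st.write(e)
```

Passing the raw stream object to st.write_stream works because the OpenAI client's streaming response is an iterator of chunks, which st.write_stream consumes and concatenates into the returned string.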