Spestly committed on
Commit
4a6f262
·
verified ·
1 Parent(s): bd453e5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -7
app.py CHANGED
@@ -14,18 +14,19 @@ login(token=HF_TOKEN)
14
  # Define models
15
  MODELS = {
16
  "athena-1": {
17
- "name": "⚑ Atlas-Flash 1205",
18
  "sizes": {
19
  "1.5B": "Spestly/Atlas-R1-1.5B-Preview",
20
  },
21
- "emoji": "⚑",
22
  "experimental": True,
23
- "is_vision": False, # Enable vision support for this model
 
24
  },
25
  }
26
 
27
  # Profile pictures
28
- USER_PFP = "user.png" # Hugging Face user avatar
29
  AI_PFP = "ai_pfp.png" # Replace with the path to your AI's image or a URL
30
 
31
  class AtlasInferenceApp:
@@ -80,6 +81,7 @@ class AtlasInferenceApp:
80
  "config": {
81
  "name": f"{MODELS[model_key]['name']} {model_size}",
82
  "path": model_path,
 
83
  }
84
  })
85
  return f"βœ… {MODELS[model_key]['name']} {model_size} loaded successfully!"
@@ -91,9 +93,13 @@ class AtlasInferenceApp:
91
  return "⚠️ Please select and load a model first"
92
 
93
  try:
94
- # Add a system instruction to guide the model's behavior
95
- system_instruction = os.getenv("SYS_PROMPT")
96
- prompt = f"{system_instruction}\n\n### Instruction:\n{message}\n\n### Response:"
 
 
 
 
97
 
98
  inputs = st.session_state.current_model["tokenizer"](
99
  prompt,
 
14
  # Define models
15
  MODELS = {
16
  "athena-1": {
17
+ "name": "🦁 Atlas-Flash 1215",
18
  "sizes": {
19
  "1.5B": "Spestly/Atlas-R1-1.5B-Preview",
20
  },
21
+ "emoji": "",
22
  "experimental": True,
23
+ "is_vision": False, # Set to True to enable vision support
24
+ "system_prompt_env": "ATLAS_FLASH_1215", # Env variable for the system prompt
25
  },
26
  }
27
 
28
  # Profile pictures
29
+ USER_PFP = "https://huggingface.co/front/assets/avatars.png" # Hugging Face user avatar
30
  AI_PFP = "ai_pfp.png" # Replace with the path to your AI's image or a URL
31
 
32
  class AtlasInferenceApp:
 
81
  "config": {
82
  "name": f"{MODELS[model_key]['name']} {model_size}",
83
  "path": model_path,
84
+ "system_prompt": os.getenv(MODELS[model_key]["system_prompt_env"]), # Load system prompt from env
85
  }
86
  })
87
  return f"βœ… {MODELS[model_key]['name']} {model_size} loaded successfully!"
 
93
  return "⚠️ Please select and load a model first"
94
 
95
  try:
96
+ # Get the system prompt for the current model
97
+ system_prompt = st.session_state.current_model["config"]["system_prompt"]
98
+ if not system_prompt:
99
+ return "⚠️ System prompt not found for the selected model."
100
+
101
+ # Add the system instruction to guide the model's behavior
102
+ prompt = f"{system_prompt}\n\n### Instruction:\n{message}\n\n### Response:"
103
 
104
  inputs = st.session_state.current_model["tokenizer"](
105
  prompt,