refactor: centralize model configuration and improve management
app.py CHANGED
@@ -12,10 +12,23 @@ import tempfile
 import shlex
 import shutil
 
-
-
-
-
+# Supported models configuration
+MODELS = {
+    "Qwen/Qwen2.5-Coder-32B-Instruct": {
+        "base_url": "https://api-inference.huggingface.co/v1/",
+        "env_key": "HF_TOKEN"
+    },
+    "deepseek-ai/DeepSeek-V3": {
+        "base_url": "https://api.deepseek.com/v1",
+        "env_key": "DEEPSEEK_API_KEY"
+    }
+}
+
+# Initialize client with first available model
+client = OpenAI(
+    base_url=next(iter(MODELS.values()))["base_url"],
+    api_key=os.environ[next(iter(MODELS.values()))["env_key"]]
+)
 
 allowed_medias = [
     ".png",
@@ -154,14 +167,13 @@ YOUR FFMPEG COMMAND:
     print(msg["content"])
     print("=====================\n")
 
-    if model_choice
-
-
-
-
-
-
-    model = "Qwen/Qwen2.5-Coder-32B-Instruct"
+    if model_choice not in MODELS:
+        raise ValueError(f"Model {model_choice} is not supported")
+
+    model_config = MODELS[model_choice]
+    client.base_url = model_config["base_url"]
+    client.api_key = os.environ[model_config["env_key"]]
+    model = "deepseek-chat" if "deepseek" in model_choice.lower() else model_choice
 
     completion = client.chat.completions.create(
         model=model,
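Taken together, the two hunks replace a hard-coded endpoint with a single MODELS table that is consulted at request time. Below is a minimal, self-contained sketch of the same pattern: MODELS, the base URLs, the environment-variable names, and the "deepseek-chat" aliasing are taken from the diff, while the get_completion() helper, its messages parameter, and the choices[0].message.content access are illustrative stand-ins for the Space's own request code.

import os
from openai import OpenAI

# Supported models configuration (copied from the diff above)
MODELS = {
    "Qwen/Qwen2.5-Coder-32B-Instruct": {
        "base_url": "https://api-inference.huggingface.co/v1/",
        "env_key": "HF_TOKEN",
    },
    "deepseek-ai/DeepSeek-V3": {
        "base_url": "https://api.deepseek.com/v1",
        "env_key": "DEEPSEEK_API_KEY",
    },
}

def get_completion(model_choice, messages):
    # Hypothetical helper: validate the choice, look up its config,
    # and send one chat completion request to the matching endpoint.
    if model_choice not in MODELS:
        raise ValueError(f"Model {model_choice} is not supported")
    config = MODELS[model_choice]
    # A fresh client per call; the committed code instead reuses one
    # module-level client and reassigns its base_url / api_key.
    client = OpenAI(
        base_url=config["base_url"],
        api_key=os.environ[config["env_key"]],
    )
    # DeepSeek's OpenAI-compatible endpoint expects its own model id.
    model = "deepseek-chat" if "deepseek" in model_choice.lower() else model_choice
    completion = client.chat.completions.create(model=model, messages=messages)
    return completion.choices[0].message.content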
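Usage under the same assumptions (the matching token must be set as a secret; the prompt is a placeholder, not the Space's real system prompt):

reply = get_completion(
    "Qwen/Qwen2.5-Coder-32B-Instruct",
    [{"role": "user", "content": "Write one ffmpeg command that converts input.mov to output.mp4."}],
)
print(reply)

One trade-off worth noting: the committed version mutates the shared client's base_url and api_key on every request, which keeps a single client object but means concurrent requests in the app could observe each other's settings; building a client per request, as in the sketch, avoids that at the cost of a little per-call overhead.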