nikunjcepatel committed
Commit 991d86a · verified · 1 Parent(s): 4394f5b

Update app.py

Files changed (1)
  1. app.py +51 -31
app.py CHANGED
@@ -3,39 +3,59 @@ import requests
  import json
  import os
 
- # Retrieve the Open Router API Key from the Space secrets
- API_KEY = os.getenv("OpenRounter_API_KEY")
-
- def generate_text(input_text):
-     response = requests.post(
-         url="https://openrouter.ai/api/v1/chat/completions",
-         headers={
-             "Authorization": f"Bearer {API_KEY}"
-         },
-         data=json.dumps({
-             "model": "openai/gpt-4o-mini-2024-07-18",  # Optional
-             "messages": [{"role": "user", "content": input_text}],
-             "top_p": 1,
-             "temperature": 1,
-             "frequency_penalty": 0,
-             "presence_penalty": 0,
-             "repetition_penalty": 1,
-             "top_k": 0,
-         })
-     )
-
-     # Handle errors
-     if response.status_code != 200:
-         return f"Error: {response.status_code}, {response.text}"
-
-     # Parse and return the content of the response
-     try:
-         response_json = response.json()
-         return response_json.get("choices", [{}])[0].get("message", {}).get("content", "No content returned.")
-     except json.JSONDecodeError:
-         return "Error: Unable to parse response."
-
- # Create Gradio interface
- iface = gr.Interface(fn=generate_text, inputs="text", outputs="text")
+ # Retrieve the OpenRouter API Key from the Space secrets
+ API_KEY = os.getenv("OpenRouter_API_KEY")
+
+ # Define available models for selection
+ MODEL_OPTIONS = [
+     "openai/gpt-4o-mini-2024-07-18",
+     "openai/gpt-4",
+     "anthropic/claude-2",
+     "cohere/command-xlarge-nightly"
+ ]
+
+ def generate_comparisons(input_text, selected_models):
+     results = {}
+     for model in selected_models:
+         response = requests.post(
+             url="https://openrouter.ai/api/v1/chat/completions",
+             headers={
+                 "Authorization": f"Bearer {API_KEY}",
+                 "Content-Type": "application/json"
+             },
+             data=json.dumps({
+                 "model": model,  # Use the current model
+                 "messages": [{"role": "user", "content": input_text}],
+                 "top_p": 1,
+                 "temperature": 1,
+                 "frequency_penalty": 0,
+                 "presence_penalty": 0,
+                 "repetition_penalty": 1,
+                 "top_k": 0,
+             })
+         )
+
+         # Parse the response
+         if response.status_code == 200:
+             try:
+                 response_json = response.json()
+                 results[model] = response_json.get("choices", [{}])[0].get("message", {}).get("content", "No content returned.")
+             except json.JSONDecodeError:
+                 results[model] = "Error: Unable to parse response."
+         else:
+             results[model] = f"Error: {response.status_code}, {response.text}"
+
+     return results
+
+ # Create Gradio interface with multiple model selection
+ iface = gr.Interface(
+     fn=generate_comparisons,
+     inputs=[
+         gr.Textbox(lines=2, label="Input Text", placeholder="Enter your query here"),
+         gr.CheckboxGroup(choices=MODEL_OPTIONS, label="Select Models", value=[MODEL_OPTIONS[0]])
+     ],
+     outputs=gr.JSON(label="Model Comparisons"),
+     title="Compare Outputs from Multiple Models"
+ )
 
  iface.launch()
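
As a usage note, the sketch below shows how generate_comparisons could be smoke-tested on its own, without going through the Gradio UI. It is not part of the commit: it assumes the function is in scope (for example, run before iface.launch() or with that line commented out), that the OpenRouter_API_KEY environment variable is set, and the prompt and model choices are placeholders.

    import os

    # Hypothetical smoke test for generate_comparisons (assumes the function
    # from app.py is in scope and OpenRouter_API_KEY is set in the environment).
    assert os.getenv("OpenRouter_API_KEY"), "Set OpenRouter_API_KEY before running"

    outputs = generate_comparisons(
        "Summarize the benefits of unit testing in one sentence.",
        ["openai/gpt-4o-mini-2024-07-18", "anthropic/claude-2"],
    )
    # Each key is a model ID, each value is that model's reply or an error string.
    for model_name, reply in outputs.items():
        print(f"{model_name}:\n{reply}\n")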