Yhhxhfh committed on
Commit
eb10c67
1 Parent(s): 3488f0a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -8
app.py CHANGED
@@ -8,6 +8,10 @@ import gradio as gr
8
  import os
9
  import spaces
10
  from dotenv import load_dotenv
 
 
 
 
11
 
12
  load_dotenv()
13
 
@@ -117,22 +121,23 @@ async def process_message(message):
117
  for model, response in unique_responses.items():
118
  formatted_response += f"**{model}:**\n{response}\n\n"
119
 
120
- curl_command = f"""
121
- curl -X POST -H "Content-Type: application/json" \\
122
- -d '{{"message": "{message}"}}' \\
123
- http://localhost:7860/generate
124
- """
125
- return formatted_response, curl_command
126
 
 
 
 
 
 
 
127
 
128
  iface = gr.Interface(
129
  fn=process_message,
130
  inputs=gr.Textbox(lines=2, placeholder="Enter your message here..."),
131
- outputs=[gr.Markdown(), gr.Textbox(label="cURL command")],
132
  title="Multi-Model LLM API",
133
  description="Enter a message and get responses from multiple LLMs.",
134
  )
135
 
136
  if __name__ == "__main__":
137
- port = int(os.environ.get("PORT", 7860))
138
  iface.launch(server_port=port)
 
8
  import os
9
  import spaces
10
  from dotenv import load_dotenv
11
+ from fastapi import FastAPI, Request
12
+ from fastapi.responses import JSONResponse
13
+
14
+ app = FastAPI()
15
 
16
  load_dotenv()
17
 
 
121
  for model, response in unique_responses.items():
122
  formatted_response += f"**{model}:**\n{response}\n\n"
123
 
124
+ return formatted_response
 
 
 
 
 
125
 
126
+ @app.post("/generate_multimodel")
127
+ async def api_generate_multimodel(request: Request):
128
+ data = await request.json()
129
+ message = data["message"]
130
+ formatted_response = await process_message(message)
131
+ return JSONResponse({"response": formatted_response})
132
 
133
  iface = gr.Interface(
134
  fn=process_message,
135
  inputs=gr.Textbox(lines=2, placeholder="Enter your message here..."),
136
+ outputs=gr.Markdown(),
137
  title="Multi-Model LLM API",
138
  description="Enter a message and get responses from multiple LLMs.",
139
  )
140
 
141
  if __name__ == "__main__":
142
+ port = int(os.environ.get("PORT", 7867))
143
  iface.launch(server_port=port)