Abhaykoul committed on
Commit 2aac71d · verified · 1 Parent(s): 120a6e9

Update app.py

Files changed (1)
  1. app.py +5 -11
app.py CHANGED
@@ -1,54 +1,48 @@
-from fastapi import FastAPI, HTTPException
+from fastapi import FastAPI, HTTPException, Query
 from fastapi.responses import StreamingResponse
 import uvicorn
 from v1 import v1
 from v2 import v2
 from chatv1 import CHATv1
+from typing import Optional
+
 app = FastAPI()
 
 @app.get("/Search/pro")
 async def v1_chat(prompt: str, model: str = "claude"):
     if model not in v1.AVAILABLE_MODELS:
         raise HTTPException(status_code=400, detail=f"Model '{model}' is not supported. Choose from {v1.AVAILABLE_MODELS}.")
-
     ai = v1(model=model)
-
     def response_generator():
         for chunk in ai.chat(prompt):
             yield f"data: {chunk}\n\n"
-
     return StreamingResponse(response_generator(), media_type="text/event-stream")
 
 @app.get("/v2/search")
 async def v2_chat(prompt: str):
     ai = v2()
-
     def response_generator():
         for chunk in ai.chat(prompt, stream=True):
             yield f"data: {chunk}\n\n"
-
     return StreamingResponse(response_generator(), media_type="text/event-stream")
+
 @app.get("/chatv1")
 async def chat_endpoint_get(
     user_prompt: str = Query(..., description="User's prompt"),
     system_prompt: Optional[str] = Query("You are a helpful AI assistant.", description="System prompt to set AI behavior")
-):):
+):
     ai = CHATv1()
     def generate():
-
         for chunk in ai.chat(user_prompt, system_prompt):
             yield f"data: {chunk}\n\n"
-
     return StreamingResponse(generate(), media_type="text/event-stream")
 
 @app.post("/chatv1")
 async def chat_endpoint_post(request: ChatRequest):
     ai = CHATv1()
     def generate():
-
         for chunk in ai.chat(request.user_prompt, request.system_prompt):
             yield f"data: {chunk}\n\n"
-
     return StreamingResponse(generate(), media_type="text/event-stream")
 
 if __name__ == "__main__":
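
Note: the POST /chatv1 handler annotates its body as ChatRequest, but that model is neither defined nor imported anywhere in this diff. A minimal Pydantic sketch of what it presumably looks like is below; it is not part of this commit, the field names are inferred from request.user_prompt and request.system_prompt in the handler, and the default system prompt mirrors the GET endpoint.

# Hypothetical ChatRequest body model for POST /chatv1 (assumption, not in this commit)
from typing import Optional
from pydantic import BaseModel

class ChatRequest(BaseModel):
    # Field names inferred from how the handler reads the request object
    user_prompt: str
    system_prompt: Optional[str] = "You are a helpful AI assistant."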
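
Note: all four endpoints stream Server-Sent Events ("data: ..." frames over text/event-stream). A quick way to consume /Search/pro is sketched below; the host and port are assumptions (7860 is the usual Hugging Face Spaces port, and the actual uvicorn.run call falls outside this hunk).

# Hypothetical client for the streaming endpoints; base URL is an assumption
import requests

with requests.get(
    "http://localhost:7860/Search/pro",
    params={"prompt": "What is FastAPI?", "model": "claude"},
    stream=True,
) as resp:
    # Read the SSE stream line by line and strip the "data: " framing
    for line in resp.iter_lines(decode_unicode=True):
        if line and line.startswith("data: "):
            print(line[len("data: "):])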