Abhaykoul committed
Commit 129b703 · verified · 1 Parent(s): adff8c4

Update app.py

Files changed (1): app.py +12 -116
app.py CHANGED
@@ -1,126 +1,13 @@
-import requests
-import json
-from typing import Generator
 from fastapi import FastAPI, HTTPException
 from fastapi.responses import StreamingResponse
 import uvicorn
-from dotenv import load_dotenv
-import os
-import re
-
-# Load environment variables from .env file
-load_dotenv()
+from v1 import v1
+from v2 import v2
 
 app = FastAPI()
 
-class v1:
-    """
-    A class to interact with the v1 AI API.
-    """
-
-    AVAILABLE_MODELS = ["llama", "claude"]
-
-    def __init__(
-        self,
-        model: str = "claude",
-        timeout: int = 300,
-        proxies: dict = {},
-    ):
-        """
-        Initializes the v1 AI API with given parameters.
-        Args:
-            model (str, optional): The AI model to use for text generation. Defaults to "claude".
-                Options: "llama", "claude".
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-        """
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Model '{model}' is not supported. Choose from {self.AVAILABLE_MODELS}.")
-
-        self.session = requests.Session()
-        self.api_endpoint = os.getenv("API_ENDPOINT")
-        self.timeout = timeout
-        self.model = model
-        self.device_token = self.get_device_token()
-
-        self.session.headers.update(
-            {
-                "Content-Type": "application/json",
-                "Accept": "text/event-stream",
-            }
-        )
-        self.session.proxies = proxies
-
-    def get_device_token(self) -> str:
-        device_token_url = os.getenv("DEVICE_TOKEN_URL")
-        headers = {"Content-Type": "application/json; charset=utf-8"}
-        data = {}
-        response = requests.post(
-            device_token_url, headers=headers, data=json.dumps(data)
-        )
-
-        if response.status_code == 200:
-            device_token_data = response.json()
-            return device_token_data["sessionToken"]
-        else:
-            raise Exception(
-                f"Failed to get device token - ({response.status_code}, {response.reason}) - {response.text}"
-            )
-
-    def ask(self, prompt: str) -> Generator[str, None, None]:
-        search_data = {"query": prompt, "deviceToken": self.device_token}
-
-        response = self.session.post(
-            self.api_endpoint, json=search_data, stream=True, timeout=self.timeout
-        )
-        if not response.ok:
-            raise Exception(
-                f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-            )
-
-        buffer = ""
-        for line in response.iter_lines(decode_unicode=True):
-            if line:
-                if line.startswith("data: "):
-                    data_str = line[6:]
-                    try:
-                        data = json.loads(data_str)
-                        if data['type'] == 'chunk':
-                            model = data['model']
-                            if (self.model == "llama" and model == 'OPENROUTER_LLAMA_3') or \
-                               (self.model == "claude" and model == 'OPENROUTER_CLAUDE'):
-                                content = data['chunk']['content']
-                                if content:
-                                    buffer += content
-                                    # Check if we have a complete line or paragraph
-                                    lines = buffer.split('\n')
-                                    if len(lines) > 1:
-                                        for complete_line in lines[:-1]:
-                                            yield self.format_text(complete_line) + '\n'
-                                        buffer = lines[-1]
-                    except KeyError:
-                        pass
-                    except json.JSONDecodeError:
-                        pass
-
-        # Yield any remaining content in the buffer
-        if buffer:
-            yield self.format_text(buffer)
-
-        yield "[DONE]"
-
-    def format_text(self, text: str) -> str:
-        # Convert *text* to <i>text</i> for italic
-        text = re.sub(r'\*(.*?)\*', r'<i>\1</i>', text)
-        return text
-
-    def chat(self, prompt: str) -> Generator[str, None, None]:
-        """Stream responses as string chunks"""
-        return self.ask(prompt)
-
-
 @app.get("/Search/pro")
-async def chat(prompt: str, model: str = "claude"):
+async def v1_chat(prompt: str, model: str = "claude"):
     if model not in v1.AVAILABLE_MODELS:
         raise HTTPException(status_code=400, detail=f"Model '{model}' is not supported. Choose from {v1.AVAILABLE_MODELS}.")
 
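The new "from v1 import v1" import is not defined anywhere in this commit; presumably the class deleted above moved verbatim into a sibling v1.py. A minimal sketch of the surface app.py now depends on (the module path and body are assumptions inferred from the import and the endpoint code, not part of the commit):

# v1.py -- hypothetical module layout; this commit shows only the import.
from typing import Generator

class v1:
    """Client for the v1 AI API, assumed to be the class removed from app.py above."""

    AVAILABLE_MODELS = ["llama", "claude"]  # validated by the /Search/pro endpoint

    def __init__(self, model: str = "claude", timeout: int = 300, proxies: dict = {}):
        ...  # session setup and device-token fetch, as in the deleted code

    def chat(self, prompt: str) -> Generator[str, None, None]:
        """Stream response chunks as strings; the deleted code ended with "[DONE]"."""
        ...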
@@ -132,6 +19,15 @@ async def chat(prompt: str, model: str = "claude"):
 
     return StreamingResponse(response_generator(), media_type="text/event-stream")
 
+@app.get("/v2/search")
+async def v2_chat(prompt: str):
+    ai = v2()
+
+    def response_generator():
+        for chunk in ai.chat(prompt, stream=True):
+            yield f"data: {chunk}\n\n"
+
+    return StreamingResponse(response_generator(), media_type="text/event-stream")
 
 if __name__ == "__main__":
     uvicorn.run(app, host="0.0.0.0", port=8000)
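The v2 class likewise comes from a module absent from this commit. From its only call site, ai.chat(prompt, stream=True), a minimal sketch of the interface the new endpoint assumes (everything below is inferred, not the actual v2 implementation):

# v2.py -- hypothetical; inferred solely from the call in v2_chat above.
from typing import Generator

class v2:
    def chat(self, prompt: str, stream: bool = False) -> Generator[str, None, None]:
        """When stream=True, yield response chunks as strings."""
        ...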
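For reference, a small client for the new streaming route; a sketch that assumes the server is running locally on the port passed to uvicorn.run and that the requests package is installed:

import requests

# Read the server-sent event stream from the new /v2/search endpoint.
# Each chunk arrives on a "data: ..." line; /Search/pro works the same way
# but also accepts a "model" query parameter ("llama" or "claude").
with requests.get(
    "http://localhost:8000/v2/search",
    params={"prompt": "Hello"},
    stream=True,
    timeout=300,
) as resp:
    resp.raise_for_status()
    for line in resp.iter_lines(decode_unicode=True):
        if line and line.startswith("data: "):
            print(line[len("data: "):])  # one streamed chunk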