prithvirajpawar committed · Commit bb56e41
1 Parent(s): 0affb3f
dockerfile and helpmate

Files changed:
- Dockerfile (+18 -6)
- helpmate_ai.py (+25 -0)
Dockerfile CHANGED
@@ -1,14 +1,26 @@
 # Use Python base image
 FROM python:3.9-slim
 
+# Set up a new user named "user" with user ID 1000
+RUN useradd -m -u 1000 user
+
+# Switch to the "user" user
+USER user
+
+# Set home to the user's home directory
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH
+
+# Set the working directory to the user's home directory
+WORKDIR $HOME/app
 # Set the working directory
-WORKDIR /app
+# WORKDIR /app
 
-# Copy
-COPY . /app
+# Copy the current directory contents into the container at $HOME/app setting the owner to the user
+COPY --chown=user . $HOME/app
 
-
-
+# Copy project files to the container
+# COPY . /app
 
 # Install dependencies
 RUN pip install --no-cache-dir -r requirements.txt
@@ -17,4 +29,4 @@ RUN pip install --no-cache-dir -r requirements.txt
 EXPOSE 8000
 
 # Command to run FastAPI with Uvicorn
-CMD ["uvicorn", "app
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8000"]
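The updated CMD launches Uvicorn against app:app, so the image expects a module app.py in $HOME/app that exposes a FastAPI instance named app. That module is not part of this commit; a minimal sketch of what such an entry point could look like (the file name comes from the CMD, the endpoint is an assumption):

# app.py (hypothetical sketch): the entry point that CMD ["uvicorn", "app:app", ...] expects.
# The real app module is not shown in this commit; the route below is an assumption.
from fastapi import FastAPI

app = FastAPI()


@app.get("/")
def health_check():
    # Minimal route so the container responds on port 8000 once Uvicorn starts.
    return {"status": "ok"}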
helpmate_ai.py CHANGED
@@ -177,6 +177,31 @@ def rerank_with_cross_encoder(query, results_df, top_k=3):
 # top_docs = rerank_with_cross_encoder(results_df)
 # top_docs
 
+def generate_response(query, top_docs):
+    """
+    Generate a response using GPT-3.5's ChatCompletion based on the user query and retrieved information.
+    """
+    messages = f"""
+    Remember your system message and that you are a helpful assistant that extracts relevant information from insurance policy documents to answer user queries accurately and concisely.
+    Your task is to extract and present relevant information from the policy documents to answer the user’s query.
+    The document excerpts are provided in the dataframe '{top_docs}', with the actual policy text in the 'documents' column and metadata (page numbers) in the 'metadata' column.
+    The user input is: '{query}'
+    """
+
+    # response = openai.chat.completions.create(
+    #     model="gpt-3.5-turbo",
+    #     messages=messages
+    # )
+    conversation = [{"role": "user", "parts": messages}]
+
+    return conversation  # response.choices[0].message.content.split('\n')
+
+# response = generate_response(query, top_docs)
+# print(query + '\n')
+# print("\n".join(response))
+
+
 
 
 
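generate_response no longer calls the OpenAI API directly; it builds the prompt and returns a single-turn conversation whose message uses the "parts" key, the content format used by the google-generativeai SDK. The commit does not show where that conversation is consumed; a hedged usage sketch, assuming a Gemini model and a placeholder top_docs DataFrame:

# Hypothetical usage sketch (not part of this commit): feed the conversation returned by
# generate_response to a Gemini model. The model name, environment variable, and the
# placeholder DataFrame are assumptions; in the real pipeline top_docs would come from
# rerank_with_cross_encoder.
import os

import google.generativeai as genai
import pandas as pd

from helpmate_ai import generate_response

# Placeholder retrieval results with the columns the prompt refers to.
top_docs = pd.DataFrame(
    {
        "documents": ["Premiums are payable within a 30-day grace period."],
        "metadata": [{"page": 12}],
    }
)

query = "What is the grace period for premium payment?"
conversation = generate_response(query, top_docs)

genai.configure(api_key=os.environ["GOOGLE_API_KEY"])
model = genai.GenerativeModel("gemini-1.5-flash")

# generate_response stores the prompt string under "parts"; wrap it in a list, the
# multi-part content format that generate_content accepts.
contents = [{"role": turn["role"], "parts": [turn["parts"]]} for turn in conversation]
response = model.generate_content(contents)
print(response.text)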