Jayem-11 commited on
Commit
3a2ba38
·
verified ·
1 Parent(s): ad37b24

Upload 4 files

Browse files
Files changed (4) hide show
  1. Dockerfile +20 -0
  2. main.py +29 -0
  3. qa.py +23 -0
  4. requirements.txt +6 -0
Dockerfile ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# you will also find guides on how best to write your Dockerfile

# Base image: CPython 3.9 (Debian-based).
FROM python:3.9

# Build-time working directory used only for dependency installation.
WORKDIR /code

# Copy the requirements file on its own first so this layer (and the pip
# install below) stays cached unless the dependency list changes.
COPY ./requirements.txt /code/requirements.txt

# Install dependencies; --no-cache-dir keeps the image smaller.
RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt

# Create and switch to a non-root user (uid 1000); the app runs unprivileged.
RUN useradd -m -u 1000 user
USER user
# Put user-local pip installs (~/.local/bin) on PATH for the runtime user.
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

# The application itself lives in the user's home directory.
WORKDIR $HOME/app

# Copy the source tree, owned by the non-root user so it is readable/writable.
COPY --chown=user . $HOME/app

# Serve the FastAPI app (main.py's `app`) on all interfaces, port 7860.
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
main.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, Request
2
+ from fastapi.middleware.cors import CORSMiddleware
3
+
4
# FastAPI application instance served by uvicorn (referenced as "main:app").
app = FastAPI()


# Allow cross-origin requests from any origin, with any method and header.
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# the fully-permissive development setup — confirm this is intended before
# exposing the service publicly.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
14
+
15
+
16
@app.get("/ping")
async def ping():
    """Liveness probe: always responds with a fixed greeting string."""
    alive_message = "Hello, I am alive"
    return alive_message
19
+
20
+
21
@app.post("/qa")
async def section(request: Request):
    """Answer a medical question posted as JSON.

    Expects a request body like ``{"Question": "..."}`` and returns
    ``{"Answer": "..."}`` with the model's generated text.
    """
    data = await request.json()

    # Imported lazily so the heavyweight LLM in qa.py is loaded on first
    # request rather than at server startup.
    from qa import answer

    # Bug fix: the original did `answer = answer(...)`, rebinding the
    # imported function's name to its own result. Use a distinct local name.
    generated = answer(data["Question"])

    return {"Answer": generated}
qa.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.llms import CTransformers
2
+ from langchain import PromptTemplate, LLMChain
3
+
4
# prepare the template we will use when prompting the AI
# The <s>[INST] ... [/INST]</s> markers follow the Mistral instruction
# format; {question} is substituted by the PromptTemplate below.
template = """
<s>[INST] You are a helpful, respectful and honest Medical assistant that answers
questions about the Malawian public health processes, case definitions and guidelines.
Answer the question below exactly in few words.
{question} [/INST] </s>
"""

prompt = PromptTemplate(template=template, input_variables=["question"])

# load the language model
# max_new_tokens caps the answer length; temperature 0 gives deterministic
# (greedy) generation.
config = {'max_new_tokens': 100, 'temperature': 0}

# GGUF model fetched from the Hugging Face Hub via ctransformers.
# NOTE(review): the model is loaded at import time, so importing this module
# is slow and memory-heavy — callers import it lazily for that reason.
llm = CTransformers(model='Jayem-11/OpenPipe_mistral-ft-optimized_16.gguf',
                    model_file="OpenPipe_mistral_optimized_merged_16.gguf",
                    config=config)
20
+
21
# Built once at import time: the prompt/model chain reused for every query.
# (The original rebuilt an LLMChain on each call, which is wasted work since
# both `prompt` and `llm` are module-level constants.)
_qa_chain = LLMChain(prompt=prompt, llm=llm)


def answer(query):
    """Run *query* through the prompt/LLM chain and return the generated text.

    Parameters
    ----------
    query : str
        The user's question; substituted into the prompt's {question} slot.

    Returns
    -------
    str
        The raw text produced by the model.
    """
    return _qa_chain.run({"question": query})
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
fastapi
uvicorn
langchain
transformers
ctransformers>=0.2.24
sentence-transformers