Upload score_generator.py

score_generator.py  +31 -0  ADDED
@@ -0,0 +1,31 @@
from llama_cpp import Llama
from yaml import load, Loader

from prompting import add_prompt

# Load generation settings from the YAML config.
with open('params.yml') as f:
    params = load(f, Loader=Loader)


def get_score(
    paragraph_1: str,
    paragraph_2: str,
    instruction: str = params['instruction'],
    model_path: str = params['model_file_path'],
    context_size: int = params['context_size'],
    max_tokens: int = params['max_tokens'],
    temperature: float = params['temperature'],
    top_p: float = params['top_p'],
    echo: bool = params['echo'],
):
    try:
        # Build the prompt from the two paragraphs and the instruction.
        prompt = add_prompt(paragraph_1, paragraph_2, instruction)
        # Load the local model via llama.cpp and run a single completion.
        llm = Llama(model_path=model_path, n_ctx=context_size)
        output = llm(
            prompt,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            echo=echo,
            stop='#',  # cut generation at the first '#'
        )
        return output["choices"][0]["text"]
    except Exception as e:
        # On failure, log the error; the function then implicitly returns None.
        print(f'An error occurred in the function `get_score`:\n{e}')
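
For reference, a minimal usage sketch. The `params.yml` keys below are exactly the ones the code reads; the values, model path, and paragraph text are purely illustrative assumptions:

# Assumed shape of params.yml (keys taken from score_generator.py; values are placeholders):
#
#   instruction: "Rate how similar the two paragraphs are on a scale of 0 to 10."
#   model_file_path: "models/model.gguf"
#   context_size: 2048
#   max_tokens: 16
#   temperature: 0.0
#   top_p: 1.0
#   echo: false

from score_generator import get_score

text = get_score(
    "The cat sat on the mat.",
    "A cat was sitting on a rug.",
)
print(text)  # raw completion text, or None if get_score caught an exception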
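
This commit does not include `prompting.py`, so the behavior of `add_prompt` is not shown. A plausible sketch, offered only as an assumption about its shape (it need only return a prompt string for which '#' is a sensible stop marker):

# Hypothetical sketch of prompting.add_prompt -- not the repository's actual code.
def add_prompt(paragraph_1: str, paragraph_2: str, instruction: str) -> str:
    # Interpolate the instruction and both paragraphs into a single prompt.
    return (
        f"{instruction}\n\n"
        f"Paragraph 1: {paragraph_1}\n"
        f"Paragraph 2: {paragraph_2}\n"
        f"Score: "
    )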