Spaces:
Runtime error
Runtime error
dron3flyv3r
committed on
Commit
·
a62eb6d
1
Parent(s):
40d4e4c
update
Browse files
app.py
CHANGED
@@ -15,21 +15,23 @@ def transcript_audio(audio_file) -> str:
|
|
15 |
|
16 |
|
17 |
def summarize_text(text: str, bullet_points: int, conclusion: bool) -> str:
|
18 |
-
llm_model = "meta-llama/Meta-Llama-3-
|
19 |
api = InferenceClient(llm_model, token=HUGGINGFACE_API_KEY)
|
20 |
tokenizer = AutoTokenizer.from_pretrained(llm_model, token=HUGGINGFACE_API_KEY)
|
21 |
if conclusion:
|
22 |
-
user_chat = f"Summarize the following text into {bullet_points} bullet points and a conclusion
|
23 |
else:
|
24 |
user_chat = (
|
25 |
-
f"Summarize the following text into {bullet_points} bullet points
|
26 |
)
|
27 |
chat = [
|
|
|
28 |
{"role": "user", "content": user_chat},
|
29 |
]
|
30 |
prompt = tokenizer.apply_chat_template(
|
31 |
chat, tokenize=False, add_generation_prompt=True
|
32 |
)
|
|
|
33 |
summary = api.text_generation(prompt, max_new_tokens=250, do_sample=True)
|
34 |
print(summary)
|
35 |
return summary
|
|
|
def summarize_text(text: str, bullet_points: int, conclusion: bool) -> str:
    """Summarize *text* into Markdown bullet points via a hosted Llama-3 model.

    Args:
        text: The raw text (e.g. a meeting transcript) to summarize.
        bullet_points: Number of bullet points to request in the prompt.
        conclusion: When True, also ask the model for a concluding section.

    Returns:
        The model's generated summary (expected to be Markdown text with a
        title, per the system prompt).
    """
    llm_model = "meta-llama/Meta-Llama-3-70B-Instruct"
    # Remote inference client plus the matching tokenizer; the tokenizer is
    # used only to render the chat template into a prompt string — no model
    # weights are loaded locally.
    api = InferenceClient(llm_model, token=HUGGINGFACE_API_KEY)
    tokenizer = AutoTokenizer.from_pretrained(llm_model, token=HUGGINGFACE_API_KEY)

    if conclusion:
        user_chat = (
            f"Summarize the following text, into {bullet_points} bullet points"
            f" and a conclusion:\n{text}"
        )
    else:
        user_chat = (
            f"Summarize the following text, into {bullet_points} bullet points:\n{text}"
        )

    chat = [
        {
            "role": "system",
            # Fixed typo in the original prompt: "You well help" -> "You will help".
            "content": (
                "You are a Meeting Summarizer AI. You will help summarize the "
                "text into bullet points and a conclusion. Please return a "
                "Markdown formatted text. Remember to give it a title."
            ),
        },
        {"role": "user", "content": user_chat},
    ]
    prompt = tokenizer.apply_chat_template(
        chat, tokenize=False, add_generation_prompt=True
    )

    # do_sample=True makes output non-deterministic; max_new_tokens=250 caps
    # the summary length.
    summary = api.text_generation(prompt, max_new_tokens=250, do_sample=True)
    print(summary)  # NOTE(review): debug print — consider logging instead
    return summary
|