```python
# Initialize the LLM with a callback handler that logs every prompt and
# completion to a JSONL file.
import json
from datetime import datetime
from pathlib import Path

from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult
from langchain_groq import ChatGroq


class LLMCallbackHandler(BaseCallbackHandler):
    def __init__(self, log_path: Path):
        self.log_path = log_path

    def on_llm_start(self, serialized, prompts, **kwargs):
        # Record the outgoing prompt before the model is called.
        with self.log_path.open("a", encoding="utf-8") as file:
            file.write(json.dumps({"event": "llm_start", "text": prompts[0], "timestamp": datetime.now().isoformat()}) + "\n")

    def on_llm_end(self, response: LLMResult, **kwargs):
        # Record the model's final generation once the call completes.
        generation = response.generations[-1][-1].message.content
        with self.log_path.open("a", encoding="utf-8") as file:
            file.write(json.dumps({"event": "llm_end", "text": generation, "timestamp": datetime.now().isoformat()}) + "\n")


llm = ChatGroq(
    temperature=0,
    model_name="groq/llama-3.3-70b-versatile",
    max_tokens=500,
    callbacks=[LLMCallbackHandler(Path("prompts.jsonl"))],
)
```
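
To sanity-check the handler, here is a minimal sketch that makes one call and reads the log back. It assumes `GROQ_API_KEY` is set in the environment; also note that when invoking `ChatGroq` directly, rather than through a framework that strips provider prefixes before calling the Groq API, the model name may need to be `llama-3.3-70b-versatile` without the `groq/` prefix.

```python
# Minimal smoke test for the logging handler (assumption: GROQ_API_KEY
# is set, and the `llm` object above is in scope).
import json
from pathlib import Path

response = llm.invoke("Reply with a single word: pong")
print(response.content)

# Each call appends two records: an llm_start with the prompt that was
# sent, then an llm_end with the generation that came back.
for line in Path("prompts.jsonl").read_text(encoding="utf-8").splitlines():
    record = json.loads(line)
    print(record["event"], record["timestamp"], record["text"][:60])
```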