|
from typing import Any, Dict, Generator, List, Optional, Union

from huggingface_hub import InferenceClient
from openai import OpenAI

from prompt_template import PromptTemplate
|
|
class AIAssistant:
    def __init__(self, client: Union[OpenAI, InferenceClient], model: str):
        self.client = client
        self.model = model

    def generate_response(
        self,
        prompt_template: PromptTemplate,
        messages: List[Dict[str, str]],
        generation_params: Optional[Dict[str, Any]] = None,
        stream: bool = True,
    ) -> Generator[str, None, None]:
""" |
|
Generate LLM response using the provided template and parameters. |
|
|
|
Args: |
|
prompt_template: PromptTemplate object containing template and parameters |
|
messages: List of message dictionaries with role and content |
|
generation_params: Optional generation parameters (overrides template parameters) |
|
stream: Whether to stream the response |
|
|
|
Yields: |
|
Streamed response text |
|
""" |
|
        # Explicit overrides take precedence over the template's defaults.
        params = generation_params or prompt_template.parameters

        # Coerce content to str so non-string payloads don't break the API call.
        formatted_messages = [
            {"role": msg["role"], "content": str(msg["content"])}
            for msg in messages
        ]
        try:
            completion = self.client.chat.completions.create(
                model=self.model,
                messages=formatted_messages,
                stream=stream,
                **params,
            )

            if stream:
                # Yield the growing response after each chunk so the caller
                # can re-render the full text as it arrives.
                response = ""
                for chunk in completion:
                    if chunk.choices[0].delta.content is not None:
                        response += chunk.choices[0].delta.content
                        yield response
            else:
                # A bare `return value` inside a generator only sets
                # StopIteration.value, so the text would be lost to callers
                # iterating normally; yield the full message instead.
                yield completion.choices[0].message.content

        except Exception as e:
            yield f"Error generating response: {e}"