A small distilled model. Example usage:

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

model_name = "BestPechenka/erida-luminia"

# Override rope_scaling to apply Llama 3-style RoPE extension; "type" is the
# legacy alias of "rope_type", so both are set for compatibility across
# transformers versions.
custom_config = {
    "rope_scaling": {
        "type": "llama3",
        "factor": 8.0,
        "high_freq_factor": 4.0,
        "low_freq_factor": 1.0,
        "original_max_position_embeddings": 8192,
        "rope_type": "llama3"
    }
}

# Apply the override on top of the model's stock config.
config = AutoConfig.from_pretrained(model_name, trust_remote_code=True)
config.update(custom_config)

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    config=config,
    trust_remote_code=True
)

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)

# Saiga-style prompt template; the Russian header reads "Write a suitable
# response!" and "Ассистент" means "Assistant".
saiga_prompt = """Напишите подходящий ответ!

{dialog}

Ассистент: """

dialog = []


def format_dialog(dialog):
    # Render the turn list as "Пользователь: ..." / "Ассистент: ..."
    # ("User:" / "Assistant:") blocks, the layout the prompt expects.
    formatted_dialog = ""
    for turn in dialog:
        if turn["role"] == "user":
            formatted_dialog += f"Пользователь: {turn['content']}\n\n"
        elif turn["role"] == "bot":
            formatted_dialog += f"Ассистент: {turn['content']}\n\n"
    return formatted_dialog


def generate_text():
    # The latest user turn is already stored in `dialog`, so format the full
    # history rather than appending the raw input a second time.
    prompt = saiga_prompt.format(dialog=format_dialog(dialog))
    inputs = tokenizer([prompt], return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=64)
    # Decode only the newly generated tokens, not the echoed prompt.
    return tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)


# Seed the history with the greeting once, outside the loop, so it is not
# duplicated on every turn. The Russian reads "Hi! My name is erida-luminia."
dialog.append({"role": "bot", "content": "Привет! Меня зовут erida-luminia."})

while True:
    user_input = input(">>> ")
    dialog.append({"role": "user", "content": user_input})

    generated_text = generate_text()
    print(generated_text)

    dialog.append({"role": "bot", "content": generated_text})
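If you want replies to appear token by token instead of all at once, transformers' TextStreamer can be passed to generate. A minimal sketch building on the objects above (the name generate_text_streaming is ours, not part of the original card):

from transformers import TextStreamer

# Prints decoded tokens to stdout as they are generated;
# skip_prompt=True suppresses the echoed prompt.
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)


def generate_text_streaming():
    prompt = saiga_prompt.format(dialog=format_dialog(dialog))
    inputs = tokenizer([prompt], return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=64, streamer=streamer)
    return tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)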
Model size: 4.65B params (Safetensors). Tensor types: FP16, F32, U8.
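Since the checkpoint ships its tensors mostly in FP16, loading in half precision roughly halves host memory use. A hypothetical variant of the loading call above (device_map="auto" additionally requires the accelerate package):

import torch

# Assumed variant, not from the original card: half-precision load.
model = AutoModelForCausalLM.from_pretrained(
    "BestPechenka/erida-luminia",
    config=config,                 # the same patched config as above
    torch_dtype=torch.float16,
    device_map="auto",             # requires `accelerate`
    trust_remote_code=True,
)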