---
language:
- en
license: apache-2.0
tags:
- text-generation-inference
- transformers
- unsloth
- mistral
- trl
base_model: unsloth/mistral-7b-v0.3-bnb-4bit
---
# Uploaded model

- Developed by: jingwang
- License: apache-2.0
- Finetuned from model: unsloth/mistral-7b-v0.3-bnb-4bit

This Mistral model was trained 2x faster with Unsloth and Hugging Face's TRL library.
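For context, a typical Unsloth + TRL training setup looks like the sketch below; the dataset file, LoRA rank, and hyperparameters are illustrative assumptions, not the exact configuration used for this model.

```python
# Illustrative Unsloth + TRL fine-tuning sketch (dataset path and hyperparameters are assumptions).
from unsloth import FastLanguageModel
from trl import SFTTrainer
from transformers import TrainingArguments
from datasets import load_dataset

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="unsloth/mistral-7b-v0.3-bnb-4bit",
    max_seq_length=2048,
    load_in_4bit=True,
)
model = FastLanguageModel.get_peft_model(
    model,
    r=16,              # LoRA rank (assumption)
    lora_alpha=16,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
)

# hypothetical JSONL dataset whose "text" column holds prompts built with the
# formatter defined in the inference section below
dataset = load_dataset("json", data_files="qa_with_citation.jsonl", split="train")

trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    train_dataset=dataset,
    dataset_text_field="text",
    max_seq_length=2048,
    args=TrainingArguments(
        per_device_train_batch_size=2,
        gradient_accumulation_steps=4,
        max_steps=60,
        learning_rate=2e-4,
        output_dir="outputs",
    ),
)
trainer.train()
```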
## Install dependencies in Google Colab

```python
!pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"
!pip install --no-deps xformers "trl<0.9.0" peft accelerate bitsandbytes
```
## Inference
```python
from unsloth import FastLanguageModel
from typing import Dict, List, Tuple, Union, Any
import json
import pandas
from tqdm import trange, tqdm
import torch


class FormatPrompt_QA_with_citation:
    '''Format prompts for domain-specific QA with citations.'''

    def __init__(self, eos_token: str = '</s>') -> None:
        self.inputs = ['context', 'question']      # required input fields
        self.outputs = ['answer', 'citation']      # target fields for training and model inference output
        self.eos_token = eos_token

    def __call__(self, instance: Dict[str, Any]) -> str:
        '''
        Function call operator.

        Args:
            instance: dictionary with keys 'context' and 'question'
                      (optionally 'answer' and 'citation' for training)
        Returns:
            prompt: formatted prompt
        '''
        return self.formatting_prompt_func(instance)

    def formatting_prompt_func(self, instance: Dict[str, Any]) -> str:
        '''Format a prompt for domain-specific QA.

        Note: this is for fine-tuning a pre-trained (base) model.
        If starting from an instruction-tuned model, use
        `tokenizer.apply_chat_template(messages)` instead.
        '''
        assert all(item in instance for item in self.inputs), f"instance must have {self.inputs}!"
        prompt = f"""<s> [INST] Context: {str(instance["context"])}\
Question: {str(instance["question"])} [/INST]
Answer: """
        if 'answer' in instance:
            # for training, append the JSON-formatted target (answer plus optional citation)
            answer = {
                "answer": str(instance['answer']),
                "citation": str(instance.get('citation', '')),
            }
            prompt += json.dumps(answer, ensure_ascii=False) + self.eos_token
        return prompt
```
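
A minimal inference sketch using this formatter with Unsloth's `FastLanguageModel` is shown below; the repository id `jingwang/mistral-7b-v0.3-qa-citation`, the example context/question, and the generation settings are illustrative assumptions, not the published configuration.

```python
# Minimal inference sketch; the model repo id and generation settings below are assumptions.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="jingwang/mistral-7b-v0.3-qa-citation",  # hypothetical repo id; replace with the actual adapter
    max_seq_length=2048,
    dtype=None,            # auto-detect
    load_in_4bit=True,
)
FastLanguageModel.for_inference(model)  # enable Unsloth's fast inference mode

formatter = FormatPrompt_QA_with_citation(eos_token=tokenizer.eos_token)
prompt = formatter({
    "context": "Mistral-7B-v0.3 is a base language model released by Mistral AI.",
    "question": "Who released Mistral-7B-v0.3?",
})

# the prompt already starts with <s>, so skip the tokenizer's special tokens
inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to(model.device)
with torch.no_grad():
    outputs = model.generate(**inputs, max_new_tokens=256, use_cache=True)

# decode only the newly generated tokens after the prompt
completion = tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
print(completion)
```

Because the training target is serialized with `json.dumps`, the completion can usually be parsed back with `json.loads` to recover the `answer` and `citation` fields.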