---
license: apache-2.0
datasets:
- argilla/ultrafeedback-binarized-preferences-cleaned
language:
- en
base_model:
- mistralai/Mistral-7B-v0.1
library_name: transformers
tags:
- transformers
---

# Introduction:

ElEmperador is an ORPO fine-tune derived from the Mistral-7B-v0.1 base model.

### Inference Script:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


def generate_response(model_name, input_text, max_new_tokens=50):
    # Load the tokenizer and model from the Hugging Face Hub.
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)

    # Tokenize the prompt.
    input_ids = tokenizer(input_text, return_tensors="pt").input_ids

    # Generate without tracking gradients.
    with torch.no_grad():
        generated_ids = model.generate(input_ids, max_new_tokens=max_new_tokens)

    # Decode the generated ids back into text, dropping special tokens.
    generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
    return generated_text


if __name__ == "__main__":
    # Set the model name from the Hugging Face Hub.
    model_name = "AINovice2005/ElEmperador"
    input_text = "Hello, how are you?"

    output = generate_response(model_name, input_text)

    print(f"Input: {input_text}")
    print(f"Output: {output}")
```
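
For context, ORPO (Odds Ratio Preference Optimization) fine-tunes a model directly on preference pairs, folding the preference signal into the supervised loss so no separate reward model or reference model is needed. The sketch below shows how a run like this could be set up with TRL's `ORPOTrainer` on the dataset listed in the metadata above. It is a minimal sketch: the hyperparameters (`beta`, learning rate, batch size, sequence lengths) and the dataset flattening step are illustrative assumptions, not ElEmperador's actual training recipe.

```python
# Minimal ORPO training sketch. Hyperparameters are illustrative assumptions,
# not the settings actually used to train ElEmperador.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import ORPOConfig, ORPOTrainer

base_model = "mistralai/Mistral-7B-v0.1"
tokenizer = AutoTokenizer.from_pretrained(base_model)
tokenizer.pad_token = tokenizer.eos_token  # Mistral ships without a pad token
model = AutoModelForCausalLM.from_pretrained(base_model)

dataset = load_dataset(
    "argilla/ultrafeedback-binarized-preferences-cleaned", split="train"
)

def to_text(example):
    # The dataset stores "chosen"/"rejected" as chat-format message lists;
    # flatten each to the final assistant reply for plain-text ORPO training.
    return {
        "prompt": example["prompt"],
        "chosen": example["chosen"][-1]["content"],
        "rejected": example["rejected"][-1]["content"],
    }

dataset = dataset.map(to_text, remove_columns=dataset.column_names)

config = ORPOConfig(
    output_dir="elemperador-orpo",
    beta=0.1,  # weight of the odds-ratio preference term (assumed value)
    learning_rate=5e-6,
    per_device_train_batch_size=2,
    max_length=1024,
    max_prompt_length=512,
)

trainer = ORPOTrainer(
    model=model,
    args=config,
    train_dataset=dataset,
    tokenizer=tokenizer,  # renamed to `processing_class` in newer TRL releases
)
trainer.train()
```

In practice, a full fine-tune of a 7B model also needs memory-saving measures such as LoRA/PEFT adapters, gradient checkpointing, or bf16 precision; these are omitted here to keep the sketch short.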