Tags: Text Generation, Transformers, Safetensors, llama, Merge, model_fusion, TIES, Llama3.1, crypto, blockchain, coding_assistant, creative_writing, roleplaying, uncensored, latent_diffusion, long_context, agentic_AI, multi_domain, research, instruction-following, technical_reasoning, task_generalization, AI_tools, GPT, conversational, text-generation-inference, Inference Endpoints
models:
  - model: Chainbase-Labs/Theia-Llama-3.1-8B-v1
    parameters:
      density: 0.4    # Balancing technical vision and crypto capabilities
      weight: 0.3
  - model: EpistemeAI/Fireball-Meta-Llama-3.2-8B-Instruct-agent-003-128k-code-DPO
    parameters:
      density: 0.6    # Giving priority to code-based reasoning and agentic capabilities
      weight: 0.4
  - model: aifeifei798/DarkIdol-Llama-3.1-8B-Instruct-1.2-Uncensored
    parameters:
      density: 0.5    # Focus on creativity and uncensored roleplay flexibility
      weight: 0.2
  - model: DeepAutoAI/ldm_soup_Llama-3.1-8B-Inst
    parameters:
      density: 0.5    # Blending latent diffusion capabilities for unseen tasks
      weight: 0.1
merge_method: ties
base_model: DeepAutoAI/ldm_soup_Llama-3.1-8B-Inst  # Using ldm_soup as the base for optimal diffusion of strengths
dtype: bfloat16
parameters:
  normalize: true      # Ensuring consistent scaling of layer outputs
  out_dtype: float16   # Maintaining efficiency and precision
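For intuition about what the `density` and `weight` values above control, the sketch below walks through the TIES procedure on a single weight tensor: trim each donor's task vector to its top-`density` entries by magnitude, elect a per-parameter sign, keep only sign-consistent entries, and add the weighted combination back onto the base. This is a conceptual illustration only, not mergekit's actual implementation; the function names, toy tensors, and normalization detail are assumptions made for the example.

```python
import torch

def trim(delta: torch.Tensor, density: float) -> torch.Tensor:
    """Keep only the top-`density` fraction of entries by magnitude (TIES 'trim' step)."""
    k = max(int(density * delta.numel()), 1)
    threshold = delta.abs().flatten().kthvalue(delta.numel() - k + 1).values
    return torch.where(delta.abs() >= threshold, delta, torch.zeros_like(delta))

def ties_merge(base, finetuned, densities, weights, normalize=True):
    """Conceptual TIES merge for one tensor; a sketch, not mergekit's code."""
    # 1. Task vectors: what each fine-tune changed relative to the base model.
    deltas = [ft - base for ft in finetuned]
    # 2. Trim each task vector to its configured density (0.4 / 0.6 / 0.5 / 0.5 above).
    trimmed = torch.stack([trim(d, dens) for d, dens in zip(deltas, densities)])
    # 3. Elect a per-parameter sign from the weighted sum of trimmed task vectors.
    w = torch.tensor(weights).view(-1, *([1] * base.dim()))
    elected_sign = (w * trimmed).sum(dim=0).sign()
    # 4. Keep only entries whose sign agrees with the election, then combine.
    agree = (trimmed.sign() == elected_sign) & (elected_sign != 0)
    merged_delta = (w * trimmed * agree).sum(dim=0)
    if normalize:
        # Rescale by the total weight actually applied at each position,
        # roughly what `normalize: true` requests in the config.
        merged_delta = merged_delta / (w * agree).sum(dim=0).clamp(min=1e-8)
    # 5. Add the merged task vector back onto the base weights.
    return base + merged_delta

# Toy usage on a single 4x4 matrix with the densities/weights from the config.
base = torch.randn(4, 4)
donors = [base + 0.1 * torch.randn(4, 4) for _ in range(4)]
merged = ties_merge(base, donors,
                    densities=[0.4, 0.6, 0.5, 0.5],
                    weights=[0.3, 0.4, 0.2, 0.1])
```

With the weights above (0.3 / 0.4 / 0.2 / 0.1), the coding/agentic donor has the largest say wherever its trimmed updates agree in sign with the other donors.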
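To produce and use the merged checkpoint, the usual workflow is to save the config above to a YAML file and run it through mergekit (e.g. `pip install mergekit`, then `mergekit-yaml merge_config.yml ./merged-model`; exact options depend on the mergekit version), after which the output directory loads like any other Llama checkpoint. The local path and prompt below are placeholders for illustration, not part of this card.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "./merged-model"  # placeholder: wherever mergekit wrote the merged weights
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.float16,  # matches `out_dtype: float16` in the config
    device_map="auto",
)

prompt = "Summarize how a TIES merge combines several fine-tuned Llama models."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```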