# Llama3.1-TheiaFire-DarkFusion-8B / mergekit_config.yml
# Author: ZeroXClem
# Upload folder using huggingface_hub
# Commit: 1b7b089 (verified)
# (HuggingFace page residue: raw / history / blame — 1 kB)
---
# mergekit TIES-merge configuration: four Llama-3.1-8B-class models blended
# onto the ldm_soup base. Indentation restored — each list item under
# `models:` carries its own nested `parameters:` mapping (density/weight),
# and `normalize`/`out_dtype` are the merge-wide settings.
models:
  - model: Chainbase-Labs/Theia-Llama-3.1-8B-v1
    parameters:
      density: 0.4  # Balancing technical vision and crypto capabilities
      weight: 0.3
  - model: EpistemeAI/Fireball-Meta-Llama-3.2-8B-Instruct-agent-003-128k-code-DPO
    parameters:
      density: 0.6  # Giving priority to code-based reasoning and agentic capabilities
      weight: 0.4
  - model: aifeifei798/DarkIdol-Llama-3.1-8B-Instruct-1.2-Uncensored
    parameters:
      density: 0.5  # Focus on creativity and uncensored roleplay flexibility
      weight: 0.2
  - model: DeepAutoAI/ldm_soup_Llama-3.1-8B-Inst
    parameters:
      density: 0.5  # Blending latent diffusion capabilities for unseen tasks
      weight: 0.1
merge_method: ties
base_model: DeepAutoAI/ldm_soup_Llama-3.1-8B-Inst  # Using the ldm_soup as a base for optimal diffusion of strengths
dtype: bfloat16
parameters:
  normalize: true  # Ensuring consistent scaling of layer outputs
out_dtype: float16  # Maintaining efficiency and precision