# Qwen2-4x1.5B-v2 / mergekit_moe_config.yml
# Uploaded by djuna via huggingface_hub (commit 1ad196f, verified)
---
# mergekit-moe config: build a 4-expert Qwen2 MoE from 1.5B fine-tunes.
# "hidden" gate mode routes by hidden-state similarity to the positive prompts.
gate_mode: hidden
architecture: qwen
dtype: bfloat16
experts_per_token: 2
base_model: M4-ai/Hercules-5.0-Qwen2-1.5B
experts:
  # General explanation / assistance expert
  - source_model: cognitivecomputations/dolphin-2.9.3-qwen2-1.5b
    positive_prompts:
      - "explain"
      - "describe"
      - "define"
      - "help"
      - "assist"
  # Coding / software-development expert
  - source_model: Replete-AI/Qwen2-1.5b-Instruct-Replete-Adapted
    positive_prompts:
      - "code"
      - "algorithm"
      - "programming"
      - "development"
      - "software"
      - "framework"
  # Rewriting / translation expert (same model as the base)
  - source_model: M4-ai/Hercules-5.0-Qwen2-1.5B
    positive_prompts:
      - "rewrite"
      - "paraphrase"
      - "translate"
      - "reword"
  # Summarization expert
  - source_model: d-llm/Qwen2-1.5B-Instruct-orpo
    positive_prompts:
      - "summarize"
      - "shorten"
      - "condense"
      - "tldr"
# Qwen MoE architecture requires exactly one shared expert.
shared_experts:
  - source_model: M4-ai/Hercules-5.0-Qwen2-1.5B
    positive_prompts:  # required by Qwen MoE for "hidden" gate mode, otherwise not allowed
      - "assistant"
      - "chat"
    # (optional, but recommended:)
    residual_scale: 0.1