# llama-3.1-swallow-gen-8b-v0.1 / mergekit_config.yml
# Source: Hugging Face repo upload (AELLM), commit 9420eab (verified),
# uploaded via huggingface_hub. Page chrome ("raw / history / blame / 373 Bytes")
# removed from the data and preserved here as comments so the file parses as YAML.
---
# mergekit DARE-TIES merge: Swallow-8B-Instruct (base) + Infinity-Instruct Llama3.1-8B.
models:
  # Base model — no parameters necessary for the base in dare_ties.
  - model: tokyotech-llm/Llama-3.1-Swallow-8B-Instruct-v0.1
  - model: BAAI/Infinity-Instruct-7M-Gen-Llama3_1-8B
    # Per-model parameters must be nested under the sequence item;
    # at top level they would collide with the global `parameters:` key below.
    parameters:
      density: 0.5  # DARE: fraction of delta weights retained after random pruning
      weight: 1     # relative contribution of this model's deltas
merge_method: dare_ties
base_model: tokyotech-llm/Llama-3.1-Swallow-8B-Instruct-v0.1
parameters:
  int8_mask: true  # compute TIES sign mask in int8 to reduce memory use
tokenizer_source: union  # merged tokenizer = union of both models' vocabularies
dtype: bfloat16