SpaceLlama3.1 / config.yaml

dataset:
  # Each *_stage_components entry pairs an annotation JSON (first) with its
  # image root directory (second), resolved relative to dataset_root_dir.
  align_stage_components:
  - download/llava-laion-cc-sbu-558k/chat.json
  - download/llava-laion-cc-sbu-558k
  dataset_id: llava-lrv-spacellava
  dataset_root_dir: /home/ubuntu/prismatic-vlms
  finetune_stage_components:
  - download/llava-v1.5-instruct/llava_v1_5_lrv_mix1008k_spacellava.json
  - download/llava-v1.5-instruct
  type: llava-lrv-spacellava
hf_token: .hf_token  # env var name or path to a local file holding the HF access token
model:
  # Align-stage (projector warm-up) hyperparameters; the "no-align"
  # arch_specifier below indicates this stage is skipped for this run.
  align_epochs: 1
  align_global_batch_size: 4
  align_learning_rate: 0.001
  align_lr_scheduler_type: linear-warmup+cosine-decay
  align_max_grad_norm: 1.0
  align_max_steps: null
  align_per_device_batch_size: 1
  align_train_strategy: fsdp-shard-grad-op
  align_warmup_ratio: 0.03
  align_weight_decay: 0.0
  # GELU-MLP vision-language projector, trained without a separate align stage.
  arch_specifier: no-align+gelu-mlp
  enable_gradient_checkpointing: true
  enable_mixed_precision_training: true
  # Finetune-stage hyperparameters (the stage actually run; see `stage` below).
  finetune_epochs: 3
  finetune_global_batch_size: 128
  finetune_learning_rate: 2.0e-06
  finetune_lr_scheduler_type: linear-warmup+cosine-decay
  finetune_max_grad_norm: 1.0
  finetune_max_steps: null
  finetune_per_device_batch_size: 4
  finetune_train_strategy: fsdp-full-shard
  finetune_warmup_ratio: 0.03
  finetune_weight_decay: 0.1
  # Letterbox padding preserves aspect ratio when resizing to the 224px input.
  image_resize_strategy: letterbox
  llm_backbone_id: llama3-1-8b-pure
  llm_max_length: 2048
  model_id: llama3-based
  reduce_in_full_precision: false
  type: one-stage+7b
  # Fused DINOv2 + SigLIP ViT vision backbone at 224px resolution.
  vision_backbone_id: dinosiglip-vit-so-224px
pretrained_checkpoint: null
run_id: llava-lrv-spacellava+llama3-based+stage-finetune+x7
run_root_dir: runs
seed: 7
stage: finetune
trackers:
- jsonl
- wandb
wandb_entity: smellslikeml
wandb_project: prismatic
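
For reference, the global and per-device batch sizes above fix the gradient-accumulation factor once the GPU count is known. Below is a minimal sketch of recovering it from this file; it assumes PyYAML is installed, that the config is saved locally as config.yaml, and a hypothetical single node with 8 GPUs (the world size is not recorded in the config):

import yaml

# Load the training config shown above (local path is an assumption).
with open("config.yaml") as f:
    cfg = yaml.safe_load(f)

m = cfg["model"]
world_size = 8  # hypothetical: one node with 8 GPUs

# global_batch_size = per_device_batch_size * world_size * grad_accum_steps
grad_accum = m["finetune_global_batch_size"] // (
    m["finetune_per_device_batch_size"] * world_size
)
print(grad_accum)  # 128 // (4 * 8) = 4 accumulation steps per optimizer update

On the train strategies: fsdp-full-shard corresponds to PyTorch FSDP's FULL_SHARD mode (parameters, gradients, and optimizer state all sharded across ranks), while fsdp-shard-grad-op corresponds to SHARD_GRAD_OP (only gradients and optimizer state sharded), trading memory savings for communication volume.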