{"architectures": ["LLaMAModel"], "hidden_size": 4096, "intermediate_size": 11008, "num_attention_heads": 32, "num_hidden_layers": 28, "vocab_size": 32000, "pad_token_id": 0, "eos_token_id": 2} |
{"architectures": ["LLaMAModel"], "hidden_size": 4096, "intermediate_size": 11008, "num_attention_heads": 32, "num_hidden_layers": 28, "vocab_size": 32000, "pad_token_id": 0, "eos_token_id": 2} |