{
  "architectures": [
    "UnifiedEmbedderModel"
  ],
  "embedding_dim": 1536,
  "model_id": "openai/clip-vit-large-patch14",
  "model_type": "unified_embedder",
  "torch_dtype": "float32",
  "transformers_version": "4.47.1"
}