intfloat committed (verified) · Commit d553fa4 · 1 Parent(s): 5a522de

Upload README.md with huggingface_hub

Files changed (1): README.md (+54 -42)

README.md CHANGED
@@ -11,14 +11,11 @@ language:
 - pl
 - tr
 - fr
-library_name: transformers
 license: mit
-pipeline_tag: image-feature-extraction
 ---
-
 ## mmE5-mllama-11b-instruct
 
-[mmE5: Improving Multimodal Multilingual Embeddings via High-quality Synthetic Data](https://arxiv.org/abs/2502.08468). Haonan Chen, Liang Wang, Nan Yang, Yutao Zhu, Ziliang Zhao, Furu Wei, Zhicheng Dou, arXiv 2025
+[mmE5: Improving Multimodal Multilingual Embeddings via High-quality Synthetic Data](https://arxiv.org/abs/2502.08468). Haonan Chen, Liang Wang, Nan Yang, Yutao Zhu, Ziliang Zhao, Furu Wei, Zhicheng Dou, arXiv 2024
 
 This model is trained based on [Llama-3.2-11B-Vision](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision).
 
 
@@ -44,57 +41,72 @@ pip install -r requirements.txt
 
 Then you can enter the directory to run the following command.
 ```python
-from src.model import MMEBModel
-from src.arguments import ModelArguments
-from src.utils import load_processor
+from transformers import MllamaForConditionalGeneration, AutoProcessor, AutoConfig
 import torch
-from transformers import HfArgumentParser, AutoProcessor
 from PIL import Image
-import numpy as np
-model_args = ModelArguments(
-    model_name='intfloat/mmE5-mllama-11b-instruct',
-    pooling='last',
-    normalize=True,
-    model_backbone='mllama')
-processor = load_processor(model_args)
-model = MMEBModel.load(model_args)
+
+# Pooling and Normalization
+def last_pooling(last_hidden_state, attention_mask, normalize=True):
+    sequence_lengths = attention_mask.sum(dim=1) - 1
+    batch_size = last_hidden_state.shape[0]
+    reps = last_hidden_state[torch.arange(batch_size, device=last_hidden_state.device), sequence_lengths]
+    if normalize:
+        reps = torch.nn.functional.normalize(reps, p=2, dim=-1)
+    return reps
+
+def compute_similarity(q_reps, p_reps):
+    return torch.matmul(q_reps, p_reps.transpose(0, 1))
+
+model_name = "intfloat/mmE5-mllama-11b-instruct"
+
+# Load Processor and Model
+processor = AutoProcessor.from_pretrained(model_name)
+processor.tokenizer.padding_side = "right"
+
+config = AutoConfig.from_pretrained(model_name)
+if hasattr(config, 'use_cache'):
+    config.use_cache = False
+config.padding_side = "right"
+model = MllamaForConditionalGeneration.from_pretrained(
+    model_name, config=config,
+    torch_dtype=torch.bfloat16
+).to("cuda")
+model.padding_side = "right"
 model.eval()
-model = model.to('cuda', dtype=torch.bfloat16)
+
 # Image + Text -> Text
 inputs = processor(text='<|image|><|begin_of_text|> Represent the given image with the following question: What is in the image', images=[Image.open(
-    'figures/example.jpg')], return_tensors="pt")
-inputs = {key: value.to('cuda') for key, value in inputs.items()}
-qry_output = model(qry=inputs)["qry_reps"]
+    'figures/example.jpg')], return_tensors="pt").to("cuda")
+qry_output = last_pooling(model(**inputs, return_dict=True, output_hidden_states=True).hidden_states[-1], inputs['attention_mask'])
+
 string = 'A cat and a dog'
-inputs = processor(text=string, return_tensors="pt")
-inputs = {key: value.to('cuda') for key, value in inputs.items()}
-tgt_output = model(tgt=inputs)["tgt_reps"]
-print(string, '=', model.compute_similarity(qry_output, tgt_output))
+text_inputs = processor(text=string, return_tensors="pt").to("cuda")
+tgt_output = last_pooling(model(**text_inputs, return_dict=True, output_hidden_states=True).hidden_states[-1], text_inputs['attention_mask'])
+print(string, '=', compute_similarity(qry_output, tgt_output))
 ## A cat and a dog = tensor([[0.3965]], device='cuda:0', dtype=torch.bfloat16)
+
 string = 'A cat and a tiger'
-inputs = processor(text=string, return_tensors="pt")
-inputs = {key: value.to('cuda') for key, value in inputs.items()}
-tgt_output = model(tgt=inputs)["tgt_reps"]
-print(string, '=', model.compute_similarity(qry_output, tgt_output))
+text_inputs = processor(text=string, return_tensors="pt").to("cuda")
+tgt_output = last_pooling(model(**text_inputs, return_dict=True, output_hidden_states=True).hidden_states[-1], text_inputs['attention_mask'])
+print(string, '=', compute_similarity(qry_output, tgt_output))
 ## A cat and a tiger = tensor([[0.3105]], device='cuda:0', dtype=torch.bfloat16)
+
 # Text -> Image
-inputs = processor(text='Find me an everyday image that matches the given caption: A cat and a dog.', return_tensors="pt")
-inputs = {key: value.to('cuda') for key, value in inputs.items()}
-qry_output = model(qry=inputs)["qry_reps"]
+inputs = processor(text='Find me an everyday image that matches the given caption: A cat and a dog.', return_tensors="pt").to("cuda")
+qry_output = last_pooling(model(**inputs, return_dict=True, output_hidden_states=True).hidden_states[-1], inputs['attention_mask'])
+
 string = '<|image|><|begin_of_text|> Represent the given image.'
-inputs = processor(text=string, images=[Image.open('figures/example.jpg')], return_tensors="pt")
-inputs = {key: value.to('cuda') for key, value in inputs.items()}
-tgt_output = model(tgt=inputs)["tgt_reps"]
-print(string, '=', model.compute_similarity(qry_output, tgt_output))
+tgt_inputs = processor(text=string, images=[Image.open('figures/example.jpg')], return_tensors="pt").to("cuda")
+tgt_output = last_pooling(model(**tgt_inputs, return_dict=True, output_hidden_states=True).hidden_states[-1], tgt_inputs['attention_mask'])
+print(string, '=', compute_similarity(qry_output, tgt_output))
 ## <|image|><|begin_of_text|> Represent the given image. = tensor([[0.4219]], device='cuda:0', dtype=torch.bfloat16)
-inputs = processor(text='Find me an everyday image that matches the given caption: A cat and a tiger.', return_tensors="pt")
-inputs = {key: value.to('cuda') for key, value in inputs.items()}
-qry_output = model(qry=inputs)["qry_reps"]
+
+inputs = processor(text='Find me an everyday image that matches the given caption: A cat and a tiger.', return_tensors="pt").to("cuda")
+qry_output = last_pooling(model(**inputs, return_dict=True, output_hidden_states=True).hidden_states[-1], inputs['attention_mask'])
 string = '<|image|><|begin_of_text|> Represent the given image.'
-inputs = processor(text=string, images=[Image.open('figures/example.jpg')], return_tensors="pt")
-inputs = {key: value.to('cuda') for key, value in inputs.items()}
-tgt_output = model(tgt=inputs)["tgt_reps"]
-print(string, '=', model.compute_similarity(qry_output, tgt_output))
+tgt_inputs = processor(text=string, images=[Image.open('figures/example.jpg')], return_tensors="pt").to("cuda")
+tgt_output = last_pooling(model(**tgt_inputs, return_dict=True, output_hidden_states=True).hidden_states[-1], tgt_inputs['attention_mask'])
+print(string, '=', compute_similarity(qry_output, tgt_output))
 ## <|image|><|begin_of_text|> Represent the given image. = tensor([[0.3887]], device='cuda:0', dtype=torch.bfloat16)
 ```
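The updated snippet replaces the repo-internal `MMEBModel`/`src.*` wrappers with plain `transformers` calls: an embedding is the last-layer hidden state of the last non-padding token, L2-normalized, so a dot product between embeddings is a cosine similarity. A minimal sketch of that pooling step on toy tensors (shapes and values are illustrative, not taken from the model):

```python
import torch

def last_pooling(last_hidden_state, attention_mask, normalize=True):
    # Pick the hidden state of the last non-padding token in each sequence.
    # This assumes right padding, which is why the README sets
    # padding_side = "right" on the tokenizer, config, and model.
    sequence_lengths = attention_mask.sum(dim=1) - 1
    batch_size = last_hidden_state.shape[0]
    reps = last_hidden_state[torch.arange(batch_size), sequence_lengths]
    if normalize:
        # Unit-norm embeddings: dot product == cosine similarity.
        reps = torch.nn.functional.normalize(reps, p=2, dim=-1)
    return reps

# Toy check: batch of 2, sequence length 3, hidden size 4. The second
# sequence has one trailing pad token, so its embedding must come from
# position 1, not position 2.
hidden = torch.randn(2, 3, 4)
mask = torch.tensor([[1, 1, 1],
                     [1, 1, 0]])
reps = last_pooling(hidden, mask)
assert torch.allclose(reps[1], torch.nn.functional.normalize(hidden[1, 1], dim=-1))
assert torch.allclose(reps.norm(dim=-1), torch.ones(2))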
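Because every embedding is unit-normalized, ranking several candidates against one query is a single matrix product. A hedged sketch building on the diff above; `embed_texts` is a hypothetical helper, and `model`, `processor`, and `qry_output` are assumed to exist as in the README snippet:

```python
import torch

def embed_texts(texts):
    # Hypothetical helper: embed one text at a time, so no padding is needed.
    embs = []
    with torch.no_grad():
        for text in texts:
            batch = processor(text=text, return_tensors="pt").to("cuda")
            out = model(**batch, return_dict=True, output_hidden_states=True)
            embs.append(last_pooling(out.hidden_states[-1], batch['attention_mask']))
    return torch.cat(embs, dim=0)

candidates = ['A cat and a dog', 'A cat and a tiger']
scores = compute_similarity(qry_output, embed_texts(candidates))  # shape [1, 2]
# Per the README's printed scores (0.3965 vs. 0.3105), argmax picks the dog caption.
print(candidates[scores.argmax().item()])
```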
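One property worth noting: `compute_similarity` is a plain matrix product over unit vectors, so it scores many-to-many out of the box: `[m, d]` query embeddings against `[n, d]` target embeddings yield an `[m, n]` matrix of cosine similarities. A self-contained toy check (dimensions are arbitrary):

```python
import torch

q = torch.nn.functional.normalize(torch.randn(2, 8), dim=-1)  # 2 query embeddings
p = torch.nn.functional.normalize(torch.randn(3, 8), dim=-1)  # 3 target embeddings
scores = torch.matmul(q, p.transpose(0, 1))
assert scores.shape == (2, 3)
assert scores.abs().max() <= 1.0 + 1e-6  # cosine similarities lie in [-1, 1]
```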