import jax
import jax.numpy as jnp
from flax import nnx
from jax import Array as Tensor

from transformers import (FlaxCLIPTextModel, CLIPTokenizer, FlaxT5EncoderModel,
                          T5Tokenizer)


class HFEmbedder(nnx.Module):
    def __init__(self, version: str, max_length: int, **hf_kwargs):
        self.is_clip = version.startswith("openai")
        self.max_length = max_length
        # CLIP is used for its pooled sentence embedding, T5 for per-token hidden states.
        self.output_key = "pooler_output" if self.is_clip else "last_hidden_state"
        dtype = hf_kwargs.get("dtype", jnp.float32)
        if self.is_clip:
            self.tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained(version, max_length=max_length)
            self.hf_module, params = FlaxCLIPTextModel.from_pretrained(version, _do_init=False, **hf_kwargs)
        else:
            self.tokenizer: T5Tokenizer = T5Tokenizer.from_pretrained(version, max_length=max_length)
            self.hf_module, params = FlaxT5EncoderModel.from_pretrained(version, _do_init=False, **hf_kwargs)
        # With _do_init=False the parameters come back separately: mark the module as
        # initialized and attach them ourselves, placed on the first CUDA device.
        self.hf_module._is_initialized = True
        self.hf_module.params = jax.tree_util.tree_map(
            lambda x: jax.device_put(x, jax.devices("cuda")[0]), params
        )
        # Optionally cast the attached weights to the requested dtype (e.g. bfloat16).
        if dtype == jnp.bfloat16:
            self.hf_module.params = jax.tree_util.tree_map(lambda x: x.astype(dtype), self.hf_module.params)

    def tokenize(self, text: list[str]) -> Tensor:
        # Pad / truncate each prompt to max_length and return the token ids as a JAX array.
        batch_encoding = self.tokenizer(
            text,
            truncation=True,
            max_length=self.max_length,
            return_length=False,
            return_overflowing_tokens=False,
            padding="max_length",
            return_tensors="jax",
        )
        return batch_encoding["input_ids"]
    
    def __call__(self, input_ids: Tensor) -> Tensor:
        # Run the frozen text encoder; train=False keeps the Flax module deterministic (no dropout).
        outputs = self.hf_module(
            input_ids=input_ids,
            attention_mask=None,
            output_hidden_states=False,
            train=False,
        )
        return outputs[self.output_key]
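

# Minimal usage sketch (illustration only, not part of the module). It assumes the
# "openai/clip-vit-large-patch14" checkpoint is available and that a CUDA device is
# present for the device_put in __init__; the 768-dim pooled output is specific to
# that checkpoint.
if __name__ == "__main__":
    clip = HFEmbedder("openai/clip-vit-large-patch14", max_length=77)
    ids = clip.tokenize(["a photo of a cat"])  # (1, 77) int32 token ids
    pooled = clip(ids)                         # (1, 768) pooled CLIP text embedding
    print(pooled.shape)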