xcczach committed on
Commit
42d0fc5
1 Parent(s): f1799a1

Upload model

Files changed (4)
  1. config.json +14 -14
  2. configuration_test.py +16 -16
  3. modeling_test.py +20 -20
  4. pytorch_model.bin +2 -2
config.json CHANGED
@@ -1,14 +1,14 @@
-{
-  "architectures": [
-    "TestModel"
-  ],
-  "auto_map": {
-    "AutoConfig": "configuration_test.TestConfig",
-    "AutoModel": "modeling_test.TestModel"
-  },
-  "input_dim": 10,
-  "model_type": "my_test_model",
-  "output_dim": 5,
-  "torch_dtype": "float32",
-  "transformers_version": "4.39.1"
-}
+{
+  "architectures": [
+    "TestModel"
+  ],
+  "auto_map": {
+    "AutoConfig": "configuration_test.TestConfig",
+    "AutoModel": "modeling_test.TestModel"
+  },
+  "input_dim": 10,
+  "model_type": "my_test_model",
+  "output_dim": 5,
+  "torch_dtype": "float32",
+  "transformers_version": "4.37.2"
+}
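
The auto_map block above is what lets transformers resolve this repo's custom classes at load time. A minimal sketch of how the checkpoint might be loaded, using a placeholder repo id (the actual repo id is not shown on this page):

from transformers import AutoConfig, AutoModel

# Placeholder repo id, for illustration only.
repo_id = "your-username/your-test-model"

# trust_remote_code=True tells transformers to follow the auto_map entries above:
# AutoConfig imports TestConfig from configuration_test.py, and AutoModel imports
# TestModel from modeling_test.py, both fetched from this repo.
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)

print(config.input_dim, config.output_dim)  # 10 5, as set in this config.json
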
configuration_test.py CHANGED
@@ -1,16 +1,16 @@
-from transformers import PretrainedConfig
-from typing import List
-
-
-class TestConfig(PretrainedConfig):
-    model_type = "my_test_model"
-
-    def __init__(
-        self,
-        input_dim: int = 20,
-        output_dim: int = 10,
-        **kwargs,
-    ):
-        self.input_dim = input_dim
-        self.output_dim = output_dim
-        super().__init__(**kwargs)
+from transformers import PretrainedConfig
+from typing import List
+
+
+class TestConfig(PretrainedConfig):
+    model_type = "my_test_model"
+
+    def __init__(
+        self,
+        input_dim: int = 20,
+        output_dim: int = 10,
+        **kwargs,
+    ):
+        self.input_dim = input_dim
+        self.output_dim = output_dim
+        super().__init__(**kwargs)
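
The config class keeps only two custom fields. A small sketch of how it behaves, assuming it is run from a local checkout of this repo so configuration_test.py is importable directly:

from configuration_test import TestConfig

# Constructor defaults are 20/10; the uploaded config.json overrides them to 10/5.
cfg = TestConfig()
print(cfg.input_dim, cfg.output_dim)    # 20 10

cfg = TestConfig(input_dim=10, output_dim=5)
print(cfg.to_dict()["model_type"])      # "my_test_model"
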
modeling_test.py CHANGED
@@ -1,20 +1,20 @@
-from transformers import PreTrainedModel
-from .configuration_test import TestConfig
-import torch.nn as nn
-from transformers import AutoModelForMaskedLM, AutoConfig
-from transformers import AutoModelForSequenceClassification
-
-
-class TestModel(PreTrainedModel):
-    config_class = TestConfig
-
-    def __init__(self, config: TestConfig):
-        super().__init__(config)
-        self.input_dim = config.input_dim
-        self.model1 = nn.Linear(config.input_dim, config.output_dim)
-        self.model2 = AutoModelForMaskedLM.from_config(
-            AutoConfig.from_pretrained("albert/albert-base-v2")
-        )
-
-    def forward(self, tensor):
-        return self.model1(tensor)
+from transformers import PreTrainedModel
+from .configuration_test import TestConfig
+import torch.nn as nn
+from transformers import AutoModelForMaskedLM, AutoConfig
+from transformers import AutoModelForSequenceClassification
+
+
+class TestModel(PreTrainedModel):
+    config_class = TestConfig
+
+    def __init__(self, config: TestConfig):
+        super().__init__(config)
+        self.input_dim = config.input_dim
+        self.model1 = nn.Linear(config.input_dim, config.output_dim)
+        self.model2 = AutoModelForMaskedLM.from_config(
+            AutoConfig.from_pretrained("albert/albert-base-v2")
+        )
+
+    def forward(self, tensor):
+        return self.model1(tensor)
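
TestModel builds a Linear layer (model1) plus an ALBERT masked-LM submodule (model2), but forward only runs the Linear layer. One way a forward pass through the uploaded checkpoint might look, again with a placeholder repo id:

import torch
from transformers import AutoModel

repo_id = "your-username/your-test-model"  # placeholder, not the actual repo id
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)

# forward() only calls self.model1, so the output width is output_dim (5 here);
# self.model2 (the ALBERT head) is initialized but never used in forward.
x = torch.randn(2, model.config.input_dim)   # (batch, input_dim=10)
with torch.no_grad():
    y = model(x)
print(y.shape)                               # torch.Size([2, 5])
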
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0f88faa6c688ec9ab7b59910120c1e3bf97817b8d41a68104f608db5b2830260
-size 44897099
+oid sha256:c636fd32fccd184fcfaf1c852070f2144848824682ae2048a02e0658ce545a24
+size 44897544