Theon1130 committed on
Commit
6ecd3ab
1 parent: aee81e4
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. .DS_Store +0 -0
  2. 1e_4_5VQAlora2nd/adapter_config.json +0 -38
  3. 1e_4_5VQAlora2nd/adapter_model.safetensors +0 -3
  4. 1e_4_5VQAlora2nd/training_args.bin +0 -3
  5. 2e_4VQAlora2nd/README.md +0 -202
  6. 2e_4VQAlora2nd/adapter_config.json +0 -38
  7. 2e_4VQAlora2nd/adapter_model.safetensors +0 -3
  8. 2e_4VQAlora2nd/checkpoints/LVQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-50/README.md +0 -202
  9. 2e_4VQAlora2nd/checkpoints/LVQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-50/adapter_config.json +0 -38
  10. 2e_4VQAlora2nd/checkpoints/LVQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-50/adapter_model.safetensors +0 -3
  11. 2e_4VQAlora2nd/checkpoints/LVQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-50/optimizer.pt +0 -3
  12. 2e_4VQAlora2nd/checkpoints/LVQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-50/rng_state.pth +0 -3
  13. 2e_4VQAlora2nd/checkpoints/LVQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-50/scheduler.pt +0 -3
  14. 2e_4VQAlora2nd/checkpoints/LVQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-50/trainer_state.json +0 -387
  15. 2e_4VQAlora2nd/checkpoints/LVQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-50/training_args.bin +0 -3
  16. 2e_4VQAlora2nd/training_args.bin +0 -3
  17. 2e_5_r8_2VQAlora2nd/README.md +0 -202
  18. 2e_5_r8_2VQAlora2nd/adapter_config.json +0 -38
  19. 2e_5_r8_2VQAlora2nd/adapter_model.safetensors +0 -3
  20. 2e_5_r8_2VQAlora2nd/checkpoints/r82_2e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/README.md +0 -202
  21. 2e_5_r8_2VQAlora2nd/checkpoints/r82_2e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/adapter_config.json +0 -38
  22. 2e_5_r8_2VQAlora2nd/checkpoints/r82_2e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/adapter_model.safetensors +0 -3
  23. 2e_5_r8_2VQAlora2nd/checkpoints/r82_2e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/optimizer.pt +0 -3
  24. 2e_5_r8_2VQAlora2nd/checkpoints/r82_2e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/rng_state.pth +0 -3
  25. 2e_5_r8_2VQAlora2nd/checkpoints/r82_2e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/scheduler.pt +0 -3
  26. 2e_5_r8_2VQAlora2nd/checkpoints/r82_2e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/trainer_state.json +0 -533
  27. 2e_5_r8_2VQAlora2nd/checkpoints/r82_2e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/training_args.bin +0 -3
  28. 2e_5_r8_2VQAlora2nd/training_args.bin +0 -3
  29. 2ndvqarad/.DS_Store +0 -0
  30. {1e_4_5VQAlora2nd → 2ndvqarad}/README.md +0 -0
  31. {4e_5_2VQAlora2nd → 2ndvqarad}/adapter_config.json +0 -0
  32. {4e_5_2VQAlora2nd → 2ndvqarad}/adapter_model.safetensors +0 -0
  33. {4e_5_2VQAlora2nd/checkpoints/2_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80 → 2ndvqarad}/training_args.bin +0 -0
  34. 4e_5_2VQAlora2nd/README.md +0 -202
  35. 4e_5_2VQAlora2nd/checkpoints/2_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/README.md +0 -202
  36. 4e_5_2VQAlora2nd/checkpoints/2_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/adapter_config.json +0 -38
  37. 4e_5_2VQAlora2nd/checkpoints/2_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/adapter_model.safetensors +0 -3
  38. 4e_5_2VQAlora2nd/checkpoints/2_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/optimizer.pt +0 -3
  39. 4e_5_2VQAlora2nd/checkpoints/2_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/rng_state.pth +0 -3
  40. 4e_5_2VQAlora2nd/checkpoints/2_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/scheduler.pt +0 -3
  41. 4e_5_2VQAlora2nd/checkpoints/2_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/trainer_state.json +0 -149
  42. 4e_5_2VQAlora2nd/training_args.bin +0 -3
  43. 4e_5_r8_2VQAlora2nd/README.md +0 -202
  44. 4e_5_r8_2VQAlora2nd/adapter_config.json +0 -38
  45. 4e_5_r8_2VQAlora2nd/adapter_model.safetensors +0 -3
  46. 4e_5_r8_2VQAlora2nd/checkpoints/r82_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/README.md +0 -202
  47. 4e_5_r8_2VQAlora2nd/checkpoints/r82_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/adapter_config.json +0 -38
  48. 4e_5_r8_2VQAlora2nd/checkpoints/r82_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/adapter_model.safetensors +0 -3
  49. 4e_5_r8_2VQAlora2nd/checkpoints/r82_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/optimizer.pt +0 -3
  50. 4e_5_r8_2VQAlora2nd/checkpoints/r82_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/rng_state.pth +0 -3
.DS_Store ADDED
Binary file (6.15 kB).
 
1e_4_5VQAlora2nd/adapter_config.json DELETED
@@ -1,38 +0,0 @@
- {
-   "alpha_pattern": {},
-   "auto_mapping": {
-     "base_model_class": "LlavaMistralForCausalLM",
-     "parent_library": "llava.model.language_model.llava_mistral"
-   },
-   "base_model_name_or_path": "/workspace/ROCO2nd_RSV_llava-v1.6-mistral_PMC",
-   "bias": "none",
-   "fan_in_fan_out": false,
-   "inference_mode": true,
-   "init_lora_weights": true,
-   "layer_replication": null,
-   "layers_pattern": null,
-   "layers_to_transform": null,
-   "loftq_config": {},
-   "lora_alpha": 32,
-   "lora_dropout": 0.05,
-   "megatron_config": null,
-   "megatron_core": "megatron.core",
-   "modules_to_save": [
-     "mm_projector"
-   ],
-   "peft_type": "LORA",
-   "r": 16,
-   "rank_pattern": {},
-   "revision": null,
-   "target_modules": [
-     "up_proj",
-     "k_proj",
-     "down_proj",
-     "gate_proj",
-     "v_proj",
-     "q_proj"
-   ],
-   "task_type": null,
-   "use_dora": false,
-   "use_rslora": false
- }
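Each deleted experiment folder above follows the standard PEFT adapter layout (an adapter_config.json next to an adapter_model.safetensors), so any of them could in principle be re-attached to the base checkpoint named in the config. A minimal sketch, assuming the llava package referenced in auto_mapping is installed and the /workspace base checkpoint still exists locally (neither ships with this repo):

```python
# Minimal sketch only; the llava import path and base-model location are taken
# from adapter_config.json above and are assumed, not provided by this commit.
from llava.model.language_model.llava_mistral import LlavaMistralForCausalLM
from peft import PeftModel

base = LlavaMistralForCausalLM.from_pretrained(
    "/workspace/ROCO2nd_RSV_llava-v1.6-mistral_PMC"  # "base_model_name_or_path"
)
# Attach the r=16, alpha=32 LoRA adapter over the q/k/v/gate/up/down projections;
# "modules_to_save" means the mm_projector weights are loaded in full as well.
model = PeftModel.from_pretrained(base, "1e_4_5VQAlora2nd")
model.eval()  # matches "inference_mode": true in the config
```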
1e_4_5VQAlora2nd/adapter_model.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:5b507789928c1185809fe88cf9e9338c4d2d8e6a4dd2fd5b643e79bc44b6ae92
- size 122253960
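The .safetensors, .bin, .pt, and .pth entries in this diff are Git LFS pointer files rather than the weights themselves: three text lines giving the LFS spec version, the SHA-256 of the stored object, and its size in bytes (122,253,960, roughly 122 MB, for this adapter). A small sketch of reading such a pointer, assuming only the standard key-value format shown above:

```python
# Sketch: parse the three "key value" lines of a Git LFS pointer file.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:5b507789928c1185809fe88cf9e9338c4d2d8e6a4dd2fd5b643e79bc44b6ae92\n"
    "size 122253960\n"
)
info = parse_lfs_pointer(pointer)
print(info["oid"], int(info["size"]))  # the real blob lives in LFS storage
```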
1e_4_5VQAlora2nd/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a60a1ee56456d3b3806208343cc707a04b9f707304c89603082cd68308697d19
- size 5048
2e_4VQAlora2nd/README.md DELETED
@@ -1,202 +0,0 @@
- ---
- library_name: peft
- base_model: /workspace/ROCO2nd_RSV_llava-v1.6-mistral_PMC
- ---
-
- # Model Card for Model ID
-
- <!-- Provide a quick summary of what the model is/does. -->
-
-
-
- ## Model Details
-
- ### Model Description
-
- <!-- Provide a longer summary of what this model is. -->
-
-
-
- - **Developed by:** [More Information Needed]
- - **Funded by [optional]:** [More Information Needed]
- - **Shared by [optional]:** [More Information Needed]
- - **Model type:** [More Information Needed]
- - **Language(s) (NLP):** [More Information Needed]
- - **License:** [More Information Needed]
- - **Finetuned from model [optional]:** [More Information Needed]
-
- ### Model Sources [optional]
-
- <!-- Provide the basic links for the model. -->
-
- - **Repository:** [More Information Needed]
- - **Paper [optional]:** [More Information Needed]
- - **Demo [optional]:** [More Information Needed]
-
- ## Uses
-
- <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
-
- ### Direct Use
-
- <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
-
- [More Information Needed]
-
- ### Downstream Use [optional]
-
- <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
-
- [More Information Needed]
-
- ### Out-of-Scope Use
-
- <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
-
- [More Information Needed]
-
- ## Bias, Risks, and Limitations
-
- <!-- This section is meant to convey both technical and sociotechnical limitations. -->
-
- [More Information Needed]
-
- ### Recommendations
-
- <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
-
- Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
-
- ## How to Get Started with the Model
-
- Use the code below to get started with the model.
-
- [More Information Needed]
-
- ## Training Details
-
- ### Training Data
-
- <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
-
- [More Information Needed]
-
- ### Training Procedure
-
- <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
-
- #### Preprocessing [optional]
-
- [More Information Needed]
-
-
- #### Training Hyperparameters
-
- - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
-
- #### Speeds, Sizes, Times [optional]
-
- <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
-
- [More Information Needed]
-
- ## Evaluation
-
- <!-- This section describes the evaluation protocols and provides the results. -->
-
- ### Testing Data, Factors & Metrics
-
- #### Testing Data
-
- <!-- This should link to a Dataset Card if possible. -->
-
- [More Information Needed]
-
- #### Factors
-
- <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
-
- [More Information Needed]
-
- #### Metrics
-
- <!-- These are the evaluation metrics being used, ideally with a description of why. -->
-
- [More Information Needed]
-
- ### Results
-
- [More Information Needed]
-
- #### Summary
-
-
-
- ## Model Examination [optional]
-
- <!-- Relevant interpretability work for the model goes here -->
-
- [More Information Needed]
-
- ## Environmental Impact
-
- <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
-
- Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
-
- - **Hardware Type:** [More Information Needed]
- - **Hours used:** [More Information Needed]
- - **Cloud Provider:** [More Information Needed]
- - **Compute Region:** [More Information Needed]
- - **Carbon Emitted:** [More Information Needed]
-
- ## Technical Specifications [optional]
-
- ### Model Architecture and Objective
-
- [More Information Needed]
-
- ### Compute Infrastructure
-
- [More Information Needed]
-
- #### Hardware
-
- [More Information Needed]
-
- #### Software
-
- [More Information Needed]
-
- ## Citation [optional]
-
- <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
-
- **BibTeX:**
-
- [More Information Needed]
-
- **APA:**
-
- [More Information Needed]
-
- ## Glossary [optional]
-
- <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
-
- [More Information Needed]
-
- ## More Information [optional]
-
- [More Information Needed]
-
- ## Model Card Authors [optional]
-
- [More Information Needed]
-
- ## Model Card Contact
-
- [More Information Needed]
- ### Framework versions
-
- - PEFT 0.10.1.dev0
2e_4VQAlora2nd/adapter_config.json DELETED
@@ -1,38 +0,0 @@
- {
-   "alpha_pattern": {},
-   "auto_mapping": {
-     "base_model_class": "LlavaMistralForCausalLM",
-     "parent_library": "llava.model.language_model.llava_mistral"
-   },
-   "base_model_name_or_path": "/workspace/ROCO2nd_RSV_llava-v1.6-mistral_PMC",
-   "bias": "none",
-   "fan_in_fan_out": false,
-   "inference_mode": true,
-   "init_lora_weights": true,
-   "layer_replication": null,
-   "layers_pattern": null,
-   "layers_to_transform": null,
-   "loftq_config": {},
-   "lora_alpha": 32,
-   "lora_dropout": 0.05,
-   "megatron_config": null,
-   "megatron_core": "megatron.core",
-   "modules_to_save": [
-     "mm_projector"
-   ],
-   "peft_type": "LORA",
-   "r": 16,
-   "rank_pattern": {},
-   "revision": null,
-   "target_modules": [
-     "gate_proj",
-     "q_proj",
-     "k_proj",
-     "v_proj",
-     "up_proj",
-     "down_proj"
-   ],
-   "task_type": null,
-   "use_dora": false,
-   "use_rslora": false
- }
2e_4VQAlora2nd/adapter_model.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:5a4b4deeb65392c7018217ae04dfa46666d8fc66497eec74aa17ce1b7f02cf22
- size 122253960
2e_4VQAlora2nd/checkpoints/LVQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-50/README.md DELETED
@@ -1,202 +0,0 @@
(202-line auto-generated PEFT model card deleted; content is identical to the 2e_4VQAlora2nd/README.md diff shown above.)
2e_4VQAlora2nd/checkpoints/LVQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-50/adapter_config.json DELETED
@@ -1,38 +0,0 @@
(38-line adapter_config.json deleted; identical to the 2e_4VQAlora2nd/adapter_config.json diff shown above.)
2e_4VQAlora2nd/checkpoints/LVQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-50/adapter_model.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:5a4b4deeb65392c7018217ae04dfa46666d8fc66497eec74aa17ce1b7f02cf22
- size 122253960
2e_4VQAlora2nd/checkpoints/LVQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-50/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:88d774ce65502974f774cf40347737670dbc0ca67047de9ac0169410c9f9eb2f
- size 235243422
2e_4VQAlora2nd/checkpoints/LVQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-50/rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:633d1564811aadc8cff03d10dcbf1246a96633cf1a7e2cc7340327ff91359be3
- size 14244
2e_4VQAlora2nd/checkpoints/LVQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-50/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:b345f6de2f2c60f45e2823074cc59e0f17987d968e5dd42664de99dc91e0be50
- size 1000
2e_4VQAlora2nd/checkpoints/LVQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-50/trainer_state.json DELETED
@@ -1,387 +0,0 @@
- {
-   "best_metric": 0.6941810250282288,
-   "best_model_checkpoint": "/workspace/checkpoints/LVQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-50",
-   "epoch": 0.8918617614269788,
-   "eval_steps": 25,
-   "global_step": 50,
-   "is_hyper_param_search": false,
-   "is_local_process_zero": true,
-   "is_world_process_zero": true,
-   "log_history": [
-     {"epoch": 0.017837235228539576, "grad_norm": 8.9375, "learning_rate": 5e-05, "loss": 0.9688, "step": 1},
-     {"epoch": 0.03567447045707915, "grad_norm": 3.0625, "learning_rate": 0.0001, "loss": 0.4082, "step": 2},
-     {"epoch": 0.05351170568561873, "grad_norm": 3.75, "learning_rate": 0.00015000000000000001, "loss": 0.7148, "step": 3},
-     {"epoch": 0.0713489409141583, "grad_norm": 3.546875, "learning_rate": 0.0002, "loss": 0.918, "step": 4},
-     {"epoch": 0.08918617614269789, "grad_norm": 2.65625, "learning_rate": 0.0001999576950082201, "loss": 0.5273, "step": 5},
-     {"epoch": 0.10702341137123746, "grad_norm": 3.03125, "learning_rate": 0.00019983081582712685, "loss": 0.4492, "step": 6},
-     {"epoch": 0.12486064659977704, "grad_norm": 6.59375, "learning_rate": 0.00019961946980917456, "loss": 0.9805, "step": 7},
-     {"epoch": 0.1426978818283166, "grad_norm": 2.890625, "learning_rate": 0.00019932383577419432, "loss": 0.6289, "step": 8},
-     {"epoch": 0.1605351170568562, "grad_norm": 2.96875, "learning_rate": 0.00019894416385809444, "loss": 0.8203, "step": 9},
-     {"epoch": 0.17837235228539577, "grad_norm": 2.609375, "learning_rate": 0.00019848077530122083, "loss": 0.5859, "step": 10},
-     {"epoch": 0.19620958751393533, "grad_norm": 3.015625, "learning_rate": 0.00019793406217655517, "loss": 0.6523, "step": 11},
-     {"epoch": 0.2140468227424749, "grad_norm": 2.78125, "learning_rate": 0.00019730448705798239, "loss": 0.5, "step": 12},
-     {"epoch": 0.2318840579710145, "grad_norm": 2.328125, "learning_rate": 0.00019659258262890683, "loss": 0.4648, "step": 13},
-     {"epoch": 0.24972129319955408, "grad_norm": 3.875, "learning_rate": 0.0001957989512315489, "loss": 0.5742, "step": 14},
-     {"epoch": 0.26755852842809363, "grad_norm": 3.296875, "learning_rate": 0.0001949242643573034, "loss": 0.4785, "step": 15},
-     {"epoch": 0.2853957636566332, "grad_norm": 3.109375, "learning_rate": 0.00019396926207859084, "loss": 0.6953, "step": 16},
-     {"epoch": 0.3032329988851728, "grad_norm": 3.296875, "learning_rate": 0.00019293475242268223, "loss": 0.6445, "step": 17},
-     {"epoch": 0.3210702341137124, "grad_norm": 3.046875, "learning_rate": 0.00019182161068802741, "loss": 0.8047, "step": 18},
-     {"epoch": 0.33890746934225197, "grad_norm": 3.171875, "learning_rate": 0.000190630778703665, "loss": 0.8047, "step": 19},
-     {"epoch": 0.35674470457079155, "grad_norm": 2.640625, "learning_rate": 0.00018936326403234125, "loss": 0.5664, "step": 20},
-     {"epoch": 0.3745819397993311, "grad_norm": 2.984375, "learning_rate": 0.00018802013911801112, "loss": 0.5352, "step": 21},
-     {"epoch": 0.39241917502787066, "grad_norm": 2.421875, "learning_rate": 0.00018660254037844388, "loss": 0.7227, "step": 22},
-     {"epoch": 0.41025641025641024, "grad_norm": 2.625, "learning_rate": 0.00018511166724369997, "loss": 0.7852, "step": 23},
-     {"epoch": 0.4280936454849498, "grad_norm": 2.953125, "learning_rate": 0.00018354878114129367, "loss": 0.5391, "step": 24},
-     {"epoch": 0.4459308807134894, "grad_norm": 2.125, "learning_rate": 0.0001819152044288992, "loss": 0.4434, "step": 25},
-     {"epoch": 0.4459308807134894, "eval_loss": 0.6694146394729614, "eval_runtime": 170.6904, "eval_samples_per_second": 2.642, "eval_steps_per_second": 1.324, "step": 25},
-     {"epoch": 0.463768115942029, "grad_norm": 2.25, "learning_rate": 0.0001802123192755044, "loss": 0.3809, "step": 26},
-     {"epoch": 0.4816053511705686, "grad_norm": 2.734375, "learning_rate": 0.00017844156649195759, "loss": 0.6016, "step": 27},
-     {"epoch": 0.49944258639910816, "grad_norm": 3.015625, "learning_rate": 0.0001766044443118978, "loss": 0.6406, "step": 28},
-     {"epoch": 0.5172798216276477, "grad_norm": 2.65625, "learning_rate": 0.0001747025071240996, "loss": 0.4551, "step": 29},
-     {"epoch": 0.5351170568561873, "grad_norm": 3.015625, "learning_rate": 0.00017273736415730488, "loss": 0.7344, "step": 30},
-     {"epoch": 0.5529542920847269, "grad_norm": 3.25, "learning_rate": 0.00017071067811865476, "loss": 0.6602, "step": 31},
-     {"epoch": 0.5707915273132664, "grad_norm": 3.34375, "learning_rate": 0.0001686241637868734, "loss": 0.6523, "step": 32},
-     {"epoch": 0.5886287625418061, "grad_norm": 3.328125, "learning_rate": 0.00016647958656139378, "loss": 0.4199, "step": 33},
-     {"epoch": 0.6064659977703456, "grad_norm": 2.859375, "learning_rate": 0.00016427876096865394, "loss": 0.7227, "step": 34},
-     {"epoch": 0.6243032329988851, "grad_norm": 2.96875, "learning_rate": 0.000162023549126826, "loss": 0.6211, "step": 35},
-     {"epoch": 0.6421404682274248, "grad_norm": 2.59375, "learning_rate": 0.00015971585917027862, "loss": 0.7539, "step": 36},
-     {"epoch": 0.6599777034559643, "grad_norm": 3.4375, "learning_rate": 0.0001573576436351046, "loss": 0.918, "step": 37},
-     {"epoch": 0.6778149386845039, "grad_norm": 3.5, "learning_rate": 0.0001549508978070806, "loss": 0.793, "step": 38},
-     {"epoch": 0.6956521739130435, "grad_norm": 2.03125, "learning_rate": 0.000152497658033456, "loss": 0.4609, "step": 39},
-     {"epoch": 0.7134894091415831, "grad_norm": 1.796875, "learning_rate": 0.00015000000000000001, "loss": 0.5117, "step": 40},
-     {"epoch": 0.7313266443701226, "grad_norm": 1.625, "learning_rate": 0.00014746003697476404, "loss": 0.3906, "step": 41},
-     {"epoch": 0.7491638795986622, "grad_norm": 3.140625, "learning_rate": 0.00014487991802004623, "loss": 0.7383, "step": 42},
-     {"epoch": 0.7670011148272018, "grad_norm": 2.828125, "learning_rate": 0.00014226182617406996, "loss": 0.5664, "step": 43},
-     {"epoch": 0.7848383500557413, "grad_norm": 2.796875, "learning_rate": 0.0001396079766039157, "loss": 0.5391, "step": 44},
-     {"epoch": 0.802675585284281, "grad_norm": 2.203125, "learning_rate": 0.00013692061473126845, "loss": 0.5469, "step": 45},
-     {"epoch": 0.8205128205128205, "grad_norm": 2.796875, "learning_rate": 0.00013420201433256689, "loss": 0.6172, "step": 46},
-     {"epoch": 0.8383500557413601, "grad_norm": 2.421875, "learning_rate": 0.00013145447561516138, "loss": 0.5, "step": 47},
-     {"epoch": 0.8561872909698997, "grad_norm": 2.46875, "learning_rate": 0.00012868032327110904, "loss": 0.4844, "step": 48},
-     {"epoch": 0.8740245261984393, "grad_norm": 3.125, "learning_rate": 0.00012588190451025207, "loss": 0.4883, "step": 49},
-     {"epoch": 0.8918617614269788, "grad_norm": 2.125, "learning_rate": 0.00012306158707424403, "loss": 0.3809, "step": 50},
-     {"epoch": 0.8918617614269788, "eval_loss": 0.6941810250282288, "eval_runtime": 169.4879, "eval_samples_per_second": 2.661, "eval_steps_per_second": 1.333, "step": 50}
-   ],
-   "logging_steps": 1,
-   "max_steps": 112,
-   "num_input_tokens_seen": 0,
-   "num_train_epochs": 2,
-   "save_steps": 50,
-   "total_flos": 3116060910391296.0,
-   "train_batch_size": 2,
-   "trial_name": null,
-   "trial_params": null
- }
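The trainer_state.json files removed here record the full step-by-step Hugging Face Trainer log, which is what you would use to compare these learning-rate and rank sweeps after the fact. A short sketch, assuming the standard trainer_state layout shown in the diff above (the local file path is hypothetical):

```python
import json

# Sketch: separate train-loss steps from eval checkpoints in a Hugging Face
# Trainer trainer_state.json (structure as in the deleted file above).
with open("checkpoint-50/trainer_state.json") as f:  # hypothetical path
    state = json.load(f)

evals = [e for e in state["log_history"] if "eval_loss" in e]
for e in evals:
    print(f"step {e['step']:>3}: eval_loss = {e['eval_loss']:.4f}")
print("best:", state["best_metric"], "->", state["best_model_checkpoint"])
```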
2e_4VQAlora2nd/checkpoints/LVQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-50/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:89025018165da662a325262415e63725b8a9b5048c65b43e2fa1d0c1f35f79c1
- size 5048
2e_4VQAlora2nd/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:89025018165da662a325262415e63725b8a9b5048c65b43e2fa1d0c1f35f79c1
- size 5048
2e_5_r8_2VQAlora2nd/README.md DELETED
@@ -1,202 +0,0 @@
(202-line auto-generated PEFT model card deleted; content is identical to the 2e_4VQAlora2nd/README.md diff shown above.)
2e_5_r8_2VQAlora2nd/adapter_config.json DELETED
@@ -1,38 +0,0 @@
- {
-   "alpha_pattern": {},
-   "auto_mapping": {
-     "base_model_class": "LlavaMistralForCausalLM",
-     "parent_library": "llava.model.language_model.llava_mistral"
-   },
-   "base_model_name_or_path": "/workspace/ROCO2nd_RSV_llava-v1.6-mistral_PMC",
-   "bias": "none",
-   "fan_in_fan_out": false,
-   "inference_mode": true,
-   "init_lora_weights": true,
-   "layer_replication": null,
-   "layers_pattern": null,
-   "layers_to_transform": null,
-   "loftq_config": {},
-   "lora_alpha": 16,
-   "lora_dropout": 0.05,
-   "megatron_config": null,
-   "megatron_core": "megatron.core",
-   "modules_to_save": [
-     "mm_projector"
-   ],
-   "peft_type": "LORA",
-   "r": 8,
-   "rank_pattern": {},
-   "revision": null,
-   "target_modules": [
-     "up_proj",
-     "q_proj",
-     "v_proj",
-     "down_proj",
-     "gate_proj",
-     "k_proj"
-   ],
-   "task_type": null,
-   "use_dora": false,
-   "use_rslora": false
- }
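The r8 runs halve the LoRA rank relative to the runs above (r=8 with lora_alpha=16 versus r=16 with lora_alpha=32), which is visible directly in the adapter sizes: 82,145,000 bytes here versus 122,253,960 bytes for the r=16 adapters (the difference beyond the LoRA matrices comes from the fully saved mm_projector). A back-of-envelope sketch of why, assuming Mistral-7B-like shapes (32 layers, hidden size 4096, intermediate size 14336, 8 KV heads of dim 128); these dimensions are assumptions, not read from the commit:

```python
# Rough LoRA parameter count over the six target projections, assuming
# Mistral-7B-like shapes (an assumption; not stated anywhere in this commit).
hidden, inter, layers = 4096, 14336, 32
shapes = {
    "q_proj": (hidden, hidden),
    "k_proj": (hidden, 1024),  # 8 KV heads * 128 (grouped-query attention)
    "v_proj": (hidden, 1024),
    "up_proj": (hidden, inter),
    "gate_proj": (hidden, inter),
    "down_proj": (inter, hidden),
}

def lora_params(r: int) -> int:
    # Each adapted weight W (d_in x d_out) gains A (d_in x r) and B (r x d_out).
    return layers * sum(r * (din + dout) for din, dout in shapes.values())

print(lora_params(8), lora_params(16))  # r=8 is exactly half of r=16
```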
2e_5_r8_2VQAlora2nd/adapter_model.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:5b0232e69491294015ab0a4efd85b1b6e1b7d740369cb5272f16a0c3a0b1dd2e
- size 82145000
2e_5_r8_2VQAlora2nd/checkpoints/r82_2e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/README.md DELETED
@@ -1,202 +0,0 @@
(202-line auto-generated PEFT model card deleted; content is identical to the 2e_4VQAlora2nd/README.md diff shown above.)
2e_5_r8_2VQAlora2nd/checkpoints/r82_2e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/adapter_config.json DELETED
@@ -1,38 +0,0 @@
(38-line adapter_config.json deleted; identical to the 2e_5_r8_2VQAlora2nd/adapter_config.json diff shown above.)
2e_5_r8_2VQAlora2nd/checkpoints/r82_2e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/adapter_model.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:1ecd06d454a5ef19de39fef869cf97fd790549b09548406d84a8c939662c9951
- size 82145000
2e_5_r8_2VQAlora2nd/checkpoints/r82_2e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e58c4171fc1708d1a7c9827f39f2780c58fa8c1147ac2040914171331cbcca38
- size 159745054
2e_5_r8_2VQAlora2nd/checkpoints/r82_2e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d84d4f48c468a94fd4072c91a59f565bee1ca2a3e59f347bb7cf50192b12534d
- size 14244
2e_5_r8_2VQAlora2nd/checkpoints/r82_2e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:3bf58c2acdbed25fa02315fba4c7735d145c4d933480a8c2806b5a368de6c669
- size 1000
2e_5_r8_2VQAlora2nd/checkpoints/r82_2e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/trainer_state.json DELETED
@@ -1,533 +0,0 @@
- {
-   "best_metric": 0.623073935508728,
-   "best_model_checkpoint": "/workspace/checkpoints/r82_2e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80",
-   "epoch": 1.4269788182831662,
-   "eval_steps": 20,
-   "global_step": 80,
-   "is_hyper_param_search": false,
-   "is_local_process_zero": true,
-   "is_world_process_zero": true,
-   "log_history": [
-     {"epoch": 0.02, "learning_rate": 5e-06, "loss": 0.8086, "step": 1},
-     {"epoch": 0.04, "learning_rate": 1e-05, "loss": 0.6797, "step": 2},
-     {"epoch": 0.05, "learning_rate": 1.5000000000000002e-05, "loss": 0.4551, "step": 3},
-     {"epoch": 0.07, "learning_rate": 2e-05, "loss": 0.543, "step": 4},
-     {"epoch": 0.09, "learning_rate": 1.9995769500822007e-05, "loss": 0.3105, "step": 5},
-     {"epoch": 0.11, "learning_rate": 1.9983081582712684e-05, "loss": 0.4727, "step": 6},
-     {"epoch": 0.12, "learning_rate": 1.9961946980917457e-05, "loss": 0.6914, "step": 7},
-     {"epoch": 0.14, "learning_rate": 1.9932383577419432e-05, "loss": 0.5586, "step": 8},
-     {"epoch": 0.16, "learning_rate": 1.9894416385809444e-05, "loss": 0.4395, "step": 9},
-     {"epoch": 0.18, "learning_rate": 1.9848077530122083e-05, "loss": 0.457, "step": 10},
-     {"epoch": 0.2, "learning_rate": 1.9793406217655516e-05, "loss": 0.3574, "step": 11},
-     {"epoch": 0.21, "learning_rate": 1.973044870579824e-05, "loss": 0.6641, "step": 12},
-     {"epoch": 0.23, "learning_rate": 1.9659258262890683e-05, "loss": 0.5117, "step": 13},
-     {"epoch": 0.25, "learning_rate": 1.957989512315489e-05, "loss": 0.5742, "step": 14},
-     {"epoch": 0.27, "learning_rate": 1.949242643573034e-05, "loss": 0.4824, "step": 15},
-     {"epoch": 0.29, "learning_rate": 1.9396926207859085e-05, "loss": 0.416, "step": 16},
-     {"epoch": 0.3, "learning_rate": 1.9293475242268224e-05, "loss": 0.625, "step": 17},
-     {"epoch": 0.32, "learning_rate": 1.9182161068802742e-05, "loss": 0.2354, "step": 18},
-     {"epoch": 0.34, "learning_rate": 1.9063077870366504e-05, "loss": 0.7891, "step": 19},
-     {"epoch": 0.36, "learning_rate": 1.8936326403234125e-05, "loss": 0.668, "step": 20},
-     {"epoch": 0.36, "eval_loss": 0.6457709074020386, "eval_runtime": 330.0437, "eval_samples_per_second": 1.366, "eval_steps_per_second": 0.685, "step": 20},
-     {"epoch": 0.37, "learning_rate": 1.880201391180111e-05, "loss": 0.5391, "step": 21},
-     {"epoch": 0.39, "learning_rate": 1.866025403784439e-05, "loss": 0.3848, "step": 22},
-     {"epoch": 0.41, "learning_rate": 1.8511166724369997e-05, "loss": 0.4434, "step": 23},
-     {"epoch": 0.43, "learning_rate": 1.8354878114129368e-05, "loss": 0.5977, "step": 24},
-     {"epoch": 0.45, "learning_rate": 1.819152044288992e-05, "loss": 0.543, "step": 25},
-     {"epoch": 0.46, "learning_rate": 1.802123192755044e-05, "loss": 0.7422, "step": 26},
-     {"epoch": 0.48, "learning_rate": 1.784415664919576e-05, "loss": 0.5039, "step": 27},
-     {"epoch": 0.5, "learning_rate": 1.766044443118978e-05, "loss": 0.3848, "step": 28},
-     {"epoch": 0.52, "learning_rate": 1.7470250712409963e-05, "loss": 0.5195, "step": 29},
-     {"epoch": 0.54, "learning_rate": 1.7273736415730488e-05, "loss": 0.3945, "step": 30},
-     {"epoch": 0.55, "learning_rate": 1.7071067811865477e-05, "loss": 0.5586, "step": 31},
-     {"epoch": 0.57, "learning_rate": 1.686241637868734e-05, "loss": 0.4512, "step": 32},
-     {"epoch": 0.59, "learning_rate": 1.6647958656139377e-05, "loss": 0.5273, "step": 33},
-     {"epoch": 0.61, "learning_rate": 1.6427876096865394e-05, "loss": 0.5273, "step": 34},
-     {"epoch": 0.62, "learning_rate": 1.6202354912682602e-05, "loss": 0.7734, "step": 35},
-     {"epoch": 0.64, "learning_rate": 1.5971585917027864e-05, "loss": 0.875, "step": 36},
-     {"epoch": 0.66, "learning_rate": 1.573576436351046e-05, "loss": 0.6602, "step": 37},
-     {"epoch": 0.68, "learning_rate": 1.5495089780708062e-05, "loss": 0.8047, "step": 38},
-     {"epoch": 0.7, "learning_rate": 1.5249765803345602e-05, "loss": 0.793, "step": 39},
-     {"epoch": 0.71, "learning_rate": 1.5000000000000002e-05, "loss": 0.6562, "step": 40},
-     {"epoch": 0.71, "eval_loss": 0.6437277793884277, "eval_runtime": 322.5002, "eval_samples_per_second": 1.398, "eval_steps_per_second": 0.701, "step": 40},
-     {"epoch": 0.73, "learning_rate": 1.4746003697476406e-05, "loss": 0.5859, "step": 41},
-     {"epoch": 0.75, "learning_rate": 1.4487991802004625e-05, "loss": 0.6328, "step": 42},
-     {"epoch": 0.77, "learning_rate": 1.4226182617406996e-05, "loss": 0.4727, "step": 43},
-     {"epoch": 0.78, "learning_rate": 1.396079766039157e-05, "loss": 0.6836, "step": 44},
-     {"epoch": 0.8, "learning_rate": 1.3692061473126845e-05, "loss": 0.6875, "step": 45},
-     {"epoch": 0.82, "learning_rate": 1.342020143325669e-05, "loss": 0.75, "step": 46},
-     {"epoch": 0.84, "learning_rate": 1.3145447561516138e-05, "loss": 0.4512, "step": 47},
-     {"epoch": 0.86, "learning_rate": 1.2868032327110904e-05, "loss": 0.6875, "step": 48},
-     {"epoch": 0.87, "learning_rate": 1.2588190451025209e-05, "loss": 0.7734, "step": 49},
-     {"epoch": 0.89, "learning_rate": 1.2306158707424402e-05, "loss": 0.6328, "step": 50},
-     {"epoch": 0.91, "learning_rate": 1.2022175723320382e-05, "loss": 0.6992, "step": 51},
-     {"epoch": 0.93, "learning_rate": 1.1736481776669307e-05, "loss": 0.5977, "step": 52},
-     {"epoch": 0.95, "learning_rate": 1.1449318593072468e-05, "loss": 0.5742, "step": 53},
-     {"epoch": 0.96, "learning_rate": 1.1160929141252303e-05, "loss": 0.3633, "step": 54},
-     {"epoch": 0.98, "learning_rate": 1.0871557427476585e-05, "loss": 0.6836, "step": 55},
-     {"epoch": 1.0, "learning_rate": 1.0581448289104759e-05, "loss": 0.8086, "step": 56},
-     {"epoch": 1.02, "learning_rate": 1.0290847187431115e-05, "loss": 0.3984, "step": 57},
-     {"epoch": 1.03, "learning_rate": 1e-05, "loss": 0.4531, "step": 58},
-     {"epoch": 1.05, "learning_rate": 9.709152812568886e-06, "loss": 0.5039, "step": 59},
-     {"epoch": 1.07, "learning_rate": 9.418551710895243e-06, "loss": 0.6211, "step": 60},
-     {"epoch": 1.07, "eval_loss": 0.6246171593666077, "eval_runtime": 327.5115, "eval_samples_per_second": 1.377, "eval_steps_per_second": 0.69, "step": 60},
-     {"epoch": 1.09, "learning_rate": 9.128442572523418e-06, "loss": 0.3184, "step": 61},
-     {"epoch": 1.11, "learning_rate": 8.839070858747697e-06, "loss": 0.4434, "step": 62},
-     {"epoch": 1.12, "learning_rate": 8.550681406927534e-06, "loss": 0.4141, "step": 63},
-     {"epoch": 1.14, "learning_rate": 8.263518223330698e-06, "loss": 0.3477, "step": 64},
-     {"epoch": 1.16, "learning_rate": 7.977824276679623e-06, "loss": 0.4844, "step": 65},
-     {"epoch": 1.18, "learning_rate": 7.6938412925756e-06, "loss": 0.4902, "step": 66},
-     {"epoch": 1.2, "learning_rate": 7.411809548974792e-06, "loss": 0.5547, "step": 67},
-     {"epoch": 1.21, "learning_rate": 7.131967672889101e-06, "loss": 0.3965, "step": 68},
-     {"epoch": 1.23, "learning_rate": 6.854552438483866e-06, "loss": 0.6133, "step": 69},
-     {"epoch": 1.25, "learning_rate": 6.579798566743314e-06, "loss": 0.5312, "step": 70},
-     {"epoch": 1.27, "learning_rate": 6.3079385268731575e-06, "loss": 0.4766, "step": 71},
-     {"epoch": 1.28, "learning_rate": 6.039202339608432e-06, "loss": 0.8125, "step": 72},
-     {"epoch": 1.3, "learning_rate": 5.773817382593008e-06, "loss": 0.4551, "step": 73},
-     {"epoch": 1.32,
475
- "learning_rate": 5.512008197995379e-06,
476
- "loss": 0.4219,
477
- "step": 74
478
- },
479
- {
480
- "epoch": 1.34,
481
- "learning_rate": 5.253996302523596e-06,
482
- "loss": 0.3867,
483
- "step": 75
484
- },
485
- {
486
- "epoch": 1.36,
487
- "learning_rate": 5.000000000000003e-06,
488
- "loss": 0.5156,
489
- "step": 76
490
- },
491
- {
492
- "epoch": 1.37,
493
- "learning_rate": 4.7502341966544e-06,
494
- "loss": 0.5469,
495
- "step": 77
496
- },
497
- {
498
- "epoch": 1.39,
499
- "learning_rate": 4.504910219291941e-06,
500
- "loss": 0.3984,
501
- "step": 78
502
- },
503
- {
504
- "epoch": 1.41,
505
- "learning_rate": 4.264235636489542e-06,
506
- "loss": 0.4395,
507
- "step": 79
508
- },
509
- {
510
- "epoch": 1.43,
511
- "learning_rate": 4.028414082972141e-06,
512
- "loss": 0.6719,
513
- "step": 80
514
- },
515
- {
516
- "epoch": 1.43,
517
- "eval_loss": 0.623073935508728,
518
- "eval_runtime": 322.1595,
519
- "eval_samples_per_second": 1.4,
520
- "eval_steps_per_second": 0.702,
521
- "step": 80
522
- }
523
- ],
524
- "logging_steps": 1,
525
- "max_steps": 112,
526
- "num_input_tokens_seen": 0,
527
- "num_train_epochs": 2,
528
- "save_steps": 40,
529
- "total_flos": 4979777395845120.0,
530
- "train_batch_size": 2,
531
- "trial_name": null,
532
- "trial_params": null
533
- }
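Editor's note: the learning rates logged above trace Hugging Face Trainer's cosine schedule. With max_steps = 112 and a peak rate of 2e-5, the values are consistent with a linear warmup of about 4 steps followed by cosine decay; the warmup count is inferred from the numbers, not recorded in this file. A minimal sketch that reproduces the logged values:

```python
import math

def lr_at(step: int, base_lr: float = 2e-5, warmup: int = 4, max_steps: int = 112) -> float:
    """Linear warmup followed by cosine decay, per the log_history above."""
    if step < warmup:
        return base_lr * step / warmup
    progress = (step - warmup) / (max_steps - warmup)
    return 0.5 * base_lr * (1.0 + math.cos(math.pi * progress))

print(lr_at(21))  # ~1.8802e-05, matching "learning_rate" at step 21
print(lr_at(58))  # 1.0e-05, matching "learning_rate" at step 58
```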
2e_5_r8_2VQAlora2nd/checkpoints/r82_2e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/training_args.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:a3fb2b54d9ce3c79a2533cc4f7113f96de9b3af85fa1d22489c50d5969473c5a
3
- size 4792
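Editor's note: the three-line stub above is a Git LFS pointer rather than the binary itself; the repository records only the object's sha256 and size (4,792 bytes here), while the payload sits in LFS storage. Before a deletion like this, the actual file could typically be materialized with `git lfs pull --include="2e_5_r8_2VQAlora2nd/**"` (path illustrative).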
 
 
 
 
2e_5_r8_2VQAlora2nd/training_args.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:a3fb2b54d9ce3c79a2533cc4f7113f96de9b3af85fa1d22489c50d5969473c5a
3
- size 4792
 
 
 
 
2ndvqarad/.DS_Store ADDED
Binary file (6.15 kB).
 
{1e_4_5VQAlora2nd → 2ndvqarad}/README.md RENAMED
File without changes
{4e_5_2VQAlora2nd → 2ndvqarad}/adapter_config.json RENAMED
File without changes
{4e_5_2VQAlora2nd → 2ndvqarad}/adapter_model.safetensors RENAMED
File without changes
{4e_5_2VQAlora2nd/checkpoints/2_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80 → 2ndvqarad}/training_args.bin RENAMED
File without changes
4e_5_2VQAlora2nd/README.md DELETED
@@ -1,202 +0,0 @@
1
- ---
2
- library_name: peft
3
- base_model: /workspace/ROCO2nd_RSV_llava-v1.6-mistral_PMC
4
- ---
5
-
6
- # Model Card for Model ID
7
-
8
- <!-- Provide a quick summary of what the model is/does. -->
9
-
10
-
11
-
12
- ## Model Details
13
-
14
- ### Model Description
15
-
16
- <!-- Provide a longer summary of what this model is. -->
17
-
18
-
19
-
20
- - **Developed by:** [More Information Needed]
21
- - **Funded by [optional]:** [More Information Needed]
22
- - **Shared by [optional]:** [More Information Needed]
23
- - **Model type:** [More Information Needed]
24
- - **Language(s) (NLP):** [More Information Needed]
25
- - **License:** [More Information Needed]
26
- - **Finetuned from model [optional]:** [More Information Needed]
27
-
28
- ### Model Sources [optional]
29
-
30
- <!-- Provide the basic links for the model. -->
31
-
32
- - **Repository:** [More Information Needed]
33
- - **Paper [optional]:** [More Information Needed]
34
- - **Demo [optional]:** [More Information Needed]
35
-
36
- ## Uses
37
-
38
- <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
39
-
40
- ### Direct Use
41
-
42
- <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
43
-
44
- [More Information Needed]
45
-
46
- ### Downstream Use [optional]
47
-
48
- <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
49
-
50
- [More Information Needed]
51
-
52
- ### Out-of-Scope Use
53
-
54
- <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
55
-
56
- [More Information Needed]
57
-
58
- ## Bias, Risks, and Limitations
59
-
60
- <!-- This section is meant to convey both technical and sociotechnical limitations. -->
61
-
62
- [More Information Needed]
63
-
64
- ### Recommendations
65
-
66
- <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
67
-
68
- Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
69
-
70
- ## How to Get Started with the Model
71
-
72
- Use the code below to get started with the model.
73
-
74
- [More Information Needed]
75
-
76
- ## Training Details
77
-
78
- ### Training Data
79
-
80
- <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
-
82
- [More Information Needed]
83
-
84
- ### Training Procedure
85
-
86
- <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
-
88
- #### Preprocessing [optional]
89
-
90
- [More Information Needed]
91
-
92
-
93
- #### Training Hyperparameters
94
-
95
- - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
96
-
97
- #### Speeds, Sizes, Times [optional]
98
-
99
- <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
100
-
101
- [More Information Needed]
102
-
103
- ## Evaluation
104
-
105
- <!-- This section describes the evaluation protocols and provides the results. -->
106
-
107
- ### Testing Data, Factors & Metrics
108
-
109
- #### Testing Data
110
-
111
- <!-- This should link to a Dataset Card if possible. -->
112
-
113
- [More Information Needed]
114
-
115
- #### Factors
116
-
117
- <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
118
-
119
- [More Information Needed]
120
-
121
- #### Metrics
122
-
123
- <!-- These are the evaluation metrics being used, ideally with a description of why. -->
124
-
125
- [More Information Needed]
126
-
127
- ### Results
128
-
129
- [More Information Needed]
130
-
131
- #### Summary
132
-
133
-
134
-
135
- ## Model Examination [optional]
136
-
137
- <!-- Relevant interpretability work for the model goes here -->
138
-
139
- [More Information Needed]
140
-
141
- ## Environmental Impact
142
-
143
- <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
144
-
145
- Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
146
-
147
- - **Hardware Type:** [More Information Needed]
148
- - **Hours used:** [More Information Needed]
149
- - **Cloud Provider:** [More Information Needed]
150
- - **Compute Region:** [More Information Needed]
151
- - **Carbon Emitted:** [More Information Needed]
152
-
153
- ## Technical Specifications [optional]
154
-
155
- ### Model Architecture and Objective
156
-
157
- [More Information Needed]
158
-
159
- ### Compute Infrastructure
160
-
161
- [More Information Needed]
162
-
163
- #### Hardware
164
-
165
- [More Information Needed]
166
-
167
- #### Software
168
-
169
- [More Information Needed]
170
-
171
- ## Citation [optional]
172
-
173
- <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
174
-
175
- **BibTeX:**
176
-
177
- [More Information Needed]
178
-
179
- **APA:**
180
-
181
- [More Information Needed]
182
-
183
- ## Glossary [optional]
184
-
185
- <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
186
-
187
- [More Information Needed]
188
-
189
- ## More Information [optional]
190
-
191
- [More Information Needed]
192
-
193
- ## Model Card Authors [optional]
194
-
195
- [More Information Needed]
196
-
197
- ## Model Card Contact
198
-
199
- [More Information Needed]
200
- ### Framework versions
201
-
202
- - PEFT 0.10.1.dev0
4e_5_2VQAlora2nd/checkpoints/2_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/README.md DELETED
@@ -1,202 +0,0 @@
1
- ---
2
- library_name: peft
3
- base_model: /workspace/ROCO2nd_RSV_llava-v1.6-mistral_PMC
4
- ---
5
-
6
- # Model Card for Model ID
7
-
8
- <!-- Provide a quick summary of what the model is/does. -->
9
-
10
-
11
-
12
- ## Model Details
13
-
14
- ### Model Description
15
-
16
- <!-- Provide a longer summary of what this model is. -->
17
-
18
-
19
-
20
- - **Developed by:** [More Information Needed]
21
- - **Funded by [optional]:** [More Information Needed]
22
- - **Shared by [optional]:** [More Information Needed]
23
- - **Model type:** [More Information Needed]
24
- - **Language(s) (NLP):** [More Information Needed]
25
- - **License:** [More Information Needed]
26
- - **Finetuned from model [optional]:** [More Information Needed]
27
-
28
- ### Model Sources [optional]
29
-
30
- <!-- Provide the basic links for the model. -->
31
-
32
- - **Repository:** [More Information Needed]
33
- - **Paper [optional]:** [More Information Needed]
34
- - **Demo [optional]:** [More Information Needed]
35
-
36
- ## Uses
37
-
38
- <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
39
-
40
- ### Direct Use
41
-
42
- <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
43
-
44
- [More Information Needed]
45
-
46
- ### Downstream Use [optional]
47
-
48
- <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
49
-
50
- [More Information Needed]
51
-
52
- ### Out-of-Scope Use
53
-
54
- <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
55
-
56
- [More Information Needed]
57
-
58
- ## Bias, Risks, and Limitations
59
-
60
- <!-- This section is meant to convey both technical and sociotechnical limitations. -->
61
-
62
- [More Information Needed]
63
-
64
- ### Recommendations
65
-
66
- <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
67
-
68
- Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
69
-
70
- ## How to Get Started with the Model
71
-
72
- Use the code below to get started with the model.
73
-
74
- [More Information Needed]
75
-
76
- ## Training Details
77
-
78
- ### Training Data
79
-
80
- <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
-
82
- [More Information Needed]
83
-
84
- ### Training Procedure
85
-
86
- <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
-
88
- #### Preprocessing [optional]
89
-
90
- [More Information Needed]
91
-
92
-
93
- #### Training Hyperparameters
94
-
95
- - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
96
-
97
- #### Speeds, Sizes, Times [optional]
98
-
99
- <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
100
-
101
- [More Information Needed]
102
-
103
- ## Evaluation
104
-
105
- <!-- This section describes the evaluation protocols and provides the results. -->
106
-
107
- ### Testing Data, Factors & Metrics
108
-
109
- #### Testing Data
110
-
111
- <!-- This should link to a Dataset Card if possible. -->
112
-
113
- [More Information Needed]
114
-
115
- #### Factors
116
-
117
- <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
118
-
119
- [More Information Needed]
120
-
121
- #### Metrics
122
-
123
- <!-- These are the evaluation metrics being used, ideally with a description of why. -->
124
-
125
- [More Information Needed]
126
-
127
- ### Results
128
-
129
- [More Information Needed]
130
-
131
- #### Summary
132
-
133
-
134
-
135
- ## Model Examination [optional]
136
-
137
- <!-- Relevant interpretability work for the model goes here -->
138
-
139
- [More Information Needed]
140
-
141
- ## Environmental Impact
142
-
143
- <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
144
-
145
- Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
146
-
147
- - **Hardware Type:** [More Information Needed]
148
- - **Hours used:** [More Information Needed]
149
- - **Cloud Provider:** [More Information Needed]
150
- - **Compute Region:** [More Information Needed]
151
- - **Carbon Emitted:** [More Information Needed]
152
-
153
- ## Technical Specifications [optional]
154
-
155
- ### Model Architecture and Objective
156
-
157
- [More Information Needed]
158
-
159
- ### Compute Infrastructure
160
-
161
- [More Information Needed]
162
-
163
- #### Hardware
164
-
165
- [More Information Needed]
166
-
167
- #### Software
168
-
169
- [More Information Needed]
170
-
171
- ## Citation [optional]
172
-
173
- <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
174
-
175
- **BibTeX:**
176
-
177
- [More Information Needed]
178
-
179
- **APA:**
180
-
181
- [More Information Needed]
182
-
183
- ## Glossary [optional]
184
-
185
- <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
186
-
187
- [More Information Needed]
188
-
189
- ## More Information [optional]
190
-
191
- [More Information Needed]
192
-
193
- ## Model Card Authors [optional]
194
-
195
- [More Information Needed]
196
-
197
- ## Model Card Contact
198
-
199
- [More Information Needed]
200
- ### Framework versions
201
-
202
- - PEFT 0.10.1.dev0
4e_5_2VQAlora2nd/checkpoints/2_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/adapter_config.json DELETED
@@ -1,38 +0,0 @@
1
- {
2
- "alpha_pattern": {},
3
- "auto_mapping": {
4
- "base_model_class": "LlavaMistralForCausalLM",
5
- "parent_library": "llava.model.language_model.llava_mistral"
6
- },
7
- "base_model_name_or_path": "/workspace/ROCO2nd_RSV_llava-v1.6-mistral_PMC",
8
- "bias": "none",
9
- "fan_in_fan_out": false,
10
- "inference_mode": true,
11
- "init_lora_weights": true,
12
- "layer_replication": null,
13
- "layers_pattern": null,
14
- "layers_to_transform": null,
15
- "loftq_config": {},
16
- "lora_alpha": 32,
17
- "lora_dropout": 0.05,
18
- "megatron_config": null,
19
- "megatron_core": "megatron.core",
20
- "modules_to_save": [
21
- "mm_projector"
22
- ],
23
- "peft_type": "LORA",
24
- "r": 16,
25
- "rank_pattern": {},
26
- "revision": null,
27
- "target_modules": [
28
- "k_proj",
29
- "q_proj",
30
- "v_proj",
31
- "up_proj",
32
- "gate_proj",
33
- "down_proj"
34
- ],
35
- "task_type": null,
36
- "use_dora": false,
37
- "use_rslora": false
38
- }
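Editor's note: this config describes a rank-16 LoRA (lora_alpha = 32, i.e. an effective scaling of alpha/r = 2) over the attention and MLP projections, with the multimodal projector (mm_projector) trained in full via modules_to_save. A minimal sketch of reattaching such an adapter with PEFT (paths are illustrative; the base-model class is the one named in auto_mapping):

```python
from llava.model.language_model.llava_mistral import LlavaMistralForCausalLM
from peft import PeftModel

# Base checkpoint the adapter was trained from ("base_model_name_or_path" above).
base = LlavaMistralForCausalLM.from_pretrained("/workspace/ROCO2nd_RSV_llava-v1.6-mistral_PMC")

# Directory holding adapter_config.json and adapter_model.safetensors.
model = PeftModel.from_pretrained(base, "4e_5_2VQAlora2nd")

model = model.merge_and_unload()  # optional: fold the LoRA deltas into the base weights
```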
4e_5_2VQAlora2nd/checkpoints/2_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/adapter_model.safetensors DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:6ec223b0f496bb70eced664b2e732b675e4285577505c475ba9c48886f4fbcd5
3
- size 122253960
 
 
 
 
4e_5_2VQAlora2nd/checkpoints/2_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/optimizer.pt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:0ba45b246b37370bda13027e4d8bd1349db0353bb2b3a540a7892a9d76989300
3
- size 235243422
 
 
 
 
4e_5_2VQAlora2nd/checkpoints/2_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/rng_state.pth DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:d84d4f48c468a94fd4072c91a59f565bee1ca2a3e59f347bb7cf50192b12534d
3
- size 14244
 
 
 
 
4e_5_2VQAlora2nd/checkpoints/2_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/scheduler.pt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:91de2e5150296a3888b9d86e8412c0c3aac23a6d23700fef3e75d90a2393e8e1
3
- size 1000
 
 
 
 
4e_5_2VQAlora2nd/checkpoints/2_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/trainer_state.json DELETED
@@ -1,149 +0,0 @@
1
- {
2
- "best_metric": 0.6341778635978699,
3
- "best_model_checkpoint": "/workspace/checkpoints/2_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80",
4
- "epoch": 1.4269788182831662,
5
- "eval_steps": 20,
6
- "global_step": 80,
7
- "is_hyper_param_search": false,
8
- "is_local_process_zero": true,
9
- "is_world_process_zero": true,
10
- "log_history": [
11
- {
12
- "epoch": 0.09,
13
- "learning_rate": 3.9991539001644015e-05,
14
- "loss": 0.5469,
15
- "step": 5
16
- },
17
- {
18
- "epoch": 0.18,
19
- "learning_rate": 3.9696155060244166e-05,
20
- "loss": 0.5094,
21
- "step": 10
22
- },
23
- {
24
- "epoch": 0.27,
25
- "learning_rate": 3.898485287146068e-05,
26
- "loss": 0.4844,
27
- "step": 15
28
- },
29
- {
30
- "epoch": 0.36,
31
- "learning_rate": 3.787265280646825e-05,
32
- "loss": 0.5625,
33
- "step": 20
34
- },
35
- {
36
- "epoch": 0.36,
37
- "eval_loss": 0.6666426062583923,
38
- "eval_runtime": 313.5071,
39
- "eval_samples_per_second": 1.439,
40
- "eval_steps_per_second": 0.721,
41
- "step": 20
42
- },
43
- {
44
- "epoch": 0.45,
45
- "learning_rate": 3.638304088577984e-05,
46
- "loss": 0.4938,
47
- "step": 25
48
- },
49
- {
50
- "epoch": 0.54,
51
- "learning_rate": 3.4547472831460976e-05,
52
- "loss": 0.4969,
53
- "step": 30
54
- },
55
- {
56
- "epoch": 0.62,
57
- "learning_rate": 3.2404709825365204e-05,
58
- "loss": 0.5219,
59
- "step": 35
60
- },
61
- {
62
- "epoch": 0.71,
63
- "learning_rate": 3.0000000000000004e-05,
64
- "loss": 0.7219,
65
- "step": 40
66
- },
67
- {
68
- "epoch": 0.71,
69
- "eval_loss": 0.6467391848564148,
70
- "eval_runtime": 318.6731,
71
- "eval_samples_per_second": 1.415,
72
- "eval_steps_per_second": 0.709,
73
- "step": 40
74
- },
75
- {
76
- "epoch": 0.8,
77
- "learning_rate": 2.738412294625369e-05,
78
- "loss": 0.575,
79
- "step": 45
80
- },
81
- {
82
- "epoch": 0.89,
83
- "learning_rate": 2.4612317414848804e-05,
84
- "loss": 0.6344,
85
- "step": 50
86
- },
87
- {
88
- "epoch": 0.98,
89
- "learning_rate": 2.174311485495317e-05,
90
- "loss": 0.5469,
91
- "step": 55
92
- },
93
- {
94
- "epoch": 1.07,
95
- "learning_rate": 1.8837103421790486e-05,
96
- "loss": 0.4469,
97
- "step": 60
98
- },
99
- {
100
- "epoch": 1.07,
101
- "eval_loss": 0.6086602210998535,
102
- "eval_runtime": 320.0767,
103
- "eval_samples_per_second": 1.409,
104
- "eval_steps_per_second": 0.706,
105
- "step": 60
106
- },
107
- {
108
- "epoch": 1.16,
109
- "learning_rate": 1.5955648553359247e-05,
110
- "loss": 0.3141,
111
- "step": 65
112
- },
113
- {
114
- "epoch": 1.25,
115
- "learning_rate": 1.3159597133486628e-05,
116
- "loss": 0.3953,
117
- "step": 70
118
- },
119
- {
120
- "epoch": 1.34,
121
- "learning_rate": 1.0507992605047193e-05,
122
- "loss": 0.3875,
123
- "step": 75
124
- },
125
- {
126
- "epoch": 1.43,
127
- "learning_rate": 8.056828165944282e-06,
128
- "loss": 0.3766,
129
- "step": 80
130
- },
131
- {
132
- "epoch": 1.43,
133
- "eval_loss": 0.6341778635978699,
134
- "eval_runtime": 315.0947,
135
- "eval_samples_per_second": 1.431,
136
- "eval_steps_per_second": 0.717,
137
- "step": 80
138
- }
139
- ],
140
- "logging_steps": 5,
141
- "max_steps": 112,
142
- "num_input_tokens_seen": 0,
143
- "num_train_epochs": 2,
144
- "save_steps": 40,
145
- "total_flos": 4993136137955328.0,
146
- "train_batch_size": 2,
147
- "trial_name": null,
148
- "trial_params": null
149
- }
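Editor's note: best_model_checkpoint points at checkpoint-80 (eval_loss 0.6342) even though the step-60 eval was lower (0.6087); with save_steps = 40, only steps 40 and 80 produced checkpoints, and Trainer records the best metric only when a checkpoint is saved. A small sketch for pulling the eval curve out of a trainer_state.json like the one above (path illustrative):

```python
import json

with open("checkpoint-80/trainer_state.json") as f:
    state = json.load(f)

# Keep only the evaluation entries from log_history.
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
print(evals)  # [(20, 0.6666...), (40, 0.6467...), (60, 0.6087...), (80, 0.6342...)]
```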
4e_5_2VQAlora2nd/training_args.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:1a68ac9690d72595e0b21c7496fbf44aa8a0d4c5bd0264468e35b12eddd8e350
3
- size 4792
 
 
 
 
4e_5_r8_2VQAlora2nd/README.md DELETED
@@ -1,202 +0,0 @@
1
- ---
2
- library_name: peft
3
- base_model: /workspace/ROCO2nd_RSV_llava-v1.6-mistral_PMC
4
- ---
5
-
6
- # Model Card for Model ID
7
-
8
- <!-- Provide a quick summary of what the model is/does. -->
9
-
10
-
11
-
12
- ## Model Details
13
-
14
- ### Model Description
15
-
16
- <!-- Provide a longer summary of what this model is. -->
17
-
18
-
19
-
20
- - **Developed by:** [More Information Needed]
21
- - **Funded by [optional]:** [More Information Needed]
22
- - **Shared by [optional]:** [More Information Needed]
23
- - **Model type:** [More Information Needed]
24
- - **Language(s) (NLP):** [More Information Needed]
25
- - **License:** [More Information Needed]
26
- - **Finetuned from model [optional]:** [More Information Needed]
27
-
28
- ### Model Sources [optional]
29
-
30
- <!-- Provide the basic links for the model. -->
31
-
32
- - **Repository:** [More Information Needed]
33
- - **Paper [optional]:** [More Information Needed]
34
- - **Demo [optional]:** [More Information Needed]
35
-
36
- ## Uses
37
-
38
- <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
39
-
40
- ### Direct Use
41
-
42
- <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
43
-
44
- [More Information Needed]
45
-
46
- ### Downstream Use [optional]
47
-
48
- <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
49
-
50
- [More Information Needed]
51
-
52
- ### Out-of-Scope Use
53
-
54
- <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
55
-
56
- [More Information Needed]
57
-
58
- ## Bias, Risks, and Limitations
59
-
60
- <!-- This section is meant to convey both technical and sociotechnical limitations. -->
61
-
62
- [More Information Needed]
63
-
64
- ### Recommendations
65
-
66
- <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
67
-
68
- Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
69
-
70
- ## How to Get Started with the Model
71
-
72
- Use the code below to get started with the model.
73
-
74
- [More Information Needed]
75
-
76
- ## Training Details
77
-
78
- ### Training Data
79
-
80
- <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
-
82
- [More Information Needed]
83
-
84
- ### Training Procedure
85
-
86
- <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
-
88
- #### Preprocessing [optional]
89
-
90
- [More Information Needed]
91
-
92
-
93
- #### Training Hyperparameters
94
-
95
- - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
96
-
97
- #### Speeds, Sizes, Times [optional]
98
-
99
- <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
100
-
101
- [More Information Needed]
102
-
103
- ## Evaluation
104
-
105
- <!-- This section describes the evaluation protocols and provides the results. -->
106
-
107
- ### Testing Data, Factors & Metrics
108
-
109
- #### Testing Data
110
-
111
- <!-- This should link to a Dataset Card if possible. -->
112
-
113
- [More Information Needed]
114
-
115
- #### Factors
116
-
117
- <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
118
-
119
- [More Information Needed]
120
-
121
- #### Metrics
122
-
123
- <!-- These are the evaluation metrics being used, ideally with a description of why. -->
124
-
125
- [More Information Needed]
126
-
127
- ### Results
128
-
129
- [More Information Needed]
130
-
131
- #### Summary
132
-
133
-
134
-
135
- ## Model Examination [optional]
136
-
137
- <!-- Relevant interpretability work for the model goes here -->
138
-
139
- [More Information Needed]
140
-
141
- ## Environmental Impact
142
-
143
- <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
144
-
145
- Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
146
-
147
- - **Hardware Type:** [More Information Needed]
148
- - **Hours used:** [More Information Needed]
149
- - **Cloud Provider:** [More Information Needed]
150
- - **Compute Region:** [More Information Needed]
151
- - **Carbon Emitted:** [More Information Needed]
152
-
153
- ## Technical Specifications [optional]
154
-
155
- ### Model Architecture and Objective
156
-
157
- [More Information Needed]
158
-
159
- ### Compute Infrastructure
160
-
161
- [More Information Needed]
162
-
163
- #### Hardware
164
-
165
- [More Information Needed]
166
-
167
- #### Software
168
-
169
- [More Information Needed]
170
-
171
- ## Citation [optional]
172
-
173
- <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
174
-
175
- **BibTeX:**
176
-
177
- [More Information Needed]
178
-
179
- **APA:**
180
-
181
- [More Information Needed]
182
-
183
- ## Glossary [optional]
184
-
185
- <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
186
-
187
- [More Information Needed]
188
-
189
- ## More Information [optional]
190
-
191
- [More Information Needed]
192
-
193
- ## Model Card Authors [optional]
194
-
195
- [More Information Needed]
196
-
197
- ## Model Card Contact
198
-
199
- [More Information Needed]
200
- ### Framework versions
201
-
202
- - PEFT 0.10.1.dev0
4e_5_r8_2VQAlora2nd/adapter_config.json DELETED
@@ -1,38 +0,0 @@
1
- {
2
- "alpha_pattern": {},
3
- "auto_mapping": {
4
- "base_model_class": "LlavaMistralForCausalLM",
5
- "parent_library": "llava.model.language_model.llava_mistral"
6
- },
7
- "base_model_name_or_path": "/workspace/ROCO2nd_RSV_llava-v1.6-mistral_PMC",
8
- "bias": "none",
9
- "fan_in_fan_out": false,
10
- "inference_mode": true,
11
- "init_lora_weights": true,
12
- "layer_replication": null,
13
- "layers_pattern": null,
14
- "layers_to_transform": null,
15
- "loftq_config": {},
16
- "lora_alpha": 16,
17
- "lora_dropout": 0.05,
18
- "megatron_config": null,
19
- "megatron_core": "megatron.core",
20
- "modules_to_save": [
21
- "mm_projector"
22
- ],
23
- "peft_type": "LORA",
24
- "r": 8,
25
- "rank_pattern": {},
26
- "revision": null,
27
- "target_modules": [
28
- "k_proj",
29
- "down_proj",
30
- "v_proj",
31
- "up_proj",
32
- "gate_proj",
33
- "q_proj"
34
- ],
35
- "task_type": null,
36
- "use_dora": false,
37
- "use_rslora": false
38
- }
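Editor's note: this r = 8 sweep halves lora_alpha to 16 as well, so the effective LoRA scaling stays at alpha/r = 2, the same as the r = 16 runs above; only adapter capacity changes (the adapter shrinks from ~122 MB to ~82 MB, per the safetensors sizes in this commit). A one-line check of the scaling PEFT applies when use_rslora is false:

```python
for r, alpha in [(16, 32), (8, 16)]:
    print(r, alpha / r)  # both configurations use scaling = 2.0
```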
4e_5_r8_2VQAlora2nd/adapter_model.safetensors DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:f4251bacf03189ccc18b2f3b80149c2984023e37d29f135866c07d3b3ea7f27d
3
- size 82145000
 
 
 
 
4e_5_r8_2VQAlora2nd/checkpoints/r82_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/README.md DELETED
@@ -1,202 +0,0 @@
1
- ---
2
- library_name: peft
3
- base_model: /workspace/ROCO2nd_RSV_llava-v1.6-mistral_PMC
4
- ---
5
-
6
- # Model Card for Model ID
7
-
8
- <!-- Provide a quick summary of what the model is/does. -->
9
-
10
-
11
-
12
- ## Model Details
13
-
14
- ### Model Description
15
-
16
- <!-- Provide a longer summary of what this model is. -->
17
-
18
-
19
-
20
- - **Developed by:** [More Information Needed]
21
- - **Funded by [optional]:** [More Information Needed]
22
- - **Shared by [optional]:** [More Information Needed]
23
- - **Model type:** [More Information Needed]
24
- - **Language(s) (NLP):** [More Information Needed]
25
- - **License:** [More Information Needed]
26
- - **Finetuned from model [optional]:** [More Information Needed]
27
-
28
- ### Model Sources [optional]
29
-
30
- <!-- Provide the basic links for the model. -->
31
-
32
- - **Repository:** [More Information Needed]
33
- - **Paper [optional]:** [More Information Needed]
34
- - **Demo [optional]:** [More Information Needed]
35
-
36
- ## Uses
37
-
38
- <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
39
-
40
- ### Direct Use
41
-
42
- <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
43
-
44
- [More Information Needed]
45
-
46
- ### Downstream Use [optional]
47
-
48
- <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
49
-
50
- [More Information Needed]
51
-
52
- ### Out-of-Scope Use
53
-
54
- <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
55
-
56
- [More Information Needed]
57
-
58
- ## Bias, Risks, and Limitations
59
-
60
- <!-- This section is meant to convey both technical and sociotechnical limitations. -->
61
-
62
- [More Information Needed]
63
-
64
- ### Recommendations
65
-
66
- <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
67
-
68
- Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
69
-
70
- ## How to Get Started with the Model
71
-
72
- Use the code below to get started with the model.
73
-
74
- [More Information Needed]
75
-
76
- ## Training Details
77
-
78
- ### Training Data
79
-
80
- <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
-
82
- [More Information Needed]
83
-
84
- ### Training Procedure
85
-
86
- <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
-
88
- #### Preprocessing [optional]
89
-
90
- [More Information Needed]
91
-
92
-
93
- #### Training Hyperparameters
94
-
95
- - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
96
-
97
- #### Speeds, Sizes, Times [optional]
98
-
99
- <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
100
-
101
- [More Information Needed]
102
-
103
- ## Evaluation
104
-
105
- <!-- This section describes the evaluation protocols and provides the results. -->
106
-
107
- ### Testing Data, Factors & Metrics
108
-
109
- #### Testing Data
110
-
111
- <!-- This should link to a Dataset Card if possible. -->
112
-
113
- [More Information Needed]
114
-
115
- #### Factors
116
-
117
- <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
118
-
119
- [More Information Needed]
120
-
121
- #### Metrics
122
-
123
- <!-- These are the evaluation metrics being used, ideally with a description of why. -->
124
-
125
- [More Information Needed]
126
-
127
- ### Results
128
-
129
- [More Information Needed]
130
-
131
- #### Summary
132
-
133
-
134
-
135
- ## Model Examination [optional]
136
-
137
- <!-- Relevant interpretability work for the model goes here -->
138
-
139
- [More Information Needed]
140
-
141
- ## Environmental Impact
142
-
143
- <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
144
-
145
- Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
146
-
147
- - **Hardware Type:** [More Information Needed]
148
- - **Hours used:** [More Information Needed]
149
- - **Cloud Provider:** [More Information Needed]
150
- - **Compute Region:** [More Information Needed]
151
- - **Carbon Emitted:** [More Information Needed]
152
-
153
- ## Technical Specifications [optional]
154
-
155
- ### Model Architecture and Objective
156
-
157
- [More Information Needed]
158
-
159
- ### Compute Infrastructure
160
-
161
- [More Information Needed]
162
-
163
- #### Hardware
164
-
165
- [More Information Needed]
166
-
167
- #### Software
168
-
169
- [More Information Needed]
170
-
171
- ## Citation [optional]
172
-
173
- <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
174
-
175
- **BibTeX:**
176
-
177
- [More Information Needed]
178
-
179
- **APA:**
180
-
181
- [More Information Needed]
182
-
183
- ## Glossary [optional]
184
-
185
- <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
186
-
187
- [More Information Needed]
188
-
189
- ## More Information [optional]
190
-
191
- [More Information Needed]
192
-
193
- ## Model Card Authors [optional]
194
-
195
- [More Information Needed]
196
-
197
- ## Model Card Contact
198
-
199
- [More Information Needed]
200
- ### Framework versions
201
-
202
- - PEFT 0.10.1.dev0
4e_5_r8_2VQAlora2nd/checkpoints/r82_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/adapter_config.json DELETED
@@ -1,38 +0,0 @@
1
- {
2
- "alpha_pattern": {},
3
- "auto_mapping": {
4
- "base_model_class": "LlavaMistralForCausalLM",
5
- "parent_library": "llava.model.language_model.llava_mistral"
6
- },
7
- "base_model_name_or_path": "/workspace/ROCO2nd_RSV_llava-v1.6-mistral_PMC",
8
- "bias": "none",
9
- "fan_in_fan_out": false,
10
- "inference_mode": true,
11
- "init_lora_weights": true,
12
- "layer_replication": null,
13
- "layers_pattern": null,
14
- "layers_to_transform": null,
15
- "loftq_config": {},
16
- "lora_alpha": 16,
17
- "lora_dropout": 0.05,
18
- "megatron_config": null,
19
- "megatron_core": "megatron.core",
20
- "modules_to_save": [
21
- "mm_projector"
22
- ],
23
- "peft_type": "LORA",
24
- "r": 8,
25
- "rank_pattern": {},
26
- "revision": null,
27
- "target_modules": [
28
- "k_proj",
29
- "down_proj",
30
- "v_proj",
31
- "up_proj",
32
- "gate_proj",
33
- "q_proj"
34
- ],
35
- "task_type": null,
36
- "use_dora": false,
37
- "use_rslora": false
38
- }
4e_5_r8_2VQAlora2nd/checkpoints/r82_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/adapter_model.safetensors DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:f4251bacf03189ccc18b2f3b80149c2984023e37d29f135866c07d3b3ea7f27d
3
- size 82145000
 
 
 
 
4e_5_r8_2VQAlora2nd/checkpoints/r82_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/optimizer.pt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:142e2e403e738467fe0ae0d0cad7e4805d9c808c4ce3bb564c84cab83eae59f8
3
- size 159745054
 
 
 
 
4e_5_r8_2VQAlora2nd/checkpoints/r82_4e-5VQA_ROCO2nd_RSV_llava-v1.6-mistral_PMC/checkpoint-80/rng_state.pth DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:d84d4f48c468a94fd4072c91a59f565bee1ca2a3e59f347bb7cf50192b12534d
3
- size 14244