diff --git a/README.md b/README.md
index c527c4363134076b2a26b0236754a64b9aae6f41..fcc733e583f9476223ee4db18792d5c7524623f3 100644
--- a/README.md
+++ b/README.md
@@ -1,27 +1,19 @@
---
library_name: peft
tags:
-- liuhaotian/llava-v1.5-13b_10.0
- generated_from_trainer
base_model: liuhaotian/llava-v1.5-13b
model-index:
-- name: liuhaotian/llava-v1.5-13b_10.0
+- name: llava_13b_exact_location_name_synthetic
results: []
---
-# liuhaotian/llava-v1.5-13b_10.0
+# llava_13b_exact_location_name_synthetic
-This model is a fine-tuned version of [liuhaotian/llava-v1.5-13b_10.0](https://huggingface.co./liuhaotian/llava-v1.5-13b_10.0) on an unknown dataset.
-It achieves the following results on the evaluation set:
-- eval_loss: 0.0010
-- eval_runtime: 55.7144
-- eval_samples_per_second: 15.562
-- eval_steps_per_second: 0.503
-- epoch: 9.0
-- step: 252
+This model is a fine-tuned version of [liuhaotian/llava-v1.5-13b](https://huggingface.co./liuhaotian/llava-v1.5-13b) on an unknown dataset.
## Model description
diff --git a/adapter_config.json b/adapter_config.json
index c2ddee3559dca06cb07943e3c498fa859ddffe15..f6be111dde4c6451bcdee1632183fb960f7c70b3 100644
--- a/adapter_config.json
+++ b/adapter_config.json
@@ -21,12 +21,12 @@
"revision": null,
"target_modules": [
"k_proj",
- "up_proj",
- "o_proj",
+ "v_proj",
"q_proj",
"down_proj",
- "gate_proj",
- "v_proj"
+ "up_proj",
+ "o_proj",
+ "gate_proj"
],
"task_type": "CAUSAL_LM",
"use_dora": false,
diff --git a/adapter_model.safetensors b/adapter_model.safetensors
index 6c033eed83f09db83ebb80814a6ef65a587d7537..733edd4220f0bc58016303f21240cd38a864fcbc 100644
--- a/adapter_model.safetensors
+++ b/adapter_model.safetensors
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:5985803a3f3f0a18082476be321a65f9db36b735e5aadd62159de53c1f605f22
+oid sha256:8bb3a80b637c433e0ea348fa437556e993ed780f00affdec993142f675c2af7d
size 1001466944
diff --git a/num_examples=100/llava-v1.5-13b_1.0/README.md b/num_examples=100/llava-v1.5-13b_1.0/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9fb436e80fbd4c6055e6bd286b324e98c6a501cd
--- /dev/null
+++ b/num_examples=100/llava-v1.5-13b_1.0/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: liuhaotian/llava-v1.5-13b
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/num_examples=100/llava-v1.5-13b_1.0/adapter_config.json b/num_examples=100/llava-v1.5-13b_1.0/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f6be111dde4c6451bcdee1632183fb960f7c70b3
--- /dev/null
+++ b/num_examples=100/llava-v1.5-13b_1.0/adapter_config.json
@@ -0,0 +1,34 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "liuhaotian/llava-v1.5-13b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "v_proj",
+ "q_proj",
+ "down_proj",
+ "up_proj",
+ "o_proj",
+ "gate_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/num_examples=100/llava-v1.5-13b_1.0/adapter_model.safetensors b/num_examples=100/llava-v1.5-13b_1.0/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..733edd4220f0bc58016303f21240cd38a864fcbc
--- /dev/null
+++ b/num_examples=100/llava-v1.5-13b_1.0/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8bb3a80b637c433e0ea348fa437556e993ed780f00affdec993142f675c2af7d
+size 1001466944
diff --git a/num_examples=100/llava-v1.5-13b_1.0/special_tokens_map.json b/num_examples=100/llava-v1.5-13b_1.0/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/num_examples=100/llava-v1.5-13b_1.0/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<unk>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
diff --git a/num_examples=100/llava-v1.5-13b_1.0/tokenizer.model b/num_examples=100/llava-v1.5-13b_1.0/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/num_examples=100/llava-v1.5-13b_1.0/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/num_examples=100/llava-v1.5-13b_1.0/tokenizer_config.json b/num_examples=100/llava-v1.5-13b_1.0/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..2d53c0f8edb049fa98763ee75652fafa68bf7f42
--- /dev/null
+++ b/num_examples=100/llava-v1.5-13b_1.0/tokenizer_config.json
@@ -0,0 +1,42 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": false,
+  "model_max_length": 2048,
+  "pad_token": "<unk>",
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}
diff --git a/num_examples=100/llava-v1.5-13b_1.0/training_args.bin b/num_examples=100/llava-v1.5-13b_1.0/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2c41a1ebb577810274e22b98f051ca0985aa7062
--- /dev/null
+++ b/num_examples=100/llava-v1.5-13b_1.0/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab60061c6db4983ad6a9334f47864cfc12b2212c4796c83a0d247a55439133a6
+size 6840
diff --git a/num_examples=200/llava-v1.5-13b_1.0/README.md b/num_examples=200/llava-v1.5-13b_1.0/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9fb436e80fbd4c6055e6bd286b324e98c6a501cd
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_1.0/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: liuhaotian/llava-v1.5-13b
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/num_examples=200/llava-v1.5-13b_1.0/adapter_config.json b/num_examples=200/llava-v1.5-13b_1.0/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2ddee3559dca06cb07943e3c498fa859ddffe15
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_1.0/adapter_config.json
@@ -0,0 +1,34 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "liuhaotian/llava-v1.5-13b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "up_proj",
+ "o_proj",
+ "q_proj",
+ "down_proj",
+ "gate_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/num_examples=200/llava-v1.5-13b_1.0/adapter_model.safetensors b/num_examples=200/llava-v1.5-13b_1.0/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..040ad72e4562e9ccff672dd1f1c216d0fda83eb2
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_1.0/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1b2582b692b13611e3505defb6e50477ccb75c89fbdcca86cf157e1a7ff687f
+size 1001466944
diff --git a/num_examples=200/llava-v1.5-13b_1.0/special_tokens_map.json b/num_examples=200/llava-v1.5-13b_1.0/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_1.0/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<unk>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
diff --git a/num_examples=200/llava-v1.5-13b_1.0/tokenizer.model b/num_examples=200/llava-v1.5-13b_1.0/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_1.0/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/num_examples=200/llava-v1.5-13b_1.0/tokenizer_config.json b/num_examples=200/llava-v1.5-13b_1.0/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..2d53c0f8edb049fa98763ee75652fafa68bf7f42
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_1.0/tokenizer_config.json
@@ -0,0 +1,42 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": false,
+  "model_max_length": 2048,
+  "pad_token": "<unk>",
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}
diff --git a/num_examples=200/llava-v1.5-13b_1.0/training_args.bin b/num_examples=200/llava-v1.5-13b_1.0/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..cbf48a1f013a581dea919afeab17d1b9883be6ca
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_1.0/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8e7002d9da22439cd2142660ed0913d77b3af07a95d83f820f2b137e4beb14e
+size 6840
diff --git a/num_examples=200/llava-v1.5-13b_10.0/README.md b/num_examples=200/llava-v1.5-13b_10.0/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9fb436e80fbd4c6055e6bd286b324e98c6a501cd
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_10.0/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: liuhaotian/llava-v1.5-13b
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/num_examples=200/llava-v1.5-13b_10.0/adapter_config.json b/num_examples=200/llava-v1.5-13b_10.0/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2ddee3559dca06cb07943e3c498fa859ddffe15
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_10.0/adapter_config.json
@@ -0,0 +1,34 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "liuhaotian/llava-v1.5-13b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "up_proj",
+ "o_proj",
+ "q_proj",
+ "down_proj",
+ "gate_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/num_examples=200/llava-v1.5-13b_10.0/adapter_model.safetensors b/num_examples=200/llava-v1.5-13b_10.0/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..6c033eed83f09db83ebb80814a6ef65a587d7537
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_10.0/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5985803a3f3f0a18082476be321a65f9db36b735e5aadd62159de53c1f605f22
+size 1001466944
diff --git a/num_examples=200/llava-v1.5-13b_10.0/special_tokens_map.json b/num_examples=200/llava-v1.5-13b_10.0/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_10.0/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<unk>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
diff --git a/num_examples=200/llava-v1.5-13b_10.0/tokenizer.model b/num_examples=200/llava-v1.5-13b_10.0/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_10.0/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/num_examples=200/llava-v1.5-13b_10.0/tokenizer_config.json b/num_examples=200/llava-v1.5-13b_10.0/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..2d53c0f8edb049fa98763ee75652fafa68bf7f42
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_10.0/tokenizer_config.json
@@ -0,0 +1,42 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": false,
+  "model_max_length": 2048,
+  "pad_token": "<unk>",
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}
diff --git a/num_examples=200/llava-v1.5-13b_10.0/training_args.bin b/num_examples=200/llava-v1.5-13b_10.0/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..cbf48a1f013a581dea919afeab17d1b9883be6ca
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_10.0/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8e7002d9da22439cd2142660ed0913d77b3af07a95d83f820f2b137e4beb14e
+size 6840
diff --git a/num_examples=200/llava-v1.5-13b_2.0/README.md b/num_examples=200/llava-v1.5-13b_2.0/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9fb436e80fbd4c6055e6bd286b324e98c6a501cd
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_2.0/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: liuhaotian/llava-v1.5-13b
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/num_examples=200/llava-v1.5-13b_2.0/adapter_config.json b/num_examples=200/llava-v1.5-13b_2.0/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2ddee3559dca06cb07943e3c498fa859ddffe15
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_2.0/adapter_config.json
@@ -0,0 +1,34 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "liuhaotian/llava-v1.5-13b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "up_proj",
+ "o_proj",
+ "q_proj",
+ "down_proj",
+ "gate_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/num_examples=200/llava-v1.5-13b_2.0/adapter_model.safetensors b/num_examples=200/llava-v1.5-13b_2.0/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..50624398e879de994ff31b0c9206ee5e7411fdce
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_2.0/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1e42ca24e821a7632ce5d6b9865cf2dd3483723b06e69fa1f53a0eab6c19c90
+size 1001466944
diff --git a/num_examples=200/llava-v1.5-13b_2.0/special_tokens_map.json b/num_examples=200/llava-v1.5-13b_2.0/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_2.0/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<unk>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
diff --git a/num_examples=200/llava-v1.5-13b_2.0/tokenizer.model b/num_examples=200/llava-v1.5-13b_2.0/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_2.0/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/num_examples=200/llava-v1.5-13b_2.0/tokenizer_config.json b/num_examples=200/llava-v1.5-13b_2.0/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..2d53c0f8edb049fa98763ee75652fafa68bf7f42
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_2.0/tokenizer_config.json
@@ -0,0 +1,42 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": false,
+  "model_max_length": 2048,
+  "pad_token": "<unk>",
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}
diff --git a/num_examples=200/llava-v1.5-13b_2.0/training_args.bin b/num_examples=200/llava-v1.5-13b_2.0/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..cbf48a1f013a581dea919afeab17d1b9883be6ca
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_2.0/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8e7002d9da22439cd2142660ed0913d77b3af07a95d83f820f2b137e4beb14e
+size 6840
diff --git a/num_examples=200/llava-v1.5-13b_3.0/README.md b/num_examples=200/llava-v1.5-13b_3.0/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9fb436e80fbd4c6055e6bd286b324e98c6a501cd
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_3.0/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: liuhaotian/llava-v1.5-13b
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/num_examples=200/llava-v1.5-13b_3.0/adapter_config.json b/num_examples=200/llava-v1.5-13b_3.0/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2ddee3559dca06cb07943e3c498fa859ddffe15
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_3.0/adapter_config.json
@@ -0,0 +1,34 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "liuhaotian/llava-v1.5-13b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "up_proj",
+ "o_proj",
+ "q_proj",
+ "down_proj",
+ "gate_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/num_examples=200/llava-v1.5-13b_3.0/adapter_model.safetensors b/num_examples=200/llava-v1.5-13b_3.0/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..70478e78dea9a60afe33ffe3e3c50f896dc24142
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_3.0/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2089acf3bf40848a4aa479b41822462e83ee4405b34447707fcb7551f3c4341b
+size 1001466944
diff --git a/num_examples=200/llava-v1.5-13b_3.0/special_tokens_map.json b/num_examples=200/llava-v1.5-13b_3.0/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_3.0/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<unk>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
diff --git a/num_examples=200/llava-v1.5-13b_3.0/tokenizer.model b/num_examples=200/llava-v1.5-13b_3.0/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_3.0/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/num_examples=200/llava-v1.5-13b_3.0/tokenizer_config.json b/num_examples=200/llava-v1.5-13b_3.0/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..2d53c0f8edb049fa98763ee75652fafa68bf7f42
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_3.0/tokenizer_config.json
@@ -0,0 +1,42 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": false,
+  "model_max_length": 2048,
+  "pad_token": "<unk>",
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}
diff --git a/num_examples=200/llava-v1.5-13b_3.0/training_args.bin b/num_examples=200/llava-v1.5-13b_3.0/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..cbf48a1f013a581dea919afeab17d1b9883be6ca
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_3.0/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8e7002d9da22439cd2142660ed0913d77b3af07a95d83f820f2b137e4beb14e
+size 6840
diff --git a/num_examples=200/llava-v1.5-13b_4.0/README.md b/num_examples=200/llava-v1.5-13b_4.0/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9fb436e80fbd4c6055e6bd286b324e98c6a501cd
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_4.0/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: liuhaotian/llava-v1.5-13b
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/num_examples=200/llava-v1.5-13b_4.0/adapter_config.json b/num_examples=200/llava-v1.5-13b_4.0/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2ddee3559dca06cb07943e3c498fa859ddffe15
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_4.0/adapter_config.json
@@ -0,0 +1,34 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "liuhaotian/llava-v1.5-13b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "up_proj",
+ "o_proj",
+ "q_proj",
+ "down_proj",
+ "gate_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/num_examples=200/llava-v1.5-13b_4.0/adapter_model.safetensors b/num_examples=200/llava-v1.5-13b_4.0/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ec840c3ab270e56a92d2e98f33d8cd01a15901ff
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_4.0/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a6ee45a7adf5187849bca73285c3830d0db91f3c09bcf79954eda758ab7d994
+size 1001466944
diff --git a/num_examples=200/llava-v1.5-13b_4.0/special_tokens_map.json b/num_examples=200/llava-v1.5-13b_4.0/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_4.0/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<unk>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
diff --git a/num_examples=200/llava-v1.5-13b_4.0/tokenizer.model b/num_examples=200/llava-v1.5-13b_4.0/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_4.0/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/num_examples=200/llava-v1.5-13b_4.0/tokenizer_config.json b/num_examples=200/llava-v1.5-13b_4.0/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..2d53c0f8edb049fa98763ee75652fafa68bf7f42
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_4.0/tokenizer_config.json
@@ -0,0 +1,42 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": false,
+  "model_max_length": 2048,
+  "pad_token": "<unk>",
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}
diff --git a/num_examples=200/llava-v1.5-13b_4.0/training_args.bin b/num_examples=200/llava-v1.5-13b_4.0/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..cbf48a1f013a581dea919afeab17d1b9883be6ca
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_4.0/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8e7002d9da22439cd2142660ed0913d77b3af07a95d83f820f2b137e4beb14e
+size 6840
diff --git a/num_examples=200/llava-v1.5-13b_5.0/README.md b/num_examples=200/llava-v1.5-13b_5.0/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9fb436e80fbd4c6055e6bd286b324e98c6a501cd
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_5.0/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: liuhaotian/llava-v1.5-13b
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/num_examples=200/llava-v1.5-13b_5.0/adapter_config.json b/num_examples=200/llava-v1.5-13b_5.0/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2ddee3559dca06cb07943e3c498fa859ddffe15
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_5.0/adapter_config.json
@@ -0,0 +1,34 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "liuhaotian/llava-v1.5-13b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "up_proj",
+ "o_proj",
+ "q_proj",
+ "down_proj",
+ "gate_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/num_examples=200/llava-v1.5-13b_5.0/adapter_model.safetensors b/num_examples=200/llava-v1.5-13b_5.0/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..a34d82e35d8c948d4c8942dfe9ba65808f1c5632
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_5.0/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e66cda615a3e3265faf5b882c6144ef5f42007c888903415f0fa9533d2f1106
+size 1001466944
diff --git a/num_examples=200/llava-v1.5-13b_5.0/special_tokens_map.json b/num_examples=200/llava-v1.5-13b_5.0/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_5.0/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<unk>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
diff --git a/num_examples=200/llava-v1.5-13b_5.0/tokenizer.model b/num_examples=200/llava-v1.5-13b_5.0/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_5.0/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/num_examples=200/llava-v1.5-13b_5.0/tokenizer_config.json b/num_examples=200/llava-v1.5-13b_5.0/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..2d53c0f8edb049fa98763ee75652fafa68bf7f42
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_5.0/tokenizer_config.json
@@ -0,0 +1,42 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": false,
+  "model_max_length": 2048,
+  "pad_token": "<unk>",
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}
diff --git a/num_examples=200/llava-v1.5-13b_5.0/training_args.bin b/num_examples=200/llava-v1.5-13b_5.0/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..cbf48a1f013a581dea919afeab17d1b9883be6ca
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_5.0/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8e7002d9da22439cd2142660ed0913d77b3af07a95d83f820f2b137e4beb14e
+size 6840
diff --git a/num_examples=200/llava-v1.5-13b_6.0/README.md b/num_examples=200/llava-v1.5-13b_6.0/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9fb436e80fbd4c6055e6bd286b324e98c6a501cd
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_6.0/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: liuhaotian/llava-v1.5-13b
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/num_examples=200/llava-v1.5-13b_6.0/adapter_config.json b/num_examples=200/llava-v1.5-13b_6.0/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2ddee3559dca06cb07943e3c498fa859ddffe15
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_6.0/adapter_config.json
@@ -0,0 +1,34 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "liuhaotian/llava-v1.5-13b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "up_proj",
+ "o_proj",
+ "q_proj",
+ "down_proj",
+ "gate_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/num_examples=200/llava-v1.5-13b_6.0/adapter_model.safetensors b/num_examples=200/llava-v1.5-13b_6.0/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..756fd1ecc48226c53b79e2761ec6883bd4927af2
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_6.0/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8733188a1dd909269bb6a8365325098f5508038b794865da757eb86bd9d6c0b2
+size 1001466944
diff --git a/num_examples=200/llava-v1.5-13b_6.0/special_tokens_map.json b/num_examples=200/llava-v1.5-13b_6.0/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_6.0/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<unk>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
diff --git a/num_examples=200/llava-v1.5-13b_6.0/tokenizer.model b/num_examples=200/llava-v1.5-13b_6.0/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_6.0/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/num_examples=200/llava-v1.5-13b_6.0/tokenizer_config.json b/num_examples=200/llava-v1.5-13b_6.0/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..2d53c0f8edb049fa98763ee75652fafa68bf7f42
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_6.0/tokenizer_config.json
@@ -0,0 +1,42 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": false,
+  "model_max_length": 2048,
+  "pad_token": "<unk>",
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}
diff --git a/num_examples=200/llava-v1.5-13b_6.0/training_args.bin b/num_examples=200/llava-v1.5-13b_6.0/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..cbf48a1f013a581dea919afeab17d1b9883be6ca
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_6.0/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8e7002d9da22439cd2142660ed0913d77b3af07a95d83f820f2b137e4beb14e
+size 6840
diff --git a/num_examples=200/llava-v1.5-13b_7.0/README.md b/num_examples=200/llava-v1.5-13b_7.0/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9fb436e80fbd4c6055e6bd286b324e98c6a501cd
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_7.0/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: liuhaotian/llava-v1.5-13b
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/num_examples=200/llava-v1.5-13b_7.0/adapter_config.json b/num_examples=200/llava-v1.5-13b_7.0/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2ddee3559dca06cb07943e3c498fa859ddffe15
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_7.0/adapter_config.json
@@ -0,0 +1,34 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "liuhaotian/llava-v1.5-13b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "up_proj",
+ "o_proj",
+ "q_proj",
+ "down_proj",
+ "gate_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/num_examples=200/llava-v1.5-13b_7.0/adapter_model.safetensors b/num_examples=200/llava-v1.5-13b_7.0/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d785df380578bbb3cfee1f2fbd381ac534de6760
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_7.0/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18cf7a7d3e9dd7f3110b90091a8441cd5c95671acff2abdcbc7c4d7a6dc78566
+size 1001466944
diff --git a/num_examples=200/llava-v1.5-13b_7.0/special_tokens_map.json b/num_examples=200/llava-v1.5-13b_7.0/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_7.0/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/num_examples=200/llava-v1.5-13b_7.0/tokenizer.model b/num_examples=200/llava-v1.5-13b_7.0/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_7.0/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/num_examples=200/llava-v1.5-13b_7.0/tokenizer_config.json b/num_examples=200/llava-v1.5-13b_7.0/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..2d53c0f8edb049fa98763ee75652fafa68bf7f42
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_7.0/tokenizer_config.json
@@ -0,0 +1,42 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 2048,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/num_examples=200/llava-v1.5-13b_7.0/training_args.bin b/num_examples=200/llava-v1.5-13b_7.0/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..cbf48a1f013a581dea919afeab17d1b9883be6ca
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_7.0/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8e7002d9da22439cd2142660ed0913d77b3af07a95d83f820f2b137e4beb14e
+size 6840
diff --git a/num_examples=200/llava-v1.5-13b_8.0/README.md b/num_examples=200/llava-v1.5-13b_8.0/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9fb436e80fbd4c6055e6bd286b324e98c6a501cd
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_8.0/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: liuhaotian/llava-v1.5-13b
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/num_examples=200/llava-v1.5-13b_8.0/adapter_config.json b/num_examples=200/llava-v1.5-13b_8.0/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2ddee3559dca06cb07943e3c498fa859ddffe15
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_8.0/adapter_config.json
@@ -0,0 +1,34 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "liuhaotian/llava-v1.5-13b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "up_proj",
+ "o_proj",
+ "q_proj",
+ "down_proj",
+ "gate_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/num_examples=200/llava-v1.5-13b_8.0/adapter_model.safetensors b/num_examples=200/llava-v1.5-13b_8.0/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..222648feb7076f3c633e890138d4481595f69e7e
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_8.0/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9374b70a616da686a8c382b9b31a806602de707137bd3481602bf94fa92e77ed
+size 1001466944
diff --git a/num_examples=200/llava-v1.5-13b_8.0/special_tokens_map.json b/num_examples=200/llava-v1.5-13b_8.0/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_8.0/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/num_examples=200/llava-v1.5-13b_8.0/tokenizer.model b/num_examples=200/llava-v1.5-13b_8.0/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_8.0/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/num_examples=200/llava-v1.5-13b_8.0/tokenizer_config.json b/num_examples=200/llava-v1.5-13b_8.0/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..2d53c0f8edb049fa98763ee75652fafa68bf7f42
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_8.0/tokenizer_config.json
@@ -0,0 +1,42 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 2048,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/num_examples=200/llava-v1.5-13b_8.0/training_args.bin b/num_examples=200/llava-v1.5-13b_8.0/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..cbf48a1f013a581dea919afeab17d1b9883be6ca
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_8.0/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8e7002d9da22439cd2142660ed0913d77b3af07a95d83f820f2b137e4beb14e
+size 6840
diff --git a/num_examples=200/llava-v1.5-13b_9.0/README.md b/num_examples=200/llava-v1.5-13b_9.0/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9fb436e80fbd4c6055e6bd286b324e98c6a501cd
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_9.0/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: liuhaotian/llava-v1.5-13b
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/num_examples=200/llava-v1.5-13b_9.0/adapter_config.json b/num_examples=200/llava-v1.5-13b_9.0/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2ddee3559dca06cb07943e3c498fa859ddffe15
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_9.0/adapter_config.json
@@ -0,0 +1,34 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "liuhaotian/llava-v1.5-13b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "up_proj",
+ "o_proj",
+ "q_proj",
+ "down_proj",
+ "gate_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/num_examples=200/llava-v1.5-13b_9.0/adapter_model.safetensors b/num_examples=200/llava-v1.5-13b_9.0/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..7f27e855a1d89b3c472f1456c2029e2cc6784867
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_9.0/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57bbb27dd08af15c9163170bcd6267d439b33a9de4f6f84e50c70535c3a2ef46
+size 1001466944
diff --git a/num_examples=200/llava-v1.5-13b_9.0/special_tokens_map.json b/num_examples=200/llava-v1.5-13b_9.0/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_9.0/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/num_examples=200/llava-v1.5-13b_9.0/tokenizer.model b/num_examples=200/llava-v1.5-13b_9.0/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_9.0/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/num_examples=200/llava-v1.5-13b_9.0/tokenizer_config.json b/num_examples=200/llava-v1.5-13b_9.0/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..2d53c0f8edb049fa98763ee75652fafa68bf7f42
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_9.0/tokenizer_config.json
@@ -0,0 +1,42 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 2048,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/num_examples=200/llava-v1.5-13b_9.0/training_args.bin b/num_examples=200/llava-v1.5-13b_9.0/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..cbf48a1f013a581dea919afeab17d1b9883be6ca
--- /dev/null
+++ b/num_examples=200/llava-v1.5-13b_9.0/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8e7002d9da22439cd2142660ed0913d77b3af07a95d83f820f2b137e4beb14e
+size 6840
diff --git a/trainer_state.json b/trainer_state.json
index bc28c2d2e37b6c0b4e8f4c3c0f2420ab846b4c42..0f305a8154b3b29c717ba4f9d3312e14c27ef08f 100644
--- a/trainer_state.json
+++ b/trainer_state.json
@@ -17,383 +17,383 @@
{
"epoch": 0.07,
"learning_rate": 6.309297535714573e-05,
- "loss": 1.2013,
+ "loss": 1.2017,
"step": 2
},
{
"epoch": 0.11,
"learning_rate": 0.0001,
- "loss": 1.1723,
+ "loss": 1.1717,
"step": 3
},
{
"epoch": 0.14,
"learning_rate": 0.00012618595071429146,
- "loss": 1.1317,
+ "loss": 1.1308,
"step": 4
},
{
"epoch": 0.18,
"learning_rate": 0.0001464973520717927,
- "loss": 1.0776,
+ "loss": 1.0773,
"step": 5
},
{
"epoch": 0.21,
"learning_rate": 0.00016309297535714573,
- "loss": 1.0266,
+ "loss": 1.0267,
"step": 6
},
{
"epoch": 0.25,
"learning_rate": 0.00017712437491614223,
- "loss": 0.9953,
+ "loss": 0.9945,
"step": 7
},
{
"epoch": 0.29,
"learning_rate": 0.0001892789260714372,
- "loss": 0.9563,
+ "loss": 0.9553,
"step": 8
},
{
"epoch": 0.32,
"learning_rate": 0.0002,
- "loss": 0.9337,
+ "loss": 0.9318,
"step": 9
},
{
"epoch": 0.36,
"learning_rate": 0.0002,
- "loss": 0.9513,
+ "loss": 0.9507,
"step": 10
},
{
"epoch": 0.39,
"learning_rate": 0.0002,
- "loss": 0.8823,
+ "loss": 0.8816,
"step": 11
},
{
"epoch": 0.43,
"learning_rate": 0.0002,
- "loss": 0.8438,
+ "loss": 0.8428,
"step": 12
},
{
"epoch": 0.46,
"learning_rate": 0.0002,
- "loss": 0.8205,
+ "loss": 0.8199,
"step": 13
},
{
"epoch": 0.5,
"learning_rate": 0.0002,
- "loss": 0.7802,
+ "loss": 0.78,
"step": 14
},
{
"epoch": 0.54,
"learning_rate": 0.0002,
- "loss": 0.7714,
+ "loss": 0.7722,
"step": 15
},
{
"epoch": 0.57,
"learning_rate": 0.0002,
- "loss": 0.732,
+ "loss": 0.7317,
"step": 16
},
{
"epoch": 0.61,
"learning_rate": 0.0002,
- "loss": 0.6475,
+ "loss": 0.6472,
"step": 17
},
{
"epoch": 0.64,
"learning_rate": 0.0002,
- "loss": 0.7026,
+ "loss": 0.7027,
"step": 18
},
{
"epoch": 0.68,
"learning_rate": 0.0002,
- "loss": 0.611,
+ "loss": 0.6111,
"step": 19
},
{
"epoch": 0.71,
"learning_rate": 0.0002,
- "loss": 0.6581,
+ "loss": 0.6577,
"step": 20
},
{
"epoch": 0.75,
"learning_rate": 0.0002,
- "loss": 0.5189,
+ "loss": 0.5182,
"step": 21
},
{
"epoch": 0.79,
"learning_rate": 0.0002,
- "loss": 0.4852,
+ "loss": 0.4826,
"step": 22
},
{
"epoch": 0.82,
"learning_rate": 0.0002,
- "loss": 0.4482,
+ "loss": 0.4477,
"step": 23
},
{
"epoch": 0.86,
"learning_rate": 0.0002,
- "loss": 0.3752,
+ "loss": 0.3735,
"step": 24
},
{
"epoch": 0.89,
"learning_rate": 0.0002,
- "loss": 0.4258,
+ "loss": 0.4257,
"step": 25
},
{
"epoch": 0.93,
"learning_rate": 0.0002,
- "loss": 0.3545,
+ "loss": 0.3523,
"step": 26
},
{
"epoch": 0.96,
"learning_rate": 0.0002,
- "loss": 0.3977,
+ "loss": 0.3972,
"step": 27
},
{
"epoch": 1.0,
"learning_rate": 0.0002,
- "loss": 0.3239,
+ "loss": 0.3234,
"step": 28
},
{
"epoch": 1.0,
- "eval_loss": 0.24691244959831238,
- "eval_runtime": 54.9798,
- "eval_samples_per_second": 15.769,
+ "eval_loss": 0.2477385699748993,
+ "eval_runtime": 54.9686,
+ "eval_samples_per_second": 15.773,
"eval_steps_per_second": 0.509,
"step": 28
},
{
"epoch": 1.04,
"learning_rate": 0.0002,
- "loss": 0.2309,
+ "loss": 0.2312,
"step": 29
},
{
"epoch": 1.07,
"learning_rate": 0.0002,
- "loss": 0.2426,
+ "loss": 0.2441,
"step": 30
},
{
"epoch": 1.11,
"learning_rate": 0.0002,
- "loss": 0.2226,
+ "loss": 0.2217,
"step": 31
},
{
"epoch": 1.14,
"learning_rate": 0.0002,
- "loss": 0.1932,
+ "loss": 0.1936,
"step": 32
},
{
"epoch": 1.18,
"learning_rate": 0.0002,
- "loss": 0.212,
+ "loss": 0.2099,
"step": 33
},
{
"epoch": 1.21,
"learning_rate": 0.0002,
- "loss": 0.1909,
+ "loss": 0.1898,
"step": 34
},
{
"epoch": 1.25,
"learning_rate": 0.0002,
- "loss": 0.2315,
+ "loss": 0.2296,
"step": 35
},
{
"epoch": 1.29,
"learning_rate": 0.0002,
- "loss": 0.1531,
+ "loss": 0.1523,
"step": 36
},
{
"epoch": 1.32,
"learning_rate": 0.0002,
- "loss": 0.1523,
+ "loss": 0.151,
"step": 37
},
{
"epoch": 1.36,
"learning_rate": 0.0002,
- "loss": 0.0959,
+ "loss": 0.0952,
"step": 38
},
{
"epoch": 1.39,
"learning_rate": 0.0002,
- "loss": 0.1044,
+ "loss": 0.1017,
"step": 39
},
{
"epoch": 1.43,
"learning_rate": 0.0002,
- "loss": 0.1752,
+ "loss": 0.1736,
"step": 40
},
{
"epoch": 1.46,
"learning_rate": 0.0002,
- "loss": 0.1159,
+ "loss": 0.1143,
"step": 41
},
{
"epoch": 1.5,
"learning_rate": 0.0002,
- "loss": 0.1041,
+ "loss": 0.1064,
"step": 42
},
{
"epoch": 1.54,
"learning_rate": 0.0002,
- "loss": 0.1311,
+ "loss": 0.1315,
"step": 43
},
{
"epoch": 1.57,
"learning_rate": 0.0002,
- "loss": 0.1307,
+ "loss": 0.1297,
"step": 44
},
{
"epoch": 1.61,
"learning_rate": 0.0002,
- "loss": 0.0678,
+ "loss": 0.0686,
"step": 45
},
{
"epoch": 1.64,
"learning_rate": 0.0002,
- "loss": 0.0746,
+ "loss": 0.0729,
"step": 46
},
{
"epoch": 1.68,
"learning_rate": 0.0002,
- "loss": 0.0903,
+ "loss": 0.0909,
"step": 47
},
{
"epoch": 1.71,
"learning_rate": 0.0002,
- "loss": 0.1146,
+ "loss": 0.1128,
"step": 48
},
{
"epoch": 1.75,
"learning_rate": 0.0002,
- "loss": 0.1019,
+ "loss": 0.0989,
"step": 49
},
{
"epoch": 1.79,
"learning_rate": 0.0002,
- "loss": 0.056,
+ "loss": 0.0551,
"step": 50
},
{
"epoch": 1.82,
"learning_rate": 0.0002,
- "loss": 0.1291,
+ "loss": 0.1288,
"step": 51
},
{
"epoch": 1.86,
"learning_rate": 0.0002,
- "loss": 0.0487,
+ "loss": 0.0469,
"step": 52
},
{
"epoch": 1.89,
"learning_rate": 0.0002,
- "loss": 0.1094,
+ "loss": 0.1091,
"step": 53
},
{
"epoch": 1.93,
"learning_rate": 0.0002,
- "loss": 0.1581,
+ "loss": 0.1593,
"step": 54
},
{
"epoch": 1.96,
"learning_rate": 0.0002,
- "loss": 0.1083,
+ "loss": 0.1075,
"step": 55
},
{
"epoch": 2.0,
"learning_rate": 0.0002,
- "loss": 0.0396,
+ "loss": 0.0391,
"step": 56
},
{
"epoch": 2.0,
- "eval_loss": 0.04381483793258667,
- "eval_runtime": 55.1711,
- "eval_samples_per_second": 15.715,
+ "eval_loss": 0.04489068686962128,
+ "eval_runtime": 55.0922,
+ "eval_samples_per_second": 15.737,
"eval_steps_per_second": 0.508,
"step": 56
},
{
"epoch": 2.04,
"learning_rate": 0.0002,
- "loss": 0.0368,
+ "loss": 0.0387,
"step": 57
},
{
"epoch": 2.07,
"learning_rate": 0.0002,
- "loss": 0.0299,
+ "loss": 0.0306,
"step": 58
},
{
"epoch": 2.11,
"learning_rate": 0.0002,
- "loss": 0.0304,
+ "loss": 0.0307,
"step": 59
},
{
"epoch": 2.14,
"learning_rate": 0.0002,
- "loss": 0.0429,
+ "loss": 0.0451,
"step": 60
},
{
"epoch": 2.18,
"learning_rate": 0.0002,
- "loss": 0.0256,
+ "loss": 0.0245,
"step": 61
},
{
"epoch": 2.21,
"learning_rate": 0.0002,
- "loss": 0.0375,
+ "loss": 0.0351,
"step": 62
},
{
@@ -405,323 +405,323 @@
{
"epoch": 2.29,
"learning_rate": 0.0002,
- "loss": 0.022,
+ "loss": 0.0244,
"step": 64
},
{
"epoch": 2.32,
"learning_rate": 0.0002,
- "loss": 0.0292,
+ "loss": 0.0309,
"step": 65
},
{
"epoch": 2.36,
"learning_rate": 0.0002,
- "loss": 0.0273,
+ "loss": 0.0272,
"step": 66
},
{
"epoch": 2.39,
"learning_rate": 0.0002,
- "loss": 0.0152,
+ "loss": 0.0136,
"step": 67
},
{
"epoch": 2.43,
"learning_rate": 0.0002,
- "loss": 0.0301,
+ "loss": 0.0297,
"step": 68
},
{
"epoch": 2.46,
"learning_rate": 0.0002,
- "loss": 0.0358,
+ "loss": 0.0356,
"step": 69
},
{
"epoch": 2.5,
"learning_rate": 0.0002,
- "loss": 0.0269,
+ "loss": 0.0284,
"step": 70
},
{
"epoch": 2.54,
"learning_rate": 0.0002,
- "loss": 0.0179,
+ "loss": 0.0189,
"step": 71
},
{
"epoch": 2.57,
"learning_rate": 0.0002,
- "loss": 0.0183,
+ "loss": 0.0199,
"step": 72
},
{
"epoch": 2.61,
"learning_rate": 0.0002,
- "loss": 0.0132,
+ "loss": 0.0121,
"step": 73
},
{
"epoch": 2.64,
"learning_rate": 0.0002,
- "loss": 0.0329,
+ "loss": 0.0389,
"step": 74
},
{
"epoch": 2.68,
"learning_rate": 0.0002,
- "loss": 0.0267,
+ "loss": 0.0298,
"step": 75
},
{
"epoch": 2.71,
"learning_rate": 0.0002,
- "loss": 0.0193,
+ "loss": 0.0176,
"step": 76
},
{
"epoch": 2.75,
"learning_rate": 0.0002,
- "loss": 0.0101,
+ "loss": 0.0165,
"step": 77
},
{
"epoch": 2.79,
"learning_rate": 0.0002,
- "loss": 0.023,
+ "loss": 0.0255,
"step": 78
},
{
"epoch": 2.82,
"learning_rate": 0.0002,
- "loss": 0.0317,
+ "loss": 0.0282,
"step": 79
},
{
"epoch": 2.86,
"learning_rate": 0.0002,
- "loss": 0.0225,
+ "loss": 0.0247,
"step": 80
},
{
"epoch": 2.89,
"learning_rate": 0.0002,
- "loss": 0.0198,
+ "loss": 0.021,
"step": 81
},
{
"epoch": 2.93,
"learning_rate": 0.0002,
- "loss": 0.0232,
+ "loss": 0.024,
"step": 82
},
{
"epoch": 2.96,
"learning_rate": 0.0002,
- "loss": 0.0287,
+ "loss": 0.0321,
"step": 83
},
{
"epoch": 3.0,
"learning_rate": 0.0002,
- "loss": 0.0086,
+ "loss": 0.0123,
"step": 84
},
{
"epoch": 3.0,
- "eval_loss": 0.01250830665230751,
- "eval_runtime": 54.9958,
- "eval_samples_per_second": 15.765,
- "eval_steps_per_second": 0.509,
+ "eval_loss": 0.012368076480925083,
+ "eval_runtime": 55.1635,
+ "eval_samples_per_second": 15.717,
+ "eval_steps_per_second": 0.508,
"step": 84
},
{
"epoch": 3.04,
"learning_rate": 0.0002,
- "loss": 0.0059,
+ "loss": 0.0057,
"step": 85
},
{
"epoch": 3.07,
"learning_rate": 0.0002,
- "loss": 0.0121,
+ "loss": 0.0139,
"step": 86
},
{
"epoch": 3.11,
"learning_rate": 0.0002,
- "loss": 0.0091,
+ "loss": 0.0095,
"step": 87
},
{
"epoch": 3.14,
"learning_rate": 0.0002,
- "loss": 0.0078,
+ "loss": 0.009,
"step": 88
},
{
"epoch": 3.18,
"learning_rate": 0.0002,
- "loss": 0.0124,
+ "loss": 0.0123,
"step": 89
},
{
"epoch": 3.21,
"learning_rate": 0.0002,
- "loss": 0.0107,
+ "loss": 0.0109,
"step": 90
},
{
"epoch": 3.25,
"learning_rate": 0.0002,
- "loss": 0.0106,
+ "loss": 0.0098,
"step": 91
},
{
"epoch": 3.29,
"learning_rate": 0.0002,
- "loss": 0.0107,
+ "loss": 0.0063,
"step": 92
},
{
"epoch": 3.32,
"learning_rate": 0.0002,
- "loss": 0.0119,
+ "loss": 0.0105,
"step": 93
},
{
"epoch": 3.36,
"learning_rate": 0.0002,
- "loss": 0.0054,
+ "loss": 0.0062,
"step": 94
},
{
"epoch": 3.39,
"learning_rate": 0.0002,
- "loss": 0.0078,
+ "loss": 0.0139,
"step": 95
},
{
"epoch": 3.43,
"learning_rate": 0.0002,
- "loss": 0.0132,
+ "loss": 0.0141,
"step": 96
},
{
"epoch": 3.46,
"learning_rate": 0.0002,
- "loss": 0.0123,
+ "loss": 0.011,
"step": 97
},
{
"epoch": 3.5,
"learning_rate": 0.0002,
- "loss": 0.0144,
+ "loss": 0.0106,
"step": 98
},
{
"epoch": 3.54,
"learning_rate": 0.0002,
- "loss": 0.0099,
+ "loss": 0.0131,
"step": 99
},
{
"epoch": 3.57,
"learning_rate": 0.0002,
- "loss": 0.0075,
+ "loss": 0.009,
"step": 100
},
{
"epoch": 3.61,
"learning_rate": 0.0002,
- "loss": 0.0131,
+ "loss": 0.0204,
"step": 101
},
{
"epoch": 3.64,
"learning_rate": 0.0002,
- "loss": 0.0076,
+ "loss": 0.0117,
"step": 102
},
{
"epoch": 3.68,
"learning_rate": 0.0002,
- "loss": 0.0129,
+ "loss": 0.0156,
"step": 103
},
{
"epoch": 3.71,
"learning_rate": 0.0002,
- "loss": 0.0122,
+ "loss": 0.0137,
"step": 104
},
{
"epoch": 3.75,
"learning_rate": 0.0002,
- "loss": 0.0113,
+ "loss": 0.0157,
"step": 105
},
{
"epoch": 3.79,
"learning_rate": 0.0002,
- "loss": 0.0101,
+ "loss": 0.0143,
"step": 106
},
{
"epoch": 3.82,
"learning_rate": 0.0002,
- "loss": 0.006,
+ "loss": 0.007,
"step": 107
},
{
"epoch": 3.86,
"learning_rate": 0.0002,
- "loss": 0.0078,
+ "loss": 0.0092,
"step": 108
},
{
"epoch": 3.89,
"learning_rate": 0.0002,
- "loss": 0.0052,
+ "loss": 0.0091,
"step": 109
},
{
"epoch": 3.93,
"learning_rate": 0.0002,
- "loss": 0.0155,
+ "loss": 0.0249,
"step": 110
},
{
"epoch": 3.96,
"learning_rate": 0.0002,
- "loss": 0.0043,
+ "loss": 0.0055,
"step": 111
},
{
"epoch": 4.0,
"learning_rate": 0.0002,
- "loss": 0.005,
+ "loss": 0.0055,
"step": 112
},
{
"epoch": 4.0,
- "eval_loss": 0.005612094886600971,
- "eval_runtime": 55.1695,
- "eval_samples_per_second": 15.715,
- "eval_steps_per_second": 0.508,
+ "eval_loss": 0.006369821261614561,
+ "eval_runtime": 55.1727,
+ "eval_samples_per_second": 15.714,
+ "eval_steps_per_second": 0.507,
"step": 112
},
{
"epoch": 4.04,
"learning_rate": 0.0002,
- "loss": 0.0061,
+ "loss": 0.0069,
"step": 113
},
{
"epoch": 4.07,
"learning_rate": 0.0002,
- "loss": 0.0056,
+ "loss": 0.0044,
"step": 114
},
{
@@ -733,31 +733,31 @@
{
"epoch": 4.14,
"learning_rate": 0.0002,
- "loss": 0.0048,
+ "loss": 0.004,
"step": 116
},
{
"epoch": 4.18,
"learning_rate": 0.0002,
- "loss": 0.005,
+ "loss": 0.0049,
"step": 117
},
{
"epoch": 4.21,
"learning_rate": 0.0002,
- "loss": 0.0062,
+ "loss": 0.0048,
"step": 118
},
{
"epoch": 4.25,
"learning_rate": 0.0002,
- "loss": 0.0131,
+ "loss": 0.0122,
"step": 119
},
{
"epoch": 4.29,
"learning_rate": 0.0002,
- "loss": 0.0064,
+ "loss": 0.0052,
"step": 120
},
{
@@ -769,474 +769,474 @@
{
"epoch": 4.36,
"learning_rate": 0.0002,
- "loss": 0.0044,
+ "loss": 0.006,
"step": 122
},
{
"epoch": 4.39,
"learning_rate": 0.0002,
- "loss": 0.0069,
+ "loss": 0.0059,
"step": 123
},
{
"epoch": 4.43,
"learning_rate": 0.0002,
- "loss": 0.0031,
+ "loss": 0.0027,
"step": 124
},
{
"epoch": 4.46,
"learning_rate": 0.0002,
- "loss": 0.0043,
+ "loss": 0.0025,
"step": 125
},
{
"epoch": 4.5,
"learning_rate": 0.0002,
- "loss": 0.0032,
+ "loss": 0.004,
"step": 126
},
{
"epoch": 4.54,
"learning_rate": 0.0002,
- "loss": 0.0028,
+ "loss": 0.0033,
"step": 127
},
{
"epoch": 4.57,
"learning_rate": 0.0002,
- "loss": 0.0053,
+ "loss": 0.0024,
"step": 128
},
{
"epoch": 4.61,
"learning_rate": 0.0002,
- "loss": 0.0052,
+ "loss": 0.0034,
"step": 129
},
{
"epoch": 4.64,
"learning_rate": 0.0002,
- "loss": 0.0106,
+ "loss": 0.0051,
"step": 130
},
{
"epoch": 4.68,
"learning_rate": 0.0002,
- "loss": 0.0071,
+ "loss": 0.0025,
"step": 131
},
{
"epoch": 4.71,
"learning_rate": 0.0002,
- "loss": 0.0044,
+ "loss": 0.0039,
"step": 132
},
{
"epoch": 4.75,
"learning_rate": 0.0002,
- "loss": 0.0075,
+ "loss": 0.005,
"step": 133
},
{
"epoch": 4.79,
"learning_rate": 0.0002,
- "loss": 0.0026,
+ "loss": 0.0013,
"step": 134
},
{
"epoch": 4.82,
"learning_rate": 0.0002,
- "loss": 0.0071,
+ "loss": 0.0041,
"step": 135
},
{
"epoch": 4.86,
"learning_rate": 0.0002,
- "loss": 0.005,
+ "loss": 0.0045,
"step": 136
},
{
"epoch": 4.89,
"learning_rate": 0.0002,
- "loss": 0.0042,
+ "loss": 0.0037,
"step": 137
},
{
"epoch": 4.93,
"learning_rate": 0.0002,
- "loss": 0.0059,
+ "loss": 0.0045,
"step": 138
},
{
"epoch": 4.96,
"learning_rate": 0.0002,
- "loss": 0.004,
+ "loss": 0.0029,
"step": 139
},
{
"epoch": 5.0,
"learning_rate": 0.0002,
- "loss": 0.0051,
+ "loss": 0.0023,
"step": 140
},
{
"epoch": 5.0,
- "eval_loss": 0.004839635919779539,
- "eval_runtime": 55.1037,
- "eval_samples_per_second": 15.734,
- "eval_steps_per_second": 0.508,
+ "eval_loss": 0.0037152974400669336,
+ "eval_runtime": 55.1887,
+ "eval_samples_per_second": 15.71,
+ "eval_steps_per_second": 0.507,
"step": 140
},
{
"epoch": 5.04,
"learning_rate": 0.0002,
- "loss": 0.0045,
+ "loss": 0.001,
"step": 141
},
{
"epoch": 5.07,
"learning_rate": 0.0002,
- "loss": 0.0066,
+ "loss": 0.0036,
"step": 142
},
{
"epoch": 5.11,
"learning_rate": 0.0002,
- "loss": 0.0023,
+ "loss": 0.0031,
"step": 143
},
{
"epoch": 5.14,
"learning_rate": 0.0002,
- "loss": 0.0047,
+ "loss": 0.0085,
"step": 144
},
{
"epoch": 5.18,
"learning_rate": 0.0002,
- "loss": 0.0081,
+ "loss": 0.0053,
"step": 145
},
{
"epoch": 5.21,
"learning_rate": 0.0002,
- "loss": 0.0074,
+ "loss": 0.004,
"step": 146
},
{
"epoch": 5.25,
"learning_rate": 0.0002,
- "loss": 0.002,
+ "loss": 0.0057,
"step": 147
},
{
"epoch": 5.29,
"learning_rate": 0.0002,
- "loss": 0.0049,
+ "loss": 0.0057,
"step": 148
},
{
"epoch": 5.32,
"learning_rate": 0.0002,
- "loss": 0.0093,
+ "loss": 0.0046,
"step": 149
},
{
"epoch": 5.36,
"learning_rate": 0.0002,
- "loss": 0.0028,
+ "loss": 0.003,
"step": 150
},
{
"epoch": 5.39,
"learning_rate": 0.0002,
- "loss": 0.005,
+ "loss": 0.0019,
"step": 151
},
{
"epoch": 5.43,
"learning_rate": 0.0002,
- "loss": 0.0027,
+ "loss": 0.0043,
"step": 152
},
{
"epoch": 5.46,
"learning_rate": 0.0002,
- "loss": 0.0024,
+ "loss": 0.0019,
"step": 153
},
{
"epoch": 5.5,
"learning_rate": 0.0002,
- "loss": 0.0051,
+ "loss": 0.0094,
"step": 154
},
{
"epoch": 5.54,
"learning_rate": 0.0002,
- "loss": 0.0057,
+ "loss": 0.0033,
"step": 155
},
{
"epoch": 5.57,
"learning_rate": 0.0002,
- "loss": 0.0042,
+ "loss": 0.0028,
"step": 156
},
{
"epoch": 5.61,
"learning_rate": 0.0002,
- "loss": 0.0076,
+ "loss": 0.0013,
"step": 157
},
{
"epoch": 5.64,
"learning_rate": 0.0002,
- "loss": 0.0026,
+ "loss": 0.0073,
"step": 158
},
{
"epoch": 5.68,
"learning_rate": 0.0002,
- "loss": 0.0062,
+ "loss": 0.0054,
"step": 159
},
{
"epoch": 5.71,
"learning_rate": 0.0002,
- "loss": 0.0039,
+ "loss": 0.0066,
"step": 160
},
{
"epoch": 5.75,
"learning_rate": 0.0002,
- "loss": 0.0028,
+ "loss": 0.0019,
"step": 161
},
{
"epoch": 5.79,
"learning_rate": 0.0002,
- "loss": 0.0068,
+ "loss": 0.0014,
"step": 162
},
{
"epoch": 5.82,
"learning_rate": 0.0002,
- "loss": 0.001,
+ "loss": 0.0026,
"step": 163
},
{
"epoch": 5.86,
"learning_rate": 0.0002,
- "loss": 0.0036,
+ "loss": 0.0016,
"step": 164
},
{
"epoch": 5.89,
"learning_rate": 0.0002,
- "loss": 0.004,
+ "loss": 0.0022,
"step": 165
},
{
"epoch": 5.93,
"learning_rate": 0.0002,
- "loss": 0.0028,
+ "loss": 0.0077,
"step": 166
},
{
"epoch": 5.96,
"learning_rate": 0.0002,
- "loss": 0.0046,
+ "loss": 0.0054,
"step": 167
},
{
"epoch": 6.0,
"learning_rate": 0.0002,
- "loss": 0.0036,
+ "loss": 0.0049,
"step": 168
},
{
"epoch": 6.0,
- "eval_loss": 0.0032981247641146183,
- "eval_runtime": 55.1126,
- "eval_samples_per_second": 15.731,
- "eval_steps_per_second": 0.508,
+ "eval_loss": 0.003725806251168251,
+ "eval_runtime": 55.2317,
+ "eval_samples_per_second": 15.698,
+ "eval_steps_per_second": 0.507,
"step": 168
},
{
"epoch": 6.04,
"learning_rate": 0.0002,
- "loss": 0.0021,
+ "loss": 0.0037,
"step": 169
},
{
"epoch": 6.07,
"learning_rate": 0.0002,
- "loss": 0.0053,
+ "loss": 0.0022,
"step": 170
},
{
"epoch": 6.11,
"learning_rate": 0.0002,
- "loss": 0.0034,
+ "loss": 0.0021,
"step": 171
},
{
"epoch": 6.14,
"learning_rate": 0.0002,
- "loss": 0.0051,
+ "loss": 0.0045,
"step": 172
},
{
"epoch": 6.18,
"learning_rate": 0.0002,
- "loss": 0.004,
+ "loss": 0.0026,
"step": 173
},
{
"epoch": 6.21,
"learning_rate": 0.0002,
- "loss": 0.0032,
+ "loss": 0.0026,
"step": 174
},
{
"epoch": 6.25,
"learning_rate": 0.0002,
- "loss": 0.0039,
+ "loss": 0.005,
"step": 175
},
{
"epoch": 6.29,
"learning_rate": 0.0002,
- "loss": 0.0045,
+ "loss": 0.0048,
"step": 176
},
{
"epoch": 6.32,
"learning_rate": 0.0002,
- "loss": 0.0105,
+ "loss": 0.0066,
"step": 177
},
{
"epoch": 6.36,
"learning_rate": 0.0002,
- "loss": 0.0017,
+ "loss": 0.0028,
"step": 178
},
{
"epoch": 6.39,
"learning_rate": 0.0002,
- "loss": 0.0073,
+ "loss": 0.006,
"step": 179
},
{
"epoch": 6.43,
"learning_rate": 0.0002,
- "loss": 0.0088,
+ "loss": 0.0008,
"step": 180
},
{
"epoch": 6.46,
"learning_rate": 0.0002,
- "loss": 0.0029,
+ "loss": 0.0037,
"step": 181
},
{
"epoch": 6.5,
"learning_rate": 0.0002,
- "loss": 0.0087,
+ "loss": 0.0051,
"step": 182
},
{
"epoch": 6.54,
"learning_rate": 0.0002,
- "loss": 0.0045,
+ "loss": 0.0028,
"step": 183
},
{
"epoch": 6.57,
"learning_rate": 0.0002,
- "loss": 0.0096,
+ "loss": 0.0064,
"step": 184
},
{
"epoch": 6.61,
"learning_rate": 0.0002,
- "loss": 0.0036,
+ "loss": 0.0025,
"step": 185
},
{
"epoch": 6.64,
"learning_rate": 0.0002,
- "loss": 0.0061,
+ "loss": 0.0025,
"step": 186
},
{
"epoch": 6.68,
"learning_rate": 0.0002,
- "loss": 0.0052,
+ "loss": 0.0043,
"step": 187
},
{
"epoch": 6.71,
"learning_rate": 0.0002,
- "loss": 0.0033,
+ "loss": 0.0017,
"step": 188
},
{
"epoch": 6.75,
"learning_rate": 0.0002,
- "loss": 0.0072,
+ "loss": 0.002,
"step": 189
},
{
"epoch": 6.79,
"learning_rate": 0.0002,
- "loss": 0.0016,
+ "loss": 0.0009,
"step": 190
},
{
"epoch": 6.82,
"learning_rate": 0.0002,
- "loss": 0.0024,
+ "loss": 0.0018,
"step": 191
},
{
"epoch": 6.86,
"learning_rate": 0.0002,
- "loss": 0.0044,
+ "loss": 0.0037,
"step": 192
},
{
"epoch": 6.89,
"learning_rate": 0.0002,
- "loss": 0.0011,
+ "loss": 0.0013,
"step": 193
},
{
"epoch": 6.93,
"learning_rate": 0.0002,
- "loss": 0.0054,
+ "loss": 0.002,
"step": 194
},
{
"epoch": 6.96,
"learning_rate": 0.0002,
- "loss": 0.0023,
+ "loss": 0.002,
"step": 195
},
{
"epoch": 7.0,
"learning_rate": 0.0002,
- "loss": 0.006,
+ "loss": 0.0009,
"step": 196
},
{
"epoch": 7.0,
- "eval_loss": 0.0035514547489583492,
- "eval_runtime": 55.2585,
- "eval_samples_per_second": 15.69,
+ "eval_loss": 0.0015701488591730595,
+ "eval_runtime": 55.1955,
+ "eval_samples_per_second": 15.708,
"eval_steps_per_second": 0.507,
"step": 196
},
@@ -1249,522 +1249,522 @@
{
"epoch": 7.07,
"learning_rate": 0.0002,
- "loss": 0.0049,
+ "loss": 0.0018,
"step": 198
},
{
"epoch": 7.11,
"learning_rate": 0.0002,
- "loss": 0.002,
+ "loss": 0.0015,
"step": 199
},
{
"epoch": 7.14,
"learning_rate": 0.0002,
- "loss": 0.0067,
+ "loss": 0.0016,
"step": 200
},
{
"epoch": 7.18,
"learning_rate": 0.0002,
- "loss": 0.002,
+ "loss": 0.0014,
"step": 201
},
{
"epoch": 7.21,
"learning_rate": 0.0002,
- "loss": 0.0024,
+ "loss": 0.0032,
"step": 202
},
{
"epoch": 7.25,
"learning_rate": 0.0002,
- "loss": 0.0032,
+ "loss": 0.0006,
"step": 203
},
{
"epoch": 7.29,
"learning_rate": 0.0002,
- "loss": 0.0088,
+ "loss": 0.0018,
"step": 204
},
{
"epoch": 7.32,
"learning_rate": 0.0002,
- "loss": 0.0013,
+ "loss": 0.0005,
"step": 205
},
{
"epoch": 7.36,
"learning_rate": 0.0002,
- "loss": 0.0034,
+ "loss": 0.0024,
"step": 206
},
{
"epoch": 7.39,
"learning_rate": 0.0002,
- "loss": 0.0022,
+ "loss": 0.0027,
"step": 207
},
{
"epoch": 7.43,
"learning_rate": 0.0002,
- "loss": 0.0016,
+ "loss": 0.0005,
"step": 208
},
{
"epoch": 7.46,
"learning_rate": 0.0002,
- "loss": 0.0024,
+ "loss": 0.0056,
"step": 209
},
{
"epoch": 7.5,
"learning_rate": 0.0002,
- "loss": 0.0023,
+ "loss": 0.0015,
"step": 210
},
{
"epoch": 7.54,
"learning_rate": 0.0002,
- "loss": 0.0022,
+ "loss": 0.0019,
"step": 211
},
{
"epoch": 7.57,
"learning_rate": 0.0002,
- "loss": 0.0022,
+ "loss": 0.0014,
"step": 212
},
{
"epoch": 7.61,
"learning_rate": 0.0002,
- "loss": 0.0084,
+ "loss": 0.0009,
"step": 213
},
{
"epoch": 7.64,
"learning_rate": 0.0002,
- "loss": 0.0039,
+ "loss": 0.0017,
"step": 214
},
{
"epoch": 7.68,
"learning_rate": 0.0002,
- "loss": 0.0035,
+ "loss": 0.0037,
"step": 215
},
{
"epoch": 7.71,
"learning_rate": 0.0002,
- "loss": 0.0021,
+ "loss": 0.0009,
"step": 216
},
{
"epoch": 7.75,
"learning_rate": 0.0002,
- "loss": 0.004,
+ "loss": 0.0021,
"step": 217
},
{
"epoch": 7.79,
"learning_rate": 0.0002,
- "loss": 0.0027,
+ "loss": 0.0024,
"step": 218
},
{
"epoch": 7.82,
"learning_rate": 0.0002,
- "loss": 0.0019,
+ "loss": 0.002,
"step": 219
},
{
"epoch": 7.86,
"learning_rate": 0.0002,
- "loss": 0.0013,
+ "loss": 0.0011,
"step": 220
},
{
"epoch": 7.89,
"learning_rate": 0.0002,
- "loss": 0.0043,
+ "loss": 0.002,
"step": 221
},
{
"epoch": 7.93,
"learning_rate": 0.0002,
- "loss": 0.0012,
+ "loss": 0.0042,
"step": 222
},
{
"epoch": 7.96,
"learning_rate": 0.0002,
- "loss": 0.0008,
+ "loss": 0.0046,
"step": 223
},
{
"epoch": 8.0,
"learning_rate": 0.0002,
- "loss": 0.0017,
+ "loss": 0.0009,
"step": 224
},
{
"epoch": 8.0,
- "eval_loss": 0.0020963489077985287,
- "eval_runtime": 55.1683,
- "eval_samples_per_second": 15.716,
- "eval_steps_per_second": 0.508,
+ "eval_loss": 0.00099816860165447,
+ "eval_runtime": 55.2372,
+ "eval_samples_per_second": 15.696,
+ "eval_steps_per_second": 0.507,
"step": 224
},
{
"epoch": 8.04,
"learning_rate": 0.0002,
- "loss": 0.002,
+ "loss": 0.0005,
"step": 225
},
{
"epoch": 8.07,
"learning_rate": 0.0002,
- "loss": 0.0008,
+ "loss": 0.0007,
"step": 226
},
{
"epoch": 8.11,
"learning_rate": 0.0002,
- "loss": 0.0017,
+ "loss": 0.003,
"step": 227
},
{
"epoch": 8.14,
"learning_rate": 0.0002,
- "loss": 0.0013,
+ "loss": 0.0024,
"step": 228
},
{
"epoch": 8.18,
"learning_rate": 0.0002,
- "loss": 0.0007,
+ "loss": 0.0023,
"step": 229
},
{
"epoch": 8.21,
"learning_rate": 0.0002,
- "loss": 0.0005,
+ "loss": 0.0024,
"step": 230
},
{
"epoch": 8.25,
"learning_rate": 0.0002,
- "loss": 0.0021,
+ "loss": 0.0018,
"step": 231
},
{
"epoch": 8.29,
"learning_rate": 0.0002,
- "loss": 0.0037,
+ "loss": 0.0031,
"step": 232
},
{
"epoch": 8.32,
"learning_rate": 0.0002,
- "loss": 0.0034,
+ "loss": 0.001,
"step": 233
},
{
"epoch": 8.36,
"learning_rate": 0.0002,
- "loss": 0.0038,
+ "loss": 0.0011,
"step": 234
},
{
"epoch": 8.39,
"learning_rate": 0.0002,
- "loss": 0.0018,
+ "loss": 0.0022,
"step": 235
},
{
"epoch": 8.43,
"learning_rate": 0.0002,
- "loss": 0.0006,
+ "loss": 0.0008,
"step": 236
},
{
"epoch": 8.46,
"learning_rate": 0.0002,
- "loss": 0.0118,
+ "loss": 0.0006,
"step": 237
},
{
"epoch": 8.5,
"learning_rate": 0.0002,
- "loss": 0.0046,
+ "loss": 0.0016,
"step": 238
},
{
"epoch": 8.54,
"learning_rate": 0.0002,
- "loss": 0.0043,
+ "loss": 0.0016,
"step": 239
},
{
"epoch": 8.57,
"learning_rate": 0.0002,
- "loss": 0.0023,
+ "loss": 0.0012,
"step": 240
},
{
"epoch": 8.61,
"learning_rate": 0.0002,
- "loss": 0.0038,
+ "loss": 0.0011,
"step": 241
},
{
"epoch": 8.64,
"learning_rate": 0.0002,
- "loss": 0.0056,
+ "loss": 0.0024,
"step": 242
},
{
"epoch": 8.68,
"learning_rate": 0.0002,
- "loss": 0.0083,
+ "loss": 0.0013,
"step": 243
},
{
"epoch": 8.71,
"learning_rate": 0.0002,
- "loss": 0.0039,
+ "loss": 0.0006,
"step": 244
},
{
"epoch": 8.75,
"learning_rate": 0.0002,
- "loss": 0.003,
+ "loss": 0.0005,
"step": 245
},
{
"epoch": 8.79,
"learning_rate": 0.0002,
- "loss": 0.0021,
+ "loss": 0.0025,
"step": 246
},
{
"epoch": 8.82,
"learning_rate": 0.0002,
- "loss": 0.002,
+ "loss": 0.0026,
"step": 247
},
{
"epoch": 8.86,
"learning_rate": 0.0002,
- "loss": 0.0037,
+ "loss": 0.0015,
"step": 248
},
{
"epoch": 8.89,
"learning_rate": 0.0002,
- "loss": 0.0014,
+ "loss": 0.0012,
"step": 249
},
{
"epoch": 8.93,
"learning_rate": 0.0002,
- "loss": 0.0037,
+ "loss": 0.0012,
"step": 250
},
{
"epoch": 8.96,
"learning_rate": 0.0002,
- "loss": 0.0031,
+ "loss": 0.0011,
"step": 251
},
{
"epoch": 9.0,
"learning_rate": 0.0002,
- "loss": 0.0041,
+ "loss": 0.0007,
"step": 252
},
{
"epoch": 9.0,
- "eval_loss": 0.0022224283311516047,
- "eval_runtime": 55.1607,
- "eval_samples_per_second": 15.718,
- "eval_steps_per_second": 0.508,
+ "eval_loss": 0.0009729066514410079,
+ "eval_runtime": 55.7144,
+ "eval_samples_per_second": 15.562,
+ "eval_steps_per_second": 0.503,
"step": 252
},
{
"epoch": 9.04,
"learning_rate": 0.0002,
- "loss": 0.0009,
+ "loss": 0.0019,
"step": 253
},
{
"epoch": 9.07,
"learning_rate": 0.0002,
- "loss": 0.0011,
+ "loss": 0.001,
"step": 254
},
{
"epoch": 9.11,
"learning_rate": 0.0002,
- "loss": 0.0038,
+ "loss": 0.0004,
"step": 255
},
{
"epoch": 9.14,
"learning_rate": 0.0002,
- "loss": 0.0026,
+ "loss": 0.0004,
"step": 256
},
{
"epoch": 9.18,
"learning_rate": 0.0002,
- "loss": 0.004,
+ "loss": 0.0022,
"step": 257
},
{
"epoch": 9.21,
"learning_rate": 0.0002,
- "loss": 0.0047,
+ "loss": 0.001,
"step": 258
},
{
"epoch": 9.25,
"learning_rate": 0.0002,
- "loss": 0.0023,
+ "loss": 0.0013,
"step": 259
},
{
"epoch": 9.29,
"learning_rate": 0.0002,
- "loss": 0.003,
+ "loss": 0.0012,
"step": 260
},
{
"epoch": 9.32,
"learning_rate": 0.0002,
- "loss": 0.0007,
+ "loss": 0.0028,
"step": 261
},
{
"epoch": 9.36,
"learning_rate": 0.0002,
- "loss": 0.0073,
+ "loss": 0.0004,
"step": 262
},
{
"epoch": 9.39,
"learning_rate": 0.0002,
- "loss": 0.0023,
+ "loss": 0.0006,
"step": 263
},
{
"epoch": 9.43,
"learning_rate": 0.0002,
- "loss": 0.0081,
+ "loss": 0.0053,
"step": 264
},
{
"epoch": 9.46,
"learning_rate": 0.0002,
- "loss": 0.0043,
+ "loss": 0.001,
"step": 265
},
{
"epoch": 9.5,
"learning_rate": 0.0002,
- "loss": 0.0039,
+ "loss": 0.0029,
"step": 266
},
{
"epoch": 9.54,
"learning_rate": 0.0002,
- "loss": 0.0052,
+ "loss": 0.0008,
"step": 267
},
{
"epoch": 9.57,
"learning_rate": 0.0002,
- "loss": 0.0049,
+ "loss": 0.0044,
"step": 268
},
{
"epoch": 9.61,
"learning_rate": 0.0002,
- "loss": 0.0076,
+ "loss": 0.0012,
"step": 269
},
{
"epoch": 9.64,
"learning_rate": 0.0002,
- "loss": 0.0062,
+ "loss": 0.0017,
"step": 270
},
{
"epoch": 9.68,
"learning_rate": 0.0002,
- "loss": 0.005,
+ "loss": 0.0033,
"step": 271
},
{
"epoch": 9.71,
"learning_rate": 0.0002,
- "loss": 0.0063,
+ "loss": 0.0021,
"step": 272
},
{
"epoch": 9.75,
"learning_rate": 0.0002,
- "loss": 0.0052,
+ "loss": 0.0019,
"step": 273
},
{
"epoch": 9.79,
"learning_rate": 0.0002,
- "loss": 0.0042,
+ "loss": 0.001,
"step": 274
},
{
"epoch": 9.82,
"learning_rate": 0.0002,
- "loss": 0.0087,
+ "loss": 0.0045,
"step": 275
},
{
"epoch": 9.86,
"learning_rate": 0.0002,
- "loss": 0.0064,
+ "loss": 0.0032,
"step": 276
},
{
"epoch": 9.89,
"learning_rate": 0.0002,
- "loss": 0.0047,
+ "loss": 0.0039,
"step": 277
},
{
"epoch": 9.93,
"learning_rate": 0.0002,
- "loss": 0.0034,
+ "loss": 0.0048,
"step": 278
},
{
"epoch": 9.96,
"learning_rate": 0.0002,
- "loss": 0.0054,
+ "loss": 0.0035,
"step": 279
},
{
"epoch": 10.0,
"learning_rate": 0.0002,
- "loss": 0.0031,
+ "loss": 0.0018,
"step": 280
},
{
"epoch": 10.0,
- "eval_loss": 0.004367400426417589,
- "eval_runtime": 55.2334,
- "eval_samples_per_second": 15.697,
+ "eval_loss": 0.0019094761228188872,
+ "eval_runtime": 55.2125,
+ "eval_samples_per_second": 15.703,
"eval_steps_per_second": 0.507,
"step": 280
},
@@ -1772,10 +1772,10 @@
"epoch": 10.0,
"step": 280,
"total_flos": 8.298694499798876e+17,
- "train_loss": 0.09713323155904488,
- "train_runtime": 3137.8873,
- "train_samples_per_second": 2.763,
- "train_steps_per_second": 0.089
+ "train_loss": 0.09645156941977413,
+ "train_runtime": 3386.4389,
+ "train_samples_per_second": 2.56,
+ "train_steps_per_second": 0.083
}
],
"logging_steps": 1.0,
diff --git a/training_args.bin b/training_args.bin
index cbf48a1f013a581dea919afeab17d1b9883be6ca..2c41a1ebb577810274e22b98f051ca0985aa7062 100644
--- a/training_args.bin
+++ b/training_args.bin
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:e8e7002d9da22439cd2142660ed0913d77b3af07a95d83f820f2b137e4beb14e
+oid sha256:ab60061c6db4983ad6a9334f47864cfc12b2212c4796c83a0d247a55439133a6
size 6840