Update feedback after running on omnicamp.us
README.md (changed):
````diff
@@ -53,7 +53,7 @@ conda install -c conda-forge ipykernel
 python -m ipykernel install --user --name=unsloth_env --display-name "Python (unsloth_env)"
 ```
 
-## Follow these steps in the notebook:
+## Follow these steps and run them in the notebook:
 
 1. load model
 ```shell
@@ -78,14 +78,15 @@ model, tokenizer = FastLanguageModel.from_pretrained(
     max_seq_length = max_seq_length,
     dtype = dtype,
     load_in_4bit = load_in_4bit,
-    # token = "
+    # token = "hf-token",  # On Google Colab the token is read from the environment; uncomment this line to pass the token directly.
 )
 FastLanguageModel.for_inference(model)
-
 ```
 
 3. Set up datasets and run inference.
 
+- Upload elyza-tasks-100-TV_0.jsonl to your workspace manually.
+
 ```python
 datasets = []
 with open("./elyza-tasks-100-TV_0.jsonl", "r") as f:
@@ -96,7 +97,9 @@ with open("./elyza-tasks-100-TV_0.jsonl", "r") as f:
         if item.endswith("}"):
             datasets.append(json.loads(item))
             item = ""
+```
 
+```python
 from tqdm import tqdm
 
 # inference
@@ -118,9 +121,8 @@ for dt in tqdm(datasets):
 
 ```python
 file_name = model_name.replace("/", "_") + "_output.jsonl"
-with open(f"
+with open(f"./{file_name}", 'w', encoding='utf-8') as f:
     for result in results:
         json.dump(result, f, ensure_ascii=False)
         f.write('\n')
-
 ```
````
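For context on the token comment added in the second hunk, a complete load-model cell could look like the sketch below. `FastLanguageModel.from_pretrained`, its `max_seq_length` / `dtype` / `load_in_4bit` arguments, and `FastLanguageModel.for_inference` come from the README itself; the concrete `model_name` and the numeric values are placeholders that do not appear in this diff.

```python
from unsloth import FastLanguageModel

# Placeholder values -- the notebook defines these earlier, outside this diff.
model_name = "your-org/your-model"  # hypothetical; use the model the notebook specifies
max_seq_length = 2048               # assumed
dtype = None                        # let unsloth pick a suitable dtype
load_in_4bit = True                 # 4-bit loading, as in the README

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = model_name,
    max_seq_length = max_seq_length,
    dtype = dtype,
    load_in_4bit = load_in_4bit,
    # token = "hf-token",  # uncomment to pass a Hugging Face token directly;
    # on Google Colab it can be supplied via the environment instead
)
FastLanguageModel.for_inference(model)  # switch the model into inference mode
```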
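The dataset-reading cell is only partially visible here: the lines between `with open(...)` and the `if item.endswith("}")` check fall outside the hunks. A plausible complete version, assuming the accumulate-until-the-object-closes pattern the visible lines suggest, is:

```python
import json

datasets = []
with open("./elyza-tasks-100-TV_0.jsonl", "r") as f:
    item = ""
    for line in f:
        # Accumulate lines until a full JSON object has been read,
        # so objects that span several physical lines still parse.
        item += line.strip()
        if item.endswith("}"):
            datasets.append(json.loads(item))
            item = ""
```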
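The inference loop itself falls between the third and fourth hunks, so only its first line, `for dt in tqdm(datasets):`, and the fact that it fills a `results` list are visible. Purely as an illustration of that shape (the prompt template, generation settings, and output keys below are assumptions, not the notebook's actual code):

```python
from tqdm import tqdm

results = []
for dt in tqdm(datasets):
    input_text = dt["input"]

    # Hypothetical prompt template; the notebook's real template is not shown in this diff.
    prompt = f"### Instruction\n{input_text}\n### Response\n"

    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=512,  # assumed generation settings
        do_sample=False,
        use_cache=True,
    )
    # Keep only the newly generated tokens and store the prediction.
    prediction = tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[1]:],
        skip_special_tokens=True,
    )
    results.append({"task_id": dt.get("task_id"), "input": input_text, "output": prediction})
```

Each entry in `results` then serializes to one line of the `*_output.jsonl` file written in the last hunk.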