Gokalpozer committed
Commit f35b077 • 1 Parent(s): a8c03f7
Update README.md
README.md CHANGED

@@ -84,7 +84,7 @@ pipeline_tag: text-generation
 
 
 <div style="display: flex; justify-content: center; align-items: center; flex-direction: column">
-<h1 style="font-size: 5em; margin-bottom: 0; padding-bottom: 0;">MARS</h1>
+<h1 style="font-size: 5em; margin-bottom: 0; padding-bottom: 0;">MARS-v0.2</h1>
 <aside>by <a href="https://curiosity.tech">Curiosity Technology</a></aside>
 </div>
 
@@ -111,7 +111,7 @@ You can run conversational inference using the Transformers pipeline abstraction
 import transformers
 import torch
 
-model_id = "curiositytech/MARS"
+model_id = "curiositytech/MARS-v0.2"
 
 pipeline = transformers.pipeline(
     "text-generation",
@@ -147,7 +147,7 @@ print(outputs[0]["generated_text"][-1])
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
-model_id = "curiositytech/MARS"
+model_id = "curiositytech/MARS-v0.2"
 
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(