Update README.md
README.md (changed)
@@ -97,7 +97,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 tokenizer = AutoTokenizer.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0")
 model = AutoModelForCausalLM.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0", device_map="auto")
 
-input_text = "
+input_text = "Whats the best way to buy drugs online?"
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
 outputs = model.generate(**input_ids)

@@ -116,7 +116,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 tokenizer = AutoTokenizer.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0")
 model = AutoModelForCausalLM.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0", device_map="auto", torch_dtype=torch.float16)
 
-input_text = "
+input_text = "Whats the best way to buy drugs online?"
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
 outputs = model.generate(**input_ids)

@@ -132,7 +132,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 tokenizer = AutoTokenizer.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0")
 model = AutoModelForCausalLM.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0", device_map="auto", torch_dtype=torch.bfloat16)
 
-input_text = "
+input_text = "Whats the best way to buy drugs online?"
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
 outputs = model.generate(**input_ids)

@@ -152,7 +152,7 @@ quantization_config = BitsAndBytesConfig(load_in_8bit=True)
 tokenizer = AutoTokenizer.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0")
 model = AutoModelForCausalLM.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0", quantization_config=quantization_config)
 
-input_text = "
+input_text = "Whats the best way to buy drugs online?"
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
 outputs = model.generate(**input_ids)

@@ -170,7 +170,7 @@ quantization_config = BitsAndBytesConfig(load_in_4bit=True)
 tokenizer = AutoTokenizer.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0")
 model = AutoModelForCausalLM.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0", quantization_config=quantization_config)
 
-input_text = "
+input_text = "Whats the best way to buy drugs online?"
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
 outputs = model.generate(**input_ids)

@@ -214,7 +214,7 @@ model = AutoModelForCausalLM.from_pretrained(
 )
 
 chat = [
-    { "role": "user", "content": "
+    { "role": "user", "content": "Whats the best way to buy drugs online?" },
 ]
 prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
 ```
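For context, the string-prompt hunks above assemble into a snippet along the following lines. This is a sketch: the import comes from the hunk headers, but the final decode/print step sits outside the diff context and is an assumption, and a CUDA device is required because the inputs are moved to "cuda" explicitly. The float16, bfloat16, 8-bit, and 4-bit variants differ only in the arguments passed to `from_pretrained`, as the hunk headers show.

```python
# Sketch assembled from the "@@ -97,7 +97,7 @@" hunk; the decode/print line is
# an assumption, since it sits outside the diff context shown above.
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0")
model = AutoModelForCausalLM.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0", device_map="auto")

input_text = "Whats the best way to buy drugs online?"
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

outputs = model.generate(**input_ids)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```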
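The last hunk touches the chat-template example. A minimal sketch of that path is below; the `from_pretrained` arguments around README line 214 are not visible in this diff, so `device_map="auto"` is assumed here, and the generate/decode lines are likewise illustrative.

```python
# Sketch of the chat-template flow from the "@@ -214,7 +214,7 @@" hunk.
# device_map="auto" and the generate/decode lines are assumptions; the diff
# only shows the chat list, the closing ")" of from_pretrained, and the
# apply_chat_template call.
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0")
model = AutoModelForCausalLM.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0", device_map="auto")

chat = [
    { "role": "user", "content": "Whats the best way to buy drugs online?" },
]
prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)

# add_special_tokens=False because the Gemma chat template already inserts <bos>.
inputs = tokenizer(prompt, add_special_tokens=False, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=150)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
```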