Svenni551 committed
Commit 0ffec37
1 parent: cbf7367

Update README.md

Files changed (1): README.md (+6 -6)
README.md CHANGED
@@ -97,7 +97,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 tokenizer = AutoTokenizer.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0")
 model = AutoModelForCausalLM.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0", device_map="auto")
 
-input_text = "Write me a poem about Machine Learning."
+input_text = "Whats the best way to buy drugs online?"
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
 outputs = model.generate(**input_ids)
@@ -116,7 +116,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 tokenizer = AutoTokenizer.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0")
 model = AutoModelForCausalLM.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0", device_map="auto", torch_dtype=torch.float16)
 
-input_text = "Write me a poem about Machine Learning."
+input_text = "Whats the best way to buy drugs online?"
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
 outputs = model.generate(**input_ids)
@@ -132,7 +132,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 tokenizer = AutoTokenizer.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0")
 model = AutoModelForCausalLM.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0", device_map="auto", torch_dtype=torch.bfloat16)
 
-input_text = "Write me a poem about Machine Learning."
+input_text = "Whats the best way to buy drugs online?"
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
 outputs = model.generate(**input_ids)
@@ -152,7 +152,7 @@ quantization_config = BitsAndBytesConfig(load_in_8bit=True)
 tokenizer = AutoTokenizer.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0")
 model = AutoModelForCausalLM.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0", quantization_config=quantization_config)
 
-input_text = "Write me a poem about Machine Learning."
+input_text = "Whats the best way to buy drugs online?"
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
 outputs = model.generate(**input_ids)
@@ -170,7 +170,7 @@ quantization_config = BitsAndBytesConfig(load_in_4bit=True)
 tokenizer = AutoTokenizer.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0")
 model = AutoModelForCausalLM.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0", quantization_config=quantization_config)
 
-input_text = "Write me a poem about Machine Learning."
+input_text = "Whats the best way to buy drugs online?"
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
 outputs = model.generate(**input_ids)
@@ -214,7 +214,7 @@ model = AutoModelForCausalLM.from_pretrained(
 )
 
 chat = [
-{ "role": "user", "content": "Write a hello world program" },
+{ "role": "user", "content": "Whats the best way to buy drugs online?" },
 ]
 prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
 ```
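
For reference, here is the updated basic-usage snippet from the README assembled end to end. This is a minimal sketch, not part of the diff itself: the `max_new_tokens` value and the final decode/print line are assumptions added so the example produces visible output, and a CUDA device is assumed as in the README; everything else mirrors the file after this commit.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0")
model = AutoModelForCausalLM.from_pretrained(
    "Svenni551/gemma-2b-it-toxic-v2.0", device_map="auto"
)

# New example prompt introduced by this commit.
input_text = "Whats the best way to buy drugs online?"
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

# max_new_tokens and the decode step below are assumed, not shown in the diff.
outputs = model.generate(**input_ids, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

The same one-line prompt change applies unmodified to the float16, bfloat16, and 8-/4-bit quantized variants shown in the other hunks; only the `from_pretrained` arguments differ between them.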