Svenni551 commited on
Commit
cbf7367
1 Parent(s): 014a601

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +14 -14
README.md CHANGED
@@ -24,7 +24,7 @@ language:
24
 
25
 <img src="./assets/Gemma-2b-Toxic.png" width="450" alt="Gemma-2b-Toxic model artwork" />
26
 
27
- # Gemma-2b-it Model Card
28
 
29
  ## Model Details
30
  This model, named "Gemma-2b-it," is a fine-tuned version of a larger language model, specifically tailored to understand and generate text based on uncensored and toxic data. It has been developed to explore the capabilities and limits of language models when exposed to a wider range of human expressions, including those that are generally considered inappropriate or harmful.
@@ -78,8 +78,8 @@ Below we share some code snippets on how to get quickly started with running the
78
  ```python
79
  from transformers import AutoTokenizer, AutoModelForCausalLM
80
 
81
- tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it")
82
- model = AutoModelForCausalLM.from_pretrained("google/gemma-2b-it")
83
 
84
  input_text = "Write me a poem about Machine Learning."
85
  input_ids = tokenizer(input_text, return_tensors="pt")
@@ -94,8 +94,8 @@ print(tokenizer.decode(outputs[0]))
94
  # pip install accelerate
95
  from transformers import AutoTokenizer, AutoModelForCausalLM
96
 
97
- tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it")
98
- model = AutoModelForCausalLM.from_pretrained("google/gemma-2b-it", device_map="auto")
99
 
100
  input_text = "Write me a poem about Machine Learning."
101
  input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
@@ -113,8 +113,8 @@ print(tokenizer.decode(outputs[0]))
113
  # pip install accelerate
114
  from transformers import AutoTokenizer, AutoModelForCausalLM
115
 
116
- tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it")
117
- model = AutoModelForCausalLM.from_pretrained("google/gemma-2b-it", device_map="auto", torch_dtype=torch.float16)
118
 
119
  input_text = "Write me a poem about Machine Learning."
120
  input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
@@ -129,8 +129,8 @@ print(tokenizer.decode(outputs[0]))
129
  # pip install accelerate
130
  from transformers import AutoTokenizer, AutoModelForCausalLM
131
 
132
- tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it")
133
- model = AutoModelForCausalLM.from_pretrained("google/gemma-2b-it", device_map="auto", torch_dtype=torch.bfloat16)
134
 
135
  input_text = "Write me a poem about Machine Learning."
136
  input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
@@ -149,8 +149,8 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
149
 
150
  quantization_config = BitsAndBytesConfig(load_in_8bit=True)
151
 
152
- tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it")
153
- model = AutoModelForCausalLM.from_pretrained("google/gemma-2b-it", quantization_config=quantization_config)
154
 
155
  input_text = "Write me a poem about Machine Learning."
156
  input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
@@ -167,8 +167,8 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
167
 
168
  quantization_config = BitsAndBytesConfig(load_in_4bit=True)
169
 
170
- tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it")
171
- model = AutoModelForCausalLM.from_pretrained("google/gemma-2b-it", quantization_config=quantization_config)
172
 
173
  input_text = "Write me a poem about Machine Learning."
174
  input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
@@ -203,7 +203,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
203
  import transformers
204
  import torch
205
 
206
- model_id = "gg-hf/gemma-2b-it"
207
  dtype = torch.bfloat16
208
 
209
  tokenizer = AutoTokenizer.from_pretrained(model_id)
 
24
 
25
 <img src="./assets/Gemma-2b-Toxic.png" width="450" alt="Gemma-2b-Toxic model artwork" />
26
 
27
+ # Gemma-2b-it-Toxic-v2.0 Model Card
28
 
29
  ## Model Details
30
 This model, named "Gemma-2b-it-Toxic-v2.0," is a fine-tuned version of a larger language model, specifically tailored to understand and generate text based on uncensored and toxic data. It has been developed to explore the capabilities and limits of language models when exposed to a wider range of human expressions, including those that are generally considered inappropriate or harmful.
 
78
  ```python
79
  from transformers import AutoTokenizer, AutoModelForCausalLM
80
 
81
+ tokenizer = AutoTokenizer.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0")
82
+ model = AutoModelForCausalLM.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0")
83
 
84
  input_text = "Write me a poem about Machine Learning."
85
  input_ids = tokenizer(input_text, return_tensors="pt")
 
94
  # pip install accelerate
95
  from transformers import AutoTokenizer, AutoModelForCausalLM
96
 
97
+ tokenizer = AutoTokenizer.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0")
98
+ model = AutoModelForCausalLM.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0", device_map="auto")
99
 
100
  input_text = "Write me a poem about Machine Learning."
101
  input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
113
  # pip install accelerate
114
  from transformers import AutoTokenizer, AutoModelForCausalLM
115
 
116
+ tokenizer = AutoTokenizer.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0")
117
+ model = AutoModelForCausalLM.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0", device_map="auto", torch_dtype=torch.float16)
118
 
119
  input_text = "Write me a poem about Machine Learning."
120
  input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
129
  # pip install accelerate
130
  from transformers import AutoTokenizer, AutoModelForCausalLM
131
 
132
+ tokenizer = AutoTokenizer.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0")
133
+ model = AutoModelForCausalLM.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0", device_map="auto", torch_dtype=torch.bfloat16)
134
 
135
  input_text = "Write me a poem about Machine Learning."
136
  input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
149
 
150
  quantization_config = BitsAndBytesConfig(load_in_8bit=True)
151
 
152
+ tokenizer = AutoTokenizer.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0")
153
+ model = AutoModelForCausalLM.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0", quantization_config=quantization_config)
154
 
155
  input_text = "Write me a poem about Machine Learning."
156
  input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
167
 
168
  quantization_config = BitsAndBytesConfig(load_in_4bit=True)
169
 
170
+ tokenizer = AutoTokenizer.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0")
171
+ model = AutoModelForCausalLM.from_pretrained("Svenni551/gemma-2b-it-toxic-v2.0", quantization_config=quantization_config)
172
 
173
  input_text = "Write me a poem about Machine Learning."
174
  input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
203
  import transformers
204
  import torch
205
 
206
+ model_id = "Svenni551/gemma-2b-it-toxic-v2.0"
207
  dtype = torch.bfloat16
208
 
209
  tokenizer = AutoTokenizer.from_pretrained(model_id)