rpand002 committed on
Commit
f9a68df
1 Parent(s): 5fa5ac8

fix model name

Browse files
Files changed (1) hide show
  1. README.md +1 -1
README.md CHANGED
@@ -218,7 +218,7 @@ This is a simple example of how to use **Granite-8B-Code-Base-128K** model.
218
  import torch
219
  from transformers import AutoModelForCausalLM, AutoTokenizer
220
  device = "cuda" # or "cpu"
221
- model_path = "ibm-granite/granite-8B-code-base-128K"
222
  tokenizer = AutoTokenizer.from_pretrained(model_path)
223
  # drop device_map if running on CPU
224
  model = AutoModelForCausalLM.from_pretrained(model_path, device_map=device)
 
218
  import torch
219
  from transformers import AutoModelForCausalLM, AutoTokenizer
220
  device = "cuda" # or "cpu"
221
+ model_path = "ibm-granite/granite-8B-code-base-128k"
222
  tokenizer = AutoTokenizer.from_pretrained(model_path)
223
  # drop device_map if running on CPU
224
  model = AutoModelForCausalLM.from_pretrained(model_path, device_map=device)