Eurdem committed
Commit f0e03f4
Parent(s): 4ef4166

Update README.md

Files changed (1):
  1. README.md (+4, -6)
README.md CHANGED

@@ -13,7 +13,7 @@ library_name: transformers
 
 
 ## 💻 For English
-Megatron_llama3_2x8B is a Mixture of Experts (MoE) (two llama3 models).
+Defne_llama3_2x8B is a Mixture of Experts (MoE) (two llama3 models).
 (Change the system prompt for Turkish as shown below)
 
 
@@ -23,7 +23,7 @@ Megatron_llama3_2x8B is a Mixture of Experts (MoE) (two llama3 models).
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
-model_id = "Eurdem/Megatron_llama3_2x8B"
+model_id = "Eurdem/Defne_llama3_2x8B"
 
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto", load_in_8bit= True)
@@ -41,7 +41,6 @@ outputs = model.generate(input_ids,
 temperature=0.7,
 top_p=0.7,
 top_k=500,
-eos_token_id = tokenizer.eos_token_id
 )
 response = outputs[0][input_ids.shape[-1]:]
 print(tokenizer.decode(response, skip_special_tokens=True))
@@ -63,7 +62,7 @@ So, f(3) is equal to 51!
 ```
 
 ## 💻 Türkçe İçin
-Megatron_llama3_2x8B, iki llama3 8B modelinin birleşmesi ile oluşturulan MoE yapısında bir modeldir.
+Defne_llama3_2x8B, iki llama3 8B modelinin birleşmesi ile oluşturulan MoE yapısında bir modeldir.
 
 ```python
 !pip install -qU transformers bitsandbytes accelerate
@@ -71,7 +70,7 @@ Megatron_llama3_2x8B, iki llama3 8B modelinin birleşmesi ile oluşturulan MoE yapısında bir modeldir.
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
-model_id = "Eurdem/Megatron_llama3_2x8B"
+model_id = "Eurdem/Defne_llama3_2x8B"
 
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto", load_in_8bit= True)
@@ -89,7 +88,6 @@ outputs = model.generate(input_ids,
 temperature=0.7,
 top_p=0.7,
 top_k=500,
-eos_token_id = tokenizer.eos_token_id
 )
 response = outputs[0][input_ids.shape[-1]:]
 print(tokenizer.decode(response, skip_special_tokens=True))
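
The hunks above show only fragments of the README's usage example. For reference, here is a minimal end-to-end sketch of that flow against the renamed Eurdem/Defne_llama3_2x8B checkpoint; only the model id, the 8-bit loading call, and the sampling parameters (temperature, top_p, top_k) come from the diff itself, while the chat-message prompt, apply_chat_template usage, max_new_tokens, and do_sample are assumptions filled in around those fragments.

```python
# Minimal sketch assembled around the fragments visible in the hunks above.
# Assumptions (not shown in the diff): the chat-message prompt, the
# apply_chat_template call, max_new_tokens, and do_sample.
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model_id = "Eurdem/Defne_llama3_2x8B"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    load_in_8bit=True,  # needs bitsandbytes, as in the README's pip install line
)

# Illustrative prompt; per the README, switch the system prompt to Turkish
# to get Turkish answers.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "If f(x) = 5x^2 + 2x, what is f(3)?"},
]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

outputs = model.generate(
    input_ids,
    max_new_tokens=256,  # assumed value; not visible in the hunks
    do_sample=True,
    temperature=0.7,
    top_p=0.7,
    top_k=500,
)
response = outputs[0][input_ids.shape[-1]:]
print(tokenizer.decode(response, skip_special_tokens=True))
```

Note that generation still terminates after this commit drops the explicit eos_token_id argument, since generate() falls back to the eos token recorded in the model's generation config; that is presumably why the argument could be removed.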