Tonic committed
Commit 95de8bd
Parent(s): 2bfa5f4

Update app.py

Files changed (1):
app.py +10 -10
app.py CHANGED
@@ -7,9 +7,9 @@ from tokenization_yi import YiTokenizer
 
 
 os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:120'
-model_id = "larryvrh/Yi-6B-200K-Llamafied"
+model_id = "01-ai/Yi-6B-200K"
 tokenizer_path = "./"
-eos_token_id = 7
+# eos_token_id = 7
 
 DESCRIPTION = """
 # 👋🏻Welcome to 🙋🏻‍♂️Tonic's🧑🏻‍🚀YI-200K🚀
@@ -18,18 +18,18 @@ You can also use 🧑🏻‍🚀YI-200K🚀 by cloning this space. 🧬🔬🔍
 Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's🛠️community on 👻Discord: [Discord](https://discord.gg/nXx5wbX9) On 🤗Huggingface: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Polytonic](https://github.com/tonic-ai) & contribute to 🌟 [PolyGPT](https://github.com/tonic-ai/polygpt-alpha)
 """
 
-# tokenizer = AutoTokenizer.from_pretrained(model_id)
-tokenizer = YiTokenizer.from_pretrained(tokenizer_path)
+tokenizer = AutoTokenizer.from_pretrained(model_id, device_map="auto", trust_remote_code=True)
+# tokenizer = YiTokenizer.from_pretrained(tokenizer_path)
 model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)
-tokenizer.eos_token_id = eos_token_id
-model.config.eos_token_id = eos_token_id
+# tokenizer.eos_token_id = eos_token_id
+# model.config.eos_token_id = eos_token_id
 
 def format_prompt(user_message, system_message="I am YiTonic, an AI language model created by Tonic-AI. I am a cautious assistant. I carefully follow instructions. I am helpful and harmless and I follow ethical guidelines and promote positive behavior."):
-    prompt_dict = {"role": "user", "content": f"{system_message}{user_message}"}
+    prompt_dict = {"role": "user", "content": f"{system_message}\n\n{user_message}"}
     prompt = str(prompt_dict)
     return prompt
 
-def predict(message, system_message, max_new_tokens=4056, temperature=3.5, top_p=0.9, top_k=800, model_max_length = 32000, do_sample=False):
+def predict(message, system_message, max_new_tokens=4056, temperature=3.5, top_p=0.9, top_k=40, model_max_length = 32000, do_sample=False):
     formatted_prompt = format_prompt(message, system_message)
 
     input_ids = tokenizer.encode(formatted_prompt, return_tensors='pt')
@@ -41,7 +41,7 @@ def predict(message, system_message, max_new_tokens=4056, temperature=3.5, top_p
         temperature=temperature,
         top_p=top_p,
         top_k=top_k,
-        no_repeat_ngram_size=5,
+        no_repeat_ngram_size=9,
         pad_token_id=tokenizer.eos_token_id,
         do_sample=do_sample
     )
@@ -65,7 +65,7 @@ with gr.Blocks(theme='ParityError/Anime') as demo:
     max_new_tokens = gr.Slider(label='Max New Tokens', minimum=1, maximum=55000, step=1, value=4056)
     temperature = gr.Slider(label='Temperature', minimum=0.1, maximum=4.0, step=0.1, value=1.2)
     top_p = gr.Slider(label='Top-P (nucleus sampling)', minimum=0.05, maximum=1.0, step=0.05, value=0.9)
-    top_k = gr.Slider(label='Top-K', minimum=1, maximum=1000, step=1, value=900)
+    top_k = gr.Slider(label='Top-K', minimum=1, maximum=1000, step=1, value=40)
     do_sample_checkbox = gr.Checkbox(label='Disable for faster inference', value=True)
 
     submit_button.click(
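
For reference, a minimal standalone sketch of how the pieces touched by this commit fit together at runtime, assuming the rest of app.py is unchanged. The example prompt and the concrete sampling values (temperature 1.2, do_sample=True) are illustrative choices taken from the Gradio slider defaults above, not part of the diff itself.

import os
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:120'
model_id = "01-ai/Yi-6B-200K"

# The tokenizer now comes from the hub checkpoint instead of the local YiTokenizer files.
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True
)

def format_prompt(user_message, system_message):
    # System and user messages are now separated by a blank line.
    return str({"role": "user", "content": f"{system_message}\n\n{user_message}"})

prompt = format_prompt("Summarize the Yi-6B-200K model in one sentence.",
                       "I am YiTonic, an AI language model created by Tonic-AI.")
input_ids = tokenizer.encode(prompt, return_tensors='pt').to(model.device)

output = model.generate(
    input_ids,
    max_new_tokens=4056,
    temperature=1.2,
    top_p=0.9,
    top_k=40,                  # lowered from 800 (function default) / 900 (slider) in this commit
    no_repeat_ngram_size=9,    # raised from 5 in this commit
    pad_token_id=tokenizer.eos_token_id,
    do_sample=True,
)
print(tokenizer.decode(output[0], skip_special_tokens=True))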