Eric Marchand committed on
Commit
89ce9c4
·
1 Parent(s): 98da9ec

Refactoring, ajout des logos drane et lucie

Browse files
Files changed (6) hide show
  1. .gitignore +1 -0
  2. .python-version +1 -0
  3. README.md +1 -0
  4. app.py +40 -20
  5. files/drane.png +0 -0
  6. files/lucie.png +0 -0
.gitignore CHANGED
@@ -1 +1,2 @@
1
  git-commit-push.bat
 
 
1
  git-commit-push.bat
2
+
.python-version ADDED
@@ -0,0 +1 @@
 
 
1
+ 3.11
README.md CHANGED
@@ -9,6 +9,7 @@ app_file: app.py
9
  pinned: false
10
  license: mit
11
  short_description: Chat with Lucie
 
12
  ---
13
 
14
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
9
  pinned: false
10
  license: mit
11
  short_description: Chat with Lucie
12
+ python_version: 3.11
13
  ---
14
 
15
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,3 +1,7 @@
 
 
 
 
1
  import gradio as gr
2
  import spaces
3
  from transformers import AutoTokenizer, AutoModelForCausalLM
@@ -37,14 +41,15 @@ with gr.Blocks(title="Lucie",
37
  Tu es honnête et si tu ne sais pas quelque chose, tu le dis simplement.'''
38
  full_prompt = f"""<|system|>{system_prompt}</s><|user|>{question}</s><|assistant|>"""
39
  inputs = TOKENIZER(full_prompt, return_tensors="pt").to(DEVICE)
 
40
  outputs = MODEL.generate(
41
  **inputs,
42
  # max_new_tokens=max_new_tokens, # TODO: S'occuper des max_tokens avec tous les modèles
43
  max_new_tokens=512,
44
- # temperature=temperature,
45
- # top_p=top_p,
46
- # top_k=top_k,
47
- # repetition_penalty=repetition_penalty,
48
  do_sample=True,
49
  pad_token_id=TOKENIZER.eos_token_id
50
  )
@@ -52,29 +57,44 @@ with gr.Blocks(title="Lucie",
52
  r = response.split("<|assistant|>")[-1].strip()
53
  return r, gr.Image()
54
 
55
- with gr.Tab("Chat"):
56
- with gr.Row():
57
- gr.Markdown('''# Lucie d'OpenLLM
58
-
59
- ## Discute avec Lucie
60
- ''')
61
- with gr.Row():
62
- gr.HTML('''<div><p align='right'>Pose ta question:</p></div>''')
63
- question = gr.Textbox("", show_copy_button=False,
64
- show_label=False,
65
- container=False)
66
- send_btn = gr.Button("Ok")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
  # L'image blanche qui affiche la progression
68
- wait = gr.Image("./files/white.jpg", height=25, show_download_button=False,
69
- show_fullscreen_button=False, show_label=False, show_share_button=False,
70
- interactive=False, container=False, visible=True)
71
 
72
  resp = gr.Textbox("", show_copy_button=False,
73
  show_label=False,
74
  container=False,
75
  max_lines=15)
76
 
77
- send_btn.click(send, inputs=[question], outputs=[resp, wait])
 
78
 
79
  if __name__ == "__main__":
80
  demo.queue().launch()
 
1
+ '''
2
+ Inspiré de https://huggingface.co/spaces/Tonic/Lucie-7B
3
+ '''
4
+
5
  import gradio as gr
6
  import spaces
7
  from transformers import AutoTokenizer, AutoModelForCausalLM
 
41
  Tu es honnête et si tu ne sais pas quelque chose, tu le dis simplement.'''
42
  full_prompt = f"""<|system|>{system_prompt}</s><|user|>{question}</s><|assistant|>"""
43
  inputs = TOKENIZER(full_prompt, return_tensors="pt").to(DEVICE)
44
+ # Tous les paramètres sont les paramètres par défaut de Tonic/Lucie-7B
45
  outputs = MODEL.generate(
46
  **inputs,
47
  # max_new_tokens=max_new_tokens, # TODO: S'occuper des max_tokens avec tous les modèles
48
  max_new_tokens=512,
49
+ temperature=0.7,
50
+ top_p=0.9,
51
+ top_k=50,
52
+ repetition_penalty=1.2,
53
  do_sample=True,
54
  pad_token_id=TOKENIZER.eos_token_id
55
  )
 
57
  r = response.split("<|assistant|>")[-1].strip()
58
  return r, gr.Image()
59
 
60
+ with gr.Row():
61
+ gr.Image("./files/drane.png", show_download_button=False,
62
+ show_fullscreen_button=False, show_label=False, show_share_button=False,
63
+ interactive=False, container=False)
64
+ # https://www.svgrepo.com/svg/403600/girl
65
+ gr.Image("./files/lucie.png", show_download_button=False,
66
+ show_fullscreen_button=False, show_label=False, show_share_button=False,
67
+ interactive=False, container=False)
68
+
69
+ with gr.Row():
70
+ gr.Markdown("# Lucie d'OpenLLM")
71
+ gr.Markdown("## Discute avec Lucie")
72
+
73
+ # gr.HTML('''<div><p align='right'>Pose ta question:</p></div>''')
74
+ with gr.Row():
75
+ question = gr.Textbox(
76
+ "",
77
+ placeholder="Pose ta question ici",
78
+ show_copy_button=False,
79
+ show_label=False,
80
+ container=False,
81
+ lines=3,
82
+ autofocus=True,
83
+ scale=10
84
+ )
85
+ send_btn = gr.Button("Ok", scale=1)
86
  # L'image blanche qui affiche la progression
87
+ # wait = gr.Image("./files/white.jpg", height=25, show_download_button=False,
88
+ # show_fullscreen_button=False, show_label=False, show_share_button=False,
89
+ # interactive=False, container=False, visible=True)
90
 
91
  resp = gr.Textbox("", show_copy_button=False,
92
  show_label=False,
93
  container=False,
94
  max_lines=15)
95
 
96
+ # send_btn.click(send, inputs=[question], outputs=[resp, wait])
97
+ send_btn.click(send, inputs=[question], outputs=[resp])
98
 
99
  if __name__ == "__main__":
100
  demo.queue().launch()
files/drane.png ADDED
files/lucie.png ADDED