yxmauw committed on
Commit
f57f2aa
·
verified ·
1 Parent(s): a0a3c71

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -68
app.py CHANGED
@@ -6,73 +6,6 @@ import time
6
  from load_llms import model_choices, llm_intro, load_model
7
 
8
 
9
- # populate all models available from GPT4All
10
- # url = "https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-chat/metadata/models3.json"
11
- # response = urlopen(url)
12
- # data_json = json.loads(response.read())
13
-
14
-
15
- # def model_choices():
16
- # model_list = [data_json[i]['filename'] for i in range(len(data_json))]
17
- # return model_list
18
-
19
-
20
- # get each models' description
21
- # model_description = {model['filename']: model['description'] for model in data_json}
22
-
23
-
24
- # def remove_endtags(html_string, tags):
25
- # """Remove rear HTML tags from the input string."""
26
- # for tag in tags:
27
- # html_string = re.sub(fr"</{tag}>", "", html_string)
28
- # return html_string
29
-
30
-
31
- # def replace_starttags(html_string, replacements):
32
- # """Replace starting HTML tags with the corresponding values."""
33
- # for tag, replacement in replacements.items():
34
- # html_string = html_string.replace(tag, replacement)
35
- # return html_string
36
-
37
-
38
- # def format_html_string(html_string):
39
- # """Format the HTML string to a readable text format."""
40
- # tags_to_remove = ["ul", "li", "br"]
41
- # html_string = remove_endtags(html_string, tags_to_remove)
42
-
43
- # tag_replacements = {
44
- # "<li>": "\n➤ ",
45
- # "<br>": "\n",
46
- # "<strong>": "**",
47
- # "</strong>": "**"
48
- # }
49
- # formatted_string = replace_starttags(html_string, tag_replacements)
50
-
51
- # return formatted_string
52
-
53
-
54
- # def llm_intro(selected_model):
55
- # html_string = model_description.get(selected_model, "No description available for this model selection.")
56
- # formatted_description = format_html_string(html_string)
57
- # return formatted_description
58
-
59
- # # cache models for faster reloads
60
- # model_cache = {}
61
-
62
-
63
- # def load_model(model_name):
64
- # """
65
- # This function checks the cache before loading a model.
66
- # If the model is cached, it returns the cached version.
67
- # Otherwise, it loads the model, caches it, and then returns it.
68
- # """
69
- # if model_name not in model_cache:
70
- # model = GPT4All(model_name)
71
- # model_cache[model_name] = model
72
- # return model_cache[model_name]
73
-
74
- # clear = gr.ClearButton([input_text, chatbot])
75
-
76
  # Construct chatbot
77
  def generate_response(model_name, message, chat_history):
78
  model = load_model(model_name)
@@ -112,4 +45,6 @@ with gr.Blocks() as demo:
112
  message.submit(generate_response, inputs=[model_dropdown, message, state], outputs=[chatbot, state])
113
 
114
  # Launch the Gradio app
115
- demo.launch()
 
 
 
6
  from load_llms import model_choices, llm_intro, load_model
7
 
8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
  # Construct chatbot
10
  def generate_response(model_name, message, chat_history):
11
  model = load_model(model_name)
 
45
  message.submit(generate_response, inputs=[model_dropdown, message, state], outputs=[chatbot, state])
46
 
47
  # Launch the Gradio app
48
+ demo.launch()
49
+
50
+ # clear = gr.ClearButton([input_text, chatbot])