import json
import re
from urllib.request import urlopen

import gradio as gr
from gpt4all import GPT4All

# Fetch the official GPT4All model metadata (one JSON record per model).
url = "https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-chat/metadata/models3.json"
response = urlopen(url)
data_json = json.loads(response.read())
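
# Each record in data_json describes one model; only two fields are used
# below: "filename" (also the name passed to GPT4All when loading) and
# "description" (an HTML snippet that the helpers further down flatten
# into plain text).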


def model_choices():
    """Return the list of available model filenames."""
    return [model['filename'] for model in data_json]


# Map each model filename to its HTML description for quick lookup.
model_description = {model['filename']: model['description'] for model in data_json}


def remove_endtags(html_string, tags):
    """Remove closing HTML tags from the input string."""
    for tag in tags:
        html_string = re.sub(fr"</{tag}>", "", html_string)
    return html_string


def replace_starttags(html_string, replacements):
    """Replace opening HTML tags with their corresponding replacement values."""
    for tag, replacement in replacements.items():
        html_string = html_string.replace(tag, replacement)
    return html_string


def format_html_string(html_string):
    """Format an HTML description string into readable plain text."""
    endtags_to_remove = ["ul", "li", "br", "strong", "a"]
    html_string = remove_endtags(html_string, endtags_to_remove)

    starttag_replacements = {
        "<ul>": "",
        "<li>": "\n➤ ",
        "<br>": "\n",
        "<strong>": "",
        '<a href="https://opensource.org/license/mit">': "",
        '<a href="https://llama.meta.com/llama3/license/">': "",
        '<a href="https://atlas.nomic.ai/">': "",
    }
    formatted_string = replace_starttags(html_string, starttag_replacements)

    return formatted_string
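
# A quick sanity check with a made-up description (illustrative, not taken
# from models3.json):
#     format_html_string("<strong>Fast model</strong><br><ul><li>Chat</li></ul>")
# returns "Fast model\n\n➤ Chat".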


def llm_intro(selected_model):
    """Return the formatted plain-text description for the selected model."""
    html_string = model_description.get(selected_model, "No description available for this model selection.")
    formatted_description = format_html_string(html_string)
    return formatted_description


# Cache loaded models by name so each one is only instantiated once.
model_cache = {}


def load_model(model_name):
    """
    Check the cache before loading a model.

    If the model is cached, return the cached version.
    Otherwise, load the model, cache it, and then return it.
    """
    if model_name not in model_cache:
        model = GPT4All(model_name)
        model_cache[model_name] = model
    return model_cache[model_name]
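
# A minimal usage sketch, assuming the helpers above are wired into a simple
# Gradio interface; the component names and layout here are illustrative, and
# loading a model downloads it on first use, so network access and enough RAM
# for the chosen model are assumed.
if __name__ == "__main__":
    with gr.Blocks() as demo:
        chosen_model = gr.Dropdown(choices=model_choices(), label="Model")
        description = gr.Textbox(label="Description", lines=6)
        # Refresh the description whenever a different model is selected.
        chosen_model.change(llm_intro, inputs=chosen_model, outputs=description)

        prompt = gr.Textbox(label="Prompt")
        reply = gr.Textbox(label="Reply")

        def ask(model_name, text):
            # load_model caches, so repeated prompts reuse the same instance.
            model = load_model(model_name)
            with model.chat_session():
                return model.generate(text, max_tokens=256)

        prompt.submit(ask, inputs=[chosen_model, prompt], outputs=reply)

    demo.launch()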