import gradio as gr
from gpt4all import GPT4All
from urllib.request import urlopen
import json
import re
# populate all models available from GPT4All
url = "https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-chat/metadata/models3.json"
response = urlopen(url)
data_json = json.loads(response.read())

def model_choices():
    model_list = [data_json[i]['filename'] for i in range(len(data_json))]
    return model_list


# get each model's description
model_description = {model['filename']: model['description'] for model in data_json}
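# For reference, each entry in models3.json provides (among other fields) a
# "filename" and an HTML-formatted "description"; an abbreviated, illustrative entry:
#   {"filename": "orca-mini-3b-gguf2-q4_0.gguf",
#    "description": "<strong>Mini Orca (Small)</strong><br><ul><li>...</li></ul>"}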

def remove_endtags(html_string, tags):
    """Remove closing HTML tags from the input string."""
    for tag in tags:
        html_string = re.sub(fr"</{tag}>", "", html_string)
    return html_string


def replace_starttags(html_string, replacements):
    """Replace opening HTML tags with the corresponding values."""
    for tag, replacement in replacements.items():
        html_string = html_string.replace(tag, replacement)
    return html_string

def format_html_string(html_string):
    """Format the HTML string to a readable text format."""
    endtags_to_remove = ["ul", "li", "br", "strong", "a"]
    html_string = remove_endtags(html_string, endtags_to_remove)

    starttag_replacements = {
        "<ul>": "",
        "<li>": "\n➤ ",
        "<br>": "\n",
        "<strong>": "",
        '<a href="https://opensource.org/license/mit">': "",
        '<a href="https://llama.meta.com/llama3/license/">': "",
        '<a href="https://atlas.nomic.ai/">': "",
    }
    formatted_string = replace_starttags(html_string, starttag_replacements)
    return formatted_string
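# Illustrative example of the conversion (hypothetical input, not taken from models3.json):
#   format_html_string("<strong>Fast model</strong><br><ul><li>Chat based</li><li>Runs locally</li></ul>")
#   -> "Fast model\n\n➤ Chat based\n➤ Runs locally"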

def llm_intro(selected_model):
    html_string = model_description.get(selected_model, "No description available for this model selection.")
    formatted_description = format_html_string(html_string)
    return formatted_description


# cache models for faster reloads
model_cache = {}

def load_model(model_name):
    """
    This function checks the cache before loading a model.
    If the model is cached, it returns the cached version.
    Otherwise, it loads the model, caches it, and then returns it.
    """
    if model_name not in model_cache:
        model = GPT4All(model_name)
        model_cache[model_name] = model
    return model_cache[model_name]
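# Illustrative usage of the cache (the filename is an example choice; GPT4All()
# downloads the model file on first use if it is not already present locally):
#   model = load_model("orca-mini-3b-gguf2-q4_0.gguf")  # loads and caches the model
#   model = load_model("orca-mini-3b-gguf2-q4_0.gguf")  # second call returns the cached instance
#   with model.chat_session():
#       reply = model.generate("Hello!", max_tokens=64)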

def show_image(img):
    return img