File size: 2,090 Bytes
3e2acac
 
 
5cfaac9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9dddbaf
 
 
 
 
 
5cfaac9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
from gpt4all import GPT4All
from urllib.request import urlopen
import json


# populate all models available from GPT4All
# NOTE(review): this HTTP fetch runs at import time and will raise on any
# network failure — confirm that is acceptable for this app's startup path.
url = "https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-chat/metadata/models3.json"
response = urlopen(url)
data_json = json.loads(response.read())


def model_choices():
    """Return the list of model filenames available from GPT4All.

    Reads the module-level ``data_json`` metadata fetched at import time.

    Returns:
        list[str]: one ``filename`` entry per model in the metadata feed.
    """
    # Iterate the entries directly rather than indexing via range(len(...)).
    return [model['filename'] for model in data_json]

    
# get each models' description
# Maps model filename -> raw HTML description snippet from the metadata feed.
model_description = {model['filename']: model['description'] for model in data_json}


def remove_endtags(html_string, tags):
    """Remove rear (closing) HTML tags from the input string.

    Args:
        html_string (str): text possibly containing closing HTML tags.
        tags (iterable of str): tag names (without angle brackets) whose
            closing form ``</tag>`` should be stripped.

    Returns:
        str: the input with every listed closing tag removed.
    """
    # BUG FIX: the original called re.sub(), but `re` was never imported
    # anywhere in this file, so any call raised NameError. Since the tag
    # names are plain literals, str.replace is equivalent and needs no regex.
    for tag in tags:
        html_string = html_string.replace(f"</{tag}>", "")
    return html_string


def replace_starttags(html_string, replacements):
    """Substitute each HTML tag found in *html_string* with its plain-text
    replacement, as given by the *replacements* mapping."""
    formatted = html_string
    for needle, substitute in replacements.items():
        formatted = formatted.replace(needle, substitute)
    return formatted


def format_html_string(html_string):
    """Convert a model-description HTML snippet into readable plain text."""
    # First drop the closing forms of the list/line-break tags, then map the
    # remaining markup onto plain-text / Markdown equivalents.
    stripped = remove_endtags(html_string, ["ul", "li", "br"])
    markup_map = {
        "<li>": "\n➤ ",
        "<br>": "\n",
        "<strong>": "**",
        "</strong>": "**",
    }
    return replace_starttags(stripped, markup_map)


def llm_intro(selected_model):
    """Return a human-readable description for *selected_model*, falling back
    to a placeholder message when the model is unknown."""
    fallback = "No description available for this model selection."
    raw_html = model_description.get(selected_model, fallback)
    return format_html_string(raw_html)

    
# cache models for faster reloads
# Maps model filename -> loaded GPT4All instance; consulted by load_model.
model_cache = {}  


def load_model(model_name):
    """
    Return a GPT4All instance for *model_name*, reusing a cached one.

    Looks the name up in the module-level ``model_cache`` first; on a miss,
    instantiates the model, stores it in the cache, and hands it back.
    """
    # EAFP: a cache hit is the common case, so try the lookup directly.
    try:
        return model_cache[model_name]
    except KeyError:
        instance = GPT4All(model_name)
        model_cache[model_name] = instance
        return instance