import os
import gradio as gr

from langchain.chains import LLMChain, RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader, WebBaseLoader
from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import OpenAIWhisperParser

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
#from langchain.vectorstores import MongoDBAtlasVectorSearch

#from pymongo import MongoClient

from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())
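# load_dotenv() pulls OPENAI_API_KEY (and any other settings) from a local .env file;
# in a Hugging Face Space the same variables are provided via the Space secrets.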


# Optionally create an OpenAI client directly; the key comes from the secrets
#client = OpenAI(
#    api_key=os.getenv("OPENAI_API_KEY"),
#)

# Split the documents only on the first request; the flag is set once the corpus is built
splitted = False

# For MongoDB instead of Chroma as the vector store
#MONGODB_URI = os.environ["MONGODB_ATLAS_CLUSTER_URI"]
#client = MongoClient(MONGODB_URI)
#MONGODB_DB_NAME = "langchain_db"
#MONGODB_COLLECTION_NAME = "gpt-4"
#MONGODB_COLLECTION = client[MONGODB_DB_NAME][MONGODB_COLLECTION_NAME]
#MONGODB_INDEX_NAME = "default"

template = """Antworte in deutsch, wenn es nicht explizit anders gefordert wird. Wenn du die Antwort nicht kennst, antworte einfach, dass du es nicht weißt. Versuche nicht, die Antwort zu erfinden oder aufzumocken. Halte die Antwort so kurz aber exakt."""

llm_template = "Beantworte die Frage am Ende. " + template + "Frage: {question} Hilfreiche Antwort: "
rag_template = "Nutze die folgenden Kontextteile, um die Frage am Ende zu beantworten. " + template + "{context} Frage: {question} Hilfreiche Antwort: "

LLM_CHAIN_PROMPT = PromptTemplate(input_variables = ["question"], 
                                  template = llm_template)
RAG_CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], 
                                  template = rag_template)
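
# Illustrative only (kept commented out, so it has no effect): render a template with
# dummy values to inspect the final prompt text that would be sent to the model.
#print(RAG_CHAIN_PROMPT.format(context = "<gefundene Textabschnitte>", question = "Was ist GPT-4?"))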

OAI_API_KEY = os.getenv("OPENAI_API_KEY")
# Path where documents can be stored - locally, i.e. here in the HF Space (otherwise on your own machine)
PATH_WORK = "."
CHROMA_DIR  = "/chroma"
YOUTUBE_DIR = "/youtube"

PDF_URL       = "https://arxiv.org/pdf/2303.08774.pdf"
WEB_URL       = "https://openai.com/research/gpt-4"
YOUTUBE_URL_1 = "https://www.youtube.com/watch?v=--khbXchTeE"
YOUTUBE_URL_2 = "https://www.youtube.com/watch?v=hdhZwyf24mE"
YOUTUBE_URL_3 = "https://www.youtube.com/watch?v=vw-KWfKwvTQ"

#MODEL_NAME = "gpt-3.5-turbo-16k"
MODEL_NAME = "gpt-4"

def document_loading_splitting():
    # Mark the corpus as built so later requests skip the expensive reload
    global splitted
    splitted = True
    # Document loading
    docs = []
    # Load PDF
    loader = PyPDFLoader(PDF_URL)
    docs.extend(loader.load())
    # Load Web
    loader = WebBaseLoader(WEB_URL)
    docs.extend(loader.load())
    # Load YouTube
    loader = GenericLoader(YoutubeAudioLoader([YOUTUBE_URL_1,
                                               YOUTUBE_URL_2,
                                               YOUTUBE_URL_3], PATH_WORK + YOUTUBE_DIR), 
                           OpenAIWhisperParser())
    docs.extend(loader.load())
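    # The audio tracks are downloaded to PATH_WORK + YOUTUBE_DIR and transcribed with
    # OpenAI's Whisper model, which is why this first run can take several minutes.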
    # Document splitting: chunks of up to 1,500 characters with 150 characters of
    # overlap, so that context is not lost at chunk boundaries
    text_splitter = RecursiveCharacterTextSplitter(chunk_overlap = 150,
                                                   chunk_size = 1500)
    splits = text_splitter.split_documents(docs)
    return splits
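
# Sketch of the intended one-time usage (invoke() below does exactly this on the
# first request): build the splits once, then hand them to the vector store.
#splits = document_loading_splitting()
#document_storage_chroma(splits)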

def document_storage_chroma(splits):
    Chroma.from_documents(documents = splits, 
                          embedding = OpenAIEmbeddings(disallowed_special = ()), 
                          persist_directory = PATH_WORK + CHROMA_DIR)
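    # Note: depending on the chromadb version used by LangChain, an explicit
    # persist() call may be needed to flush the store to persist_directory, e.g.:
    #db = Chroma.from_documents(...)  # same arguments as above
    #db.persist()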

def document_storage_mongodb(splits):
    MongoDBAtlasVectorSearch.from_documents(documents = splits,
                                            embedding = OpenAIEmbeddings(disallowed_special = ()),
                                            collection = MONGODB_COLLECTION,
                                            index_name = MONGODB_INDEX_NAME)

def document_retrieval_chroma(llm, prompt):      
    embeddings = OpenAIEmbeddings()
    # Alternative embeddings for the vector store to compute similarity vectors
    #embeddings = HuggingFaceInstructEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2", model_kwargs={"device": "cpu"})
    db = Chroma(embedding_function = embeddings,
                persist_directory = PATH_WORK + CHROMA_DIR)
    
    return db

def document_retrieval_mongodb(llm, prompt):
    db = MongoDBAtlasVectorSearch.from_connection_string(MONGODB_URI,
                                                         MONGODB_DB_NAME + "." + MONGODB_COLLECTION_NAME,
                                                         OpenAIEmbeddings(disallowed_special = ()),
                                                         index_name = MONGODB_INDEX_NAME)
    return db
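
# Both MongoDB helpers above assume that the commented-out MongoDBAtlasVectorSearch
# import and the MONGODB_* settings at the top of this file are re-enabled; the
# "MongoDB" radio option is likewise commented out in the Gradio UI below.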

def llm_chain(llm, prompt):
    llm_chain = LLMChain(llm = llm, prompt = LLM_CHAIN_PROMPT)
    result = llm_chain.run({"question": prompt})
    return result

def rag_chain(llm, prompt, db):
    rag_chain = RetrievalQA.from_chain_type(llm, 
                                            chain_type_kwargs = {"prompt": RAG_CHAIN_PROMPT}, 
                                            retriever = db.as_retriever(search_kwargs = {"k": 3}), 
                                            return_source_documents = True)
    result = rag_chain({"query": prompt})
    return result["result"]

def invoke(openai_api_key, rag_option, prompt):
    global splitted
    
    if (openai_api_key == "" or openai_api_key == "sk-"):
        #raise gr.Error("OpenAI API Key is required.")
        openai_api_key = OAI_API_KEY
    if (rag_option is None):
        raise gr.Error("Retrieval Augmented Generation ist erforderlich.")
    if (prompt == ""):
        raise gr.Error("Prompt ist erforderlich.")
    try:
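        # temperature = 0 keeps the completions as deterministic as possible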
        llm = ChatOpenAI(model_name = MODEL_NAME, 
                         openai_api_key = openai_api_key, 
                         temperature = 0)
        if (rag_option == "Chroma"):
            # only needs to run once; afterwards the Chroma store is reused
            if not splitted:
                splits = document_loading_splitting()
                document_storage_chroma(splits)
            db = document_retrieval_chroma(llm, prompt)
            result = rag_chain(llm, prompt, db)
        elif (rag_option == "MongoDB"):
            #splits = document_loading_splitting()
            #document_storage_mongodb(splits)
            db = document_retrieval_mongodb(llm, prompt)
            result = rag_chain(llm, prompt, db)
        else:
            result = llm_chain(llm, prompt)
    except Exception as e:
        raise gr.Error(str(e))
    return result

description = """<strong>Überblick:</strong> Hier wird ein <strong>Large Language Model (LLM)</strong> mit 
                 <strong>Retrieval Augmented Generation (RAG)</strong> auf <strong>externen Daten</strong> demonstriert.\n\n
                 <strong>Genauer:</strong> Folgende externe Daten sind als Beispiel gegeben:
                 <a href='""" + YOUTUBE_URL_1 + """'>YouTube</a>, <a href='""" + PDF_URL + """'>PDF</a>, and <a href='""" + WEB_URL + """'>Web.</a> <br>
                 Alle neueren Datums!.
                 <ul style="list-style-type:square;">
                 <li>Setze "Retrieval Augmented Generation" auf  "<strong>Off</strong>" und gib einen Prompt ein." Das entspricht <strong> ein LLM nutzen ohne RAG</strong></li>
                 <li>Setze "Retrieval Augmented Generation" to "<strong>Chroma</strong>"  und gib einen Prompt ein. Das <strong>LLM mit RAG</strong> weiß auch Antworten zu aktuellen Themen aus den angefügten Datenquellen</li>
                 <li>Experimentiere mit Prompts, z.B.  Antworte in deutsch, englisch, ..." oder "schreibe ein Python Programm, dass die GPT-4 API aufruft."</li>
                 </ul>\n\n
                 """

gr.close_all()
demo = gr.Interface(fn=invoke, 
                    inputs = [gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1), 
                              #gr.Radio(["Off", "Chroma", "MongoDB"], label="Retrieval Augmented Generation", value = "Off"),
                              gr.Radio(["Off", "Chroma"], label="Retrieval Augmented Generation", value = "Off"),
                              gr.Textbox(label = "Prompt", value = "What is GPT-4?", lines = 1)],
                    outputs = [gr.Textbox(label = "Completion", lines = 1)],
                    title = "Generative AI - LLM & RAG",
                    description = description)
demo.launch()