Update app.py
app.py
CHANGED
@@ -458,11 +458,11 @@ def generate_text (prompt, chatbot, history, rag_option, model_option, openai_ap
         #result = rag_chain(llm, history_text_und_prompt, db)
     else:
         #splittet = False
-
-
-
-
-
+        print("Calling LLM without RAG: ...........")
+        resulti = llm_chain(llm, history_text_und_prompt)
+        result = resulti.strip()
+        """
+        # Alternatively with API_URL - but the model needs 93 B of space!!!
         data = {"inputs": prompt, "options": {"max_new_tokens": max_new_tokens},}
         response = requests.post(API_URL_TEXT, headers=HEADERS, json=data)
         result = response.json()
@@ -476,7 +476,7 @@ def generate_text (prompt, chatbot, history, rag_option, model_option, openai_ap
     print(history)
     print(chatbot_message)
     result = chatbot_message
-
+    """



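The commit replaces the remote call to the Hugging Face Inference API with a direct llm_chain invocation and parks the old request code inside a triple-quoted string. For reference, a minimal sketch of the disabled remote path, assuming the documented Inference API request/response shapes; API_URL_TEXT, HEADERS, and the gpt2 model URL are hypothetical stand-ins for the values defined elsewhere in app.py:

import os
import requests

# Hypothetical stand-ins for the module-level constants in app.py.
API_URL_TEXT = "https://api-inference.huggingface.co/models/gpt2"
HEADERS = {"Authorization": f"Bearer {os.environ.get('HF_TOKEN', '')}"}

def generate_via_api(prompt, max_new_tokens=256):
    # Per the Inference API docs, generation arguments such as max_new_tokens
    # belong under "parameters"; "options" carries flags like wait_for_model.
    # (The disabled code placed max_new_tokens under "options" instead.)
    data = {
        "inputs": prompt,
        "parameters": {"max_new_tokens": max_new_tokens},
        "options": {"wait_for_model": True},
    }
    response = requests.post(API_URL_TEXT, headers=HEADERS, json=data, timeout=60)
    response.raise_for_status()
    result = response.json()
    # Text-generation endpoints return a list of {"generated_text": ...} dicts.
    return result[0]["generated_text"]

The live path now stays in-process, taking result from llm_chain(llm, history_text_und_prompt) and stripping whitespace, which sidesteps the hosted model that, per the new comment, needs 93 B of space.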