janushex commited on
Commit
f9a2876
·
verified ·
1 Parent(s): 374777f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +47 -24
app.py CHANGED
@@ -1,28 +1,51 @@
1
- import torch
2
  import gradio as gr
3
- from transformers import AutoModelForCausalLM, AutoTokenizer
4
-
5
- # 🔹 Izvēlamies labāku modeli (OPT-1.3B)
6
- model_name = "facebook/opt-1.3b"
7
- device = "cpu" # Hugging Face Free darbojas uz CPU (ja ir GPU, var iestatīt "cuda")
8
-
9
- # 🔹 Ielādē modeli un tokenizer
10
- tokenizer = AutoTokenizer.from_pretrained(model_name)
11
- model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
12
-
13
- # 🔹 Funkcija čatam
14
- def chat_with_gpt(history):
15
- user_input = history[-1][0]
16
- inputs = tokenizer(user_input, return_tensors="pt").to(device)
17
-
18
- # Ģenerēšanas parametri
19
- gen_params = {
20
- "max_new_tokens": 100,
21
- "temperature": 0.7,
22
- "top_p": 0.9,
23
- "repetition_penalty": 1.2,
24
- "do_sample": True,
25
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
 
27
  outputs = model.generate(**inputs, **gen_params)
28
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
1
import json
import gradio as gr
from transformers import T5Tokenizer, T5ForConditionalGeneration

# 1. Load the Q/A dataset used for exact-lookup answers before falling back
#    to the generative model.
# NOTE(review): no error handling here — if data.json is missing or not
# valid JSON the app fails at import time; confirm the file ships alongside app.py.
with open("data.json", "r", encoding="utf-8") as f:
    # Expected shape: a list of {"question": ..., "answer": ...} dicts
    # (that is how generate_answer indexes each item).
    data = json.load(f)

# 2. Load the pretrained mT5 model and its tokenizer.
#    No explicit device placement — the model stays on the default device (CPU).
model_name = "google/mt5-small"
tokenizer = T5Tokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name)
13
+
14
# 3. Answer generation: prefer a stored dataset answer, otherwise fall back
#    to the generative mT5 model.
def generate_answer(question):
    """Answer a free-text user question.

    First scans the loaded ``data`` list for an entry whose stored question
    contains the user's question (case-insensitive substring match) and
    returns its stored answer. Only when no usable stored answer exists is
    the (much slower) mT5 model invoked.

    Fix over the previous version: the model was run unconditionally on
    every call and its output discarded whenever a dataset answer existed,
    wasting a full ``model.generate()`` pass per lookup hit.

    Parameters
    ----------
    question : str
        The user's question.

    Returns
    -------
    str
        The stored answer if found (and non-empty), otherwise the
        model-generated text.
    """
    # Dataset lookup first. `next(...)` mirrors the original loop+break:
    # take the FIRST matching entry only.
    needle = question.lower()
    found_answer = next(
        (item['answer'] for item in data if needle in item['question'].lower()),
        None,
    )
    # Truthiness check (not `is not None`) preserved from the original:
    # an empty stored answer also falls through to the model.
    if found_answer:
        return found_answer

    # Fall back to the generative model.
    inputs = tokenizer(question, return_tensors="pt", max_length=512, truncation=True)
    outputs = model.generate(**inputs, max_new_tokens=200)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
31
+
32
# 4. Gradio user interface: a single two-line text box wired to
#    generate_answer, rendering its string result as plain text.
iface = gr.Interface(
    fn=generate_answer,
    inputs=gr.Textbox(lines=2, placeholder="Uzdot jautājumu par enerģētiku vai elektromontāžu..."),
    outputs="text",
    title="Enerģētikas/Elektromontāžas Čata Robots",
    description="Uzdot jautājumus par elektromontāžu, vai enerģētiku. Ja ir jautajumi datu bāzē, atbildēs no datu bāzes, pretējā gadījumā, no mT5 modeļa.",
)

# Launched at import time — presumably this runs as a Hugging Face Space,
# where the script's top level is the entry point (no __main__ guard needed).
iface.launch()
42
+
43
# Example data.json format (a list of question/answer records):
# [
#   {"question": "Kādi ir elektroinstalācijas drošības noteikumi?", "answer": "Galvenie noteikumi ir..."},
#   {"question": "Kā aprēķināt jaudu trīsfāžu ķēdē?", "answer": "Jauda trīsfāžu ķēdē tiek aprēķināta..."},
#   ...
# ]
49
 
50
  outputs = model.generate(**inputs, **gen_params)
51
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)