sotosbarl committed
Commit f3fc9f4
Parent: 7225f52

Upload app.py

Files changed (1):
  app.py +98 -99
app.py CHANGED
@@ -1,102 +1,101 @@
- from huggingface_hub import InferenceClient
- import gradio as gr
-
- client = InferenceClient(
-     "mistralai/Mistral-7B-Instruct-v0.1"
- )
-
-
- def format_prompt(message, history):
-     prompt = "<s>"
-     for user_prompt, bot_response in history:
-         prompt += f"[INST] {user_prompt} [/INST]"
-         prompt += f" {bot_response}</s> "
-     prompt += f"[INST] {message} [/INST]"
-     return prompt
-
- def generate(
-     prompt, history, temperature=0.7, max_new_tokens=256, top_p=0.95, repetition_penalty=1.1,
- ):
-     temperature = float(temperature)
-     if temperature < 1e-2:
-         temperature = 1e-2
-     top_p = float(top_p)
-
-     generate_kwargs = dict(
-         temperature=temperature,
-         max_new_tokens=max_new_tokens,
-         top_p=top_p,
-         repetition_penalty=repetition_penalty,
-         do_sample=True,
-         seed=42,
-     )
-
-     formatted_prompt = format_prompt(prompt, history)
-
-     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
-     output = ""
-
-     for response in stream:
-         output += response.token.text
-         yield output
      return output


- additional_inputs=[
-     gr.Slider(
-         label="Temperature",
-         value=0.7,
-         minimum=0.0,
-         maximum=1.0,
-         step=0.05,
-         interactive=True,
-         info="Higher values produce more diverse outputs",
-     ),
-     gr.Slider(
-         label="Max new tokens",
-         value=256,
-         minimum=0,
-         maximum=1024,
-         step=64,
-         interactive=True,
-         info="The maximum number of new tokens",
-     ),
-     gr.Slider(
-         label="Top-p (nucleus sampling)",
-         value=0.95,
-         minimum=0.0,
-         maximum=1.0,
-         step=0.05,
-         interactive=True,
-         info="Higher values sample more low-probability tokens",
-     ),
-     gr.Slider(
-         label="Repetition penalty",
-         value=1.1,
-         minimum=1.0,
-         maximum=2.0,
-         step=0.05,
-         interactive=True,
-         info="Penalize repeated tokens",
-     )
- ]
-
- css = """
- #mkd {
-   height: 500px;
-   overflow: auto;
-   border: 1px solid #ccc;
- }
- """
-
- with gr.Blocks(css=css) as demo:
-     gr.HTML("<h1><center>Mistral 7B Instruct</center></h1>")
-     gr.HTML("<h3><center>In this demo, you can chat with the <a href='https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1'>Mistral-7B-Instruct</a> model. 💬</center></h3>")
-     gr.HTML("<h3><center>Learn more about the model <a href='https://huggingface.co/docs/transformers/main/model_doc/mistral'>here</a>. 📚</center></h3>")
-     gr.ChatInterface(
-         generate,
-         additional_inputs=additional_inputs,
-         examples=[["What is the secret to life?"], ["Write me a recipe for pancakes."]]
-     )
-
- demo.queue().launch()
 
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
+ import torch
+ import pickle
+ import streamlit as st
+ device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+
+ from translate import Translator
+
+ def init_session_state():
+     if 'history' not in st.session_state:
+         st.session_state.history = ""
+
+ # Initialize session state
+ init_session_state()
+
+ pipe = pipeline("text2text-generation", model="google/flan-t5-base")
+ # pipe = pipeline("text-generation", model="GeneZC/MiniChat-1.5-3B")
+ # pipe = pipeline("text-generation", model="mistralai/Mistral-7B-Instruct-v0.2")
+ # model_name = "MoritzLaurer/mDeBERTa-v3-base-mnli-xnli"
+ # tokenizer = AutoTokenizer.from_pretrained(model_name)
+ # model = AutoModelForSequenceClassification.from_pretrained(model_name)
+
+ classifier = pipeline("zero-shot-classification", model="MoritzLaurer/mDeBERTa-v3-base-mnli-xnli")
+
+ # Earlier label sets, kept for reference (Greek legal topics):
+ # with open('chapter_titles.pkl', 'rb') as file:
+ #     titles_astiko = pickle.load(file)
+ # labels1 = ["κληρονομικό", "ακίνητα", "διαζύγιο"]  # inheritance, real estate, divorce
+ # labels2 = ["αποδοχή κληρονομιάς", "αποποίηση", "διαθήκη"]  # acceptance of inheritance, renunciation, will
+ # labels3 = ["μίσθωση", "κυριότητα", "έξωση", "απλήρωτα νοίκια"]  # lease, ownership, eviction, unpaid rent
+ # titles_astiko = ["γάμος", "αλλοδαπός", "φορολογία", "κληρονομικά", "στέγη", "οικογενειακό", "εμπορικό", "κλοπή", "απάτη"]
+ #     # marriage, foreigner, taxation, inheritance, housing, family, commercial, theft, fraud
+
+ # Load the topic dictionary: legal area -> sub-topic -> reference answer text
+ with open('my_dict.pickle', 'rb') as file:
+     dictionary = pickle.load(file)
+
+ def classify(text, labels):
+     output = classifier(text, labels, multi_label=False)
      return output


+ text = st.text_input('Enter some text:')  # Input field for the user's question
+
+ if text:
+     # Stage 1: classify the question into a top-level legal area
+     labels = list(dictionary)
+     output = classify(text, labels)
+     output = output["labels"][0]
+
+     # Stage 2: classify it into a sub-topic within that area
+     labels = list(dictionary[output])
+     output2 = classify(text, labels)
+     output2 = output2["labels"][0]
+
+     answer = dictionary[output][output2]
+
+     # Create translator objects with the needed source and target languages
+     translator = Translator(from_lang='el', to_lang='en')
+     translator2 = Translator(from_lang='en', to_lang='el')
+
+     st.text("Η ερώτησή σας σχετίζεται με " + output + " δίκαιο")  # "Your question relates to ... law"
+
+     # Translate the reference answer and the question from Greek to English
+     answer = translator.translate(answer)
+     text = translator.translate(text)
+
+     st.text("Πιο συγκεκριμένα σχετίζεται με " + output2)  # "More specifically, it relates to ..."
+
+     # text_to_translate2 = text[499:999]
+     # translated_text2 = translator.translate(text_to_translate2)
+
+     # Append the grounding text and the question to the running prompt history
+     st.session_state.history += "Based on this info only:" + answer + " ,answer this question, by reasoning step by step:" + text
+     out = pipe(st.session_state.history, max_new_tokens=256)  # Generate an answer from the accumulated history
+
+     # Translate the model's English answer back to Greek and display it
+     translated_text2 = translator2.translate(out[0]['generated_text'])
+     st.text(translated_text2)
+
+     # with st.expander("View Full Output", expanded=False):
+     #     st.write(translated_text2, allow_output_mutation=True)
+
+     # Debug output, kept for reference:
+     # st.text(st.session_state.history)
+     # st.text(translated_text2)
+     # st.text("History: " + st.session_state.history)
+     # st.text(output)
+     # st.text(output2)
+     # st.text(answer)
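
Note on the new flow: the code assumes my_dict.pickle holds a two-level mapping from legal area to sub-topic to a reference answer, and runs the zero-shot classifier twice: first over the top-level keys, then over the chosen area's sub-keys. A minimal sketch of that cascade, with a hypothetical stand-in dictionary (the real keys are Greek):

    # Minimal sketch of the two-stage zero-shot cascade used by the new app.py.
    # The nested-dict shape of my_dict.pickle is inferred from how the code
    # indexes it; the keys below are hypothetical English stand-ins.
    from transformers import pipeline

    classifier = pipeline("zero-shot-classification",
                          model="MoritzLaurer/mDeBERTa-v3-base-mnli-xnli")

    dictionary = {
        "inheritance": {
            "acceptance of inheritance": "Reference text on accepting an estate...",
            "renunciation": "Reference text on renouncing an estate...",
        },
        "real estate": {
            "lease": "Reference text on leases...",
            "eviction": "Reference text on eviction...",
        },
    }

    def cascade_classify(question):
        # Stage 1: pick the most likely top-level legal area.
        area = classifier(question, list(dictionary), multi_label=False)["labels"][0]
        # Stage 2: pick the most likely sub-topic within that area.
        topic = classifier(question, list(dictionary[area]), multi_label=False)["labels"][0]
        return area, topic, dictionary[area][topic]

    area, topic, reference = cascade_classify("My landlord wants to evict me over unpaid rent")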
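
The commented-out text[499:999] slice suggests the author ran into the translate package's per-request length cap (its default MyMemory backend rejects long queries). A hedged helper that splits long text into chunks before translating; the 500-character chunk size is an assumption based on that backend's documented limit:

    # Hedged helper for the translate package's request-size cap; the
    # 500-character chunk size is an assumption, adjust for the backend in use.
    from translate import Translator

    def translate_long(text, from_lang='el', to_lang='en', chunk_size=500):
        translator = Translator(from_lang=from_lang, to_lang=to_lang)
        chunks = [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]
        return " ".join(translator.translate(chunk) for chunk in chunks)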
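
One caveat worth flagging: st.session_state.history += ... appends every question to a single prompt string across reruns, and google/flan-t5-base was pretrained with 512-token inputs, so older turns will eventually crowd out or truncate the current question. A sketch of a stateless alternative that builds the prompt fresh each time (answer_question is an illustrative name, not part of the app):

    # Stateless alternative to accumulating st.session_state.history: build the
    # prompt fresh on every question so stale turns cannot exhaust the context.
    from transformers import pipeline

    pipe = pipeline("text2text-generation", model="google/flan-t5-base")

    def answer_question(reference, question):
        prompt = ("Based on this info only: " + reference +
                  ", answer this question, by reasoning step by step: " + question)
        return pipe(prompt, max_new_tokens=256)[0]["generated_text"]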