sotosbarl committed on
Commit
c3fafe2
1 Parent(s): caea544

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -97
app.py CHANGED
@@ -1,22 +1,11 @@
1
- from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
2
- import torch
3
- import pickle
4
- import streamlit as st
5
- from translate import Translator
6
-
7
  from huggingface_hub import InferenceClient
8
  import gradio as gr
9
 
 
 
 
10
 
11
 
12
-
13
-
14
-
15
def classify(text, labels):
    """Zero-shot classify *text* against the candidate *labels*.

    Delegates to the module-level ``classifier`` pipeline with
    ``multi_label=False`` (labels compete; scores sum to 1) and returns
    the pipeline's raw result dict (``sequence``, ``labels``, ``scores``).
    """
    return classifier(text, labels, multi_label=False)
19
-
20
  def format_prompt(message, history):
21
  prompt = "<s>"
22
  for user_prompt, bot_response in history:
@@ -53,89 +42,6 @@ def generate(
53
  return output
54
 
55
 
56
- client = InferenceClient(
57
- "mistralai/Mistral-7B-Instruct-v0.1"
58
- )
59
-
60
- classifier = pipeline("zero-shot-classification", model="MoritzLaurer/mDeBERTa-v3-base-mnli-xnli")
61
-
62
- with open('my_dict.pickle', 'rb') as file:
63
- dictionary = pickle.load(file)
64
-
65
-
66
- # text = st.text_input('Enter some text:') # Input field for new text
67
-
68
-
69
- # labels = list(dictionary)
70
-
71
- # output = classify(text,labels)
72
-
73
- # output = output["labels"][0]
74
-
75
- # labels = list(dictionary[output])
76
-
77
- # output2 = classify(text,labels)
78
-
79
- # output2 = output2["labels"][0]
80
-
81
-
82
- # answer = dictionary[output][output2]
83
-
84
- # # Create a translator object with specified source and target languages
85
- # translator = Translator(from_lang='el', to_lang='en')
86
- # translator2 = Translator(from_lang='en', to_lang='el')
87
-
88
- # st.text("H ερώτηση σας σχετίζεται με " + output+ " δίκαιο")
89
-
90
-
91
- # # Translate the text from Greek to English
92
- # answer = translator.translate(answer)
93
- # text = translator.translate(text)
94
-
95
- # st.text("Πιο συγκεκριμένα σχετίζεται με " + output2)
96
-
97
-
98
-
99
-
100
- # prompt = "Based on this info only:" + answer +" ,answer this question, by reasoning step by step:" + text
101
- # formatted_prompt = format_prompt(prompt, history)
102
- # translated_text2 = translator2.translate(output)
103
-
104
-
105
def generate(
    prompt, history, temperature=0.7, max_new_tokens=256, top_p=0.95, repetition_penalty=1.1,
):
    """Stream a chat completion from the remote inference endpoint.

    Formats *prompt* plus the chat *history* via ``format_prompt``, calls the
    module-level ``client.text_generation`` with streaming enabled, and yields
    the accumulated response text after every received token so a UI can
    render it incrementally.

    Parameters mirror the HF text-generation API: sampling ``temperature``,
    ``max_new_tokens`` budget, nucleus ``top_p``, and ``repetition_penalty``.

    Yields:
        str: the full response text generated so far.
    """
    # The API rejects non-positive temperatures; clamp to a small floor.
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,  # fixed seed -> reproducible sampling across calls
    )

    formatted_prompt = format_prompt(prompt, history)

    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    output = ""
    for response in stream:
        output += response.token.text
        yield output
    # NOTE: the original ended with `return output`; inside a generator that
    # value is only visible as StopIteration.value and no caller used it,
    # so the dead/misleading return was removed.
131
-
132
-
133
-
134
-
135
-
136
-
137
-
138
-
139
  additional_inputs=[
140
  gr.Slider(
141
  label="Temperature",
 
 
 
 
 
 
 
1
  from huggingface_hub import InferenceClient
2
  import gradio as gr
3
 
4
+ client = InferenceClient(
5
+ "mistralai/Mistral-7B-Instruct-v0.1"
6
+ )
7
 
8
 
 
 
 
 
 
 
 
 
9
  def format_prompt(message, history):
10
  prompt = "<s>"
11
  for user_prompt, bot_response in history:
 
42
  return output
43
 
44
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  additional_inputs=[
46
  gr.Slider(
47
  label="Temperature",