Update app.py
app.py CHANGED
@@ -41,7 +41,7 @@ def group_texts(examples):
     return result

 #Generate Response - test how it works after training
-def generate_response(prompt):
+def generate_response(prompt, model, tokenizer):
     input_ids = tokenizer.encode(prompt, return_tensors="pt")
     output = model.generate(input_ids, max_length=100)
     response = tokenizer.decode(output[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
@@ -161,8 +161,31 @@ print("Evaluate:")
 #trainer.evaluate()
 print("Done Eval")

+###################################################
+#Save to a place -????? Where????
+print("Save to ???")
+login(token=os.environ["HF_WRITE_TOKEN"])
+trainer.save_model("test-tis-1")
+print("done")
+
+#######################################
+# Load model
+print("load model_neu")
+model_neu = trainer.load("test-tis-1")
+print("done load")
+
+#####################################
+#Push to Hub
+#print("push to hub")
+#login(token=os.environ["HF_WRITE_TOKEN"])
+#trainer.push_to_hub("test-tis-1")
+#tokenizer.push_to_hub("test-tis-1")????
+#print("done")
+
+
+
 ############################
-#Test
+#Test the model
 ############################
 print("Test")
 text = "Was ist Tis?"
@@ -170,7 +193,7 @@ text = "Was ist Tis?"
 # Example usage
 prompt = "Was ist TIS?"

-response = generate_response(prompt)
+response = generate_response(prompt, model_neu, tokenizer)
 print(response)
 print("response done")

@@ -190,22 +213,6 @@ print("Output:\n" )
 '''


-###################################################
-#Save to a place -????? Where????
-print("Save to ???")
-#login(token=os.environ["HF_WRITE_TOKEN"])
-#trainer.save_model("alexkueck/test-tis-1")
-print("done")
-
-
-#####################################
-#Push to Hub
-print("push to hub")
-#login(token=os.environ["HF_WRITE_TOKEN"])
-#trainer.push_to_hub("test-tis-1")
-#tokenizer.push_to_hub("test-tis-1")????
-print("done")
-


 ##############################################
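
For reference, a minimal sketch of the save / reload / generate flow that the new lines aim at, using only standard transformers and huggingface_hub calls. transformers.Trainer has no load() method, so reloading the checkpoint written by trainer.save_model("test-tis-1") is sketched with AutoModelForCausalLM.from_pretrained on that directory; only the directory name, the prompt, and the generate_response signature are taken from the diff, and the choice of AutoModelForCausalLM / AutoTokenizer (i.e. a causal LM whose tokenizer was saved alongside the model) is an assumption.

import os

from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer


def generate_response(prompt, model, tokenizer):
    # Encode the prompt, generate up to 100 tokens total, and decode only the
    # newly generated part (everything after the prompt tokens).
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    output = model.generate(input_ids, max_length=100)
    return tokenizer.decode(output[:, input_ids.shape[-1]:][0], skip_special_tokens=True)


# Reload the checkpoint written by trainer.save_model("test-tis-1").
# Assumption: the tokenizer was saved to the same directory (Trainer does this
# when a tokenizer is passed to it); otherwise load it from the base checkpoint.
model_neu = AutoModelForCausalLM.from_pretrained("test-tis-1")
tokenizer = AutoTokenizer.from_pretrained("test-tis-1")

print(generate_response("Was ist TIS?", model_neu, tokenizer))

# Optional: push model and tokenizer to the Hub (mirrors the commented-out lines in the diff).
# login(token=os.environ["HF_WRITE_TOKEN"])
# model_neu.push_to_hub("test-tis-1")
# tokenizer.push_to_hub("test-tis-1")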