TOOTLE committed
Commit 95333ce · verified · 1 Parent(s): c0ccf36

Update app.py

Files changed (1): app.py +5 -4
app.py CHANGED
@@ -27,25 +27,26 @@ def load_model():
 
 
 def chatbot_response(prompt):
+    global model, tokenizer  # make sure the function accesses the global variables
+
     inputs = tokenizer(
         alpaca_prompt.format(prompt),
         return_tensors="pt",
         truncation=True,
         max_length=512
     )
-    print(inputs)
+
     outputs = model.generate(
         inputs["input_ids"],
         max_new_tokens=1024,
         temperature=0.7,
         do_sample=True
     )
-    print(outputs)
 
     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    print(response)
     reponse = response.split("### Response:")
-    return reponse[-1]
+
+    return reponse[-1].strip()  # strip unnecessary surrounding whitespace
 
 # Load model and tokenizer
 model, tokenizer = load_model()
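
For reference, a minimal, hypothetical usage sketch of the updated chatbot_response. It is not part of this commit; it assumes app.py has already run model, tokenizer = load_model() and defines alpaca_prompt as referenced in the hunk above, and the prompt string is purely illustrative.

# Hypothetical usage sketch (not part of this commit). Assumes `model`,
# `tokenizer`, and `alpaca_prompt` are already defined in app.py as above.
if __name__ == "__main__":
    answer = chatbot_response("Explain what a tokenizer does.")
    # Only the text after "### Response:" is returned, with surrounding
    # whitespace removed by the new .strip() call.
    print(answer)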