Lautaro Cardarelli committed on
Commit f61923d
1 Parent(s): 0d25d57
Files changed (1)
  1. app.py +3 -5
app.py CHANGED
@@ -8,7 +8,6 @@ from transformers import BartTokenizer
 from transformers import PreTrainedModel
 from transformers import PreTrainedTokenizer
 from transformers import AutoTokenizer
-from transformers import AutoModelForSeq2SeqLM
 
 tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
 model = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')
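
Note on this hunk: `AutoModelForSeq2SeqLM` was imported but never used, since the model is loaded through the explicit BART classes. A minimal sketch of the summarization path those two lines set up (the checkpoint name comes from the diff; the generation parameters below are illustrative assumptions, not values from app.py):

from transformers import BartTokenizer, BartForConditionalGeneration

tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
model = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')

def summarize(text):
    # Truncate long inputs to BART's 1024-token window, then beam-search a summary.
    # num_beams / max_length are assumed values for illustration.
    inputs = tokenizer([text], truncation=True, max_length=1024, return_tensors='pt')
    ids = model.generate(inputs['input_ids'], num_beams=4, max_length=150, early_stopping=True)
    return tokenizer.decode(ids[0], skip_special_tokens=True)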
@@ -106,14 +105,15 @@ qa_model = T5ForConditionalGeneration.from_pretrained(ckpt).to(device)
 
 def generate_question_response(question, context):
     input_text = 'question: %s context: %s' % (question, context)
+    print(input_text)
     features = tokenizer([input_text], padding='max_length', truncation=True, max_length=512, return_tensors='pt')
     output = qa_model.generate(
         input_ids=features['input_ids'].to(device),
         attention_mask=features['attention_mask'].to(device),
-        max_length=200,  # allows longer answers
         temperature=1.0  # adjusts the temperature
     )
-
+    print('output')
+    print(output)
     return qa_tokenizer.decode(output[0], skip_special_tokens=True)
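
On this hunk: with `max_length=200` removed, `generate()` falls back to the checkpoint's configured default (20 tokens for stock T5 generation configs), so long answers may be truncated. Also, `temperature` only affects sampling; with the default greedy decoding, `temperature=1.0` is a no-op. A self-contained sketch of that behavior, using `t5-small` as a stand-in for the app's unshown `ckpt`:

import torch
from transformers import T5Tokenizer, T5ForConditionalGeneration

device = 'cuda' if torch.cuda.is_available() else 'cpu'
ckpt = 't5-small'  # stand-in; app.py loads its own QA checkpoint via `ckpt`
qa_tokenizer = T5Tokenizer.from_pretrained(ckpt)
qa_model = T5ForConditionalGeneration.from_pretrained(ckpt).to(device)

input_text = 'question: %s context: %s' % ('Who wrote the report?',
                                           'The report was written by Ada Lovelace.')
features = qa_tokenizer([input_text], padding='max_length', truncation=True,
                        max_length=512, return_tensors='pt')

# As committed: no max_length, so the config default (20 tokens) caps the answer;
# temperature=1.0 is ignored because do_sample defaults to False.
output = qa_model.generate(input_ids=features['input_ids'].to(device),
                           attention_mask=features['attention_mask'].to(device),
                           temperature=1.0)

# As before the commit: an explicit 200-token budget for longer answers.
longer = qa_model.generate(input_ids=features['input_ids'].to(device),
                           attention_mask=features['attention_mask'].to(device),
                           max_length=200)

print(qa_tokenizer.decode(output[0], skip_special_tokens=True))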
 
@@ -133,8 +133,6 @@ class SummarizerAndQA:
         self.study_generated_questions = generate_questions(text)
 
         if question != self.question and text != '':
-            print('entering response generation')
-            print(question)
            self.question = question
            self.question_response = generate_question_response(question, text)
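
On this hunk: the class only re-queries the model when the incoming question differs from the cached one, which avoids redundant generation on repeated UI updates. A hedged reconstruction of that guard, reusing app.py's generate_question_response; the method name and initializer are assumptions, since the diff shows only a fragment:

class SummarizerAndQA:
    def __init__(self):
        self.question = ''
        self.question_response = ''

    def process(self, question, text):  # hypothetical entry point; not shown in the diff
        # Skip regeneration when the question is unchanged or there is no context.
        if question != self.question and text != '':
            self.question = question
            self.question_response = generate_question_response(question, text)
        return self.question_response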
 
 