1inkusFace committed on
Commit
5c6e9ae
·
verified ·
1 Parent(s): 2eb9118

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -10
app.py CHANGED
@@ -27,16 +27,16 @@ def generate_code(prompt):
27
  add_generation_prompt=True
28
  )
29
  model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
30
- #with torch.no_grad():
31
- generated_ids = model.generate(
32
- **model_inputs,
33
- max_length = 1024,
34
- min_new_tokens = 256,
35
- #low_memory = True,
36
- do_sample = True,
37
- #token_healing = True,
38
- guidance_scale = 3.8,
39
- )
40
  generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)]
41
  response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
42
  return response
 
27
  add_generation_prompt=True
28
  )
29
  model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
30
+ with torch.no_grad():
31
+ generated_ids = model.generate(
32
+ **model_inputs,
33
+ max_length = 1024,
34
+ min_new_tokens = 256,
35
+ #low_memory = True,
36
+ do_sample = True,
37
+ #token_healing = True,
38
+ guidance_scale = 3.8,
39
+ )
40
  generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)]
41
  response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
42
  return response