wlin21at committed on
Commit
0b1eba5
1 Parent(s): bb7e110
Files changed (1) hide show
  1. metric.py +1 -1
metric.py CHANGED
@@ -67,7 +67,7 @@ def chat_llm_batch(model_id, prompts, limit= 20):
67
  for response in client.text.generation.create(
68
  model_id = model_id,
69
  inputs = prompts, # here each prompt is the concatenation of system prompt and user prompt
70
- execution_options=CreateExecutionOptions(concurrency_limit=limit, ordered=False),
71
  parameters=parameters,
72
  ):
73
  response_list.append(response.results[0].generated_text)
 
67
  for response in client.text.generation.create(
68
  model_id = model_id,
69
  inputs = prompts, # here each prompt is the concatenation of system prompt and user prompt
70
+ execution_options=CreateExecutionOptions(concurrency_limit=limit, ordered=True),
71
  parameters=parameters,
72
  ):
73
  response_list.append(response.results[0].generated_text)