chainyo committed on
Commit 87bc5be
1 Parent(s): 7dfe6af

Update README.md

Files changed (1)
  1. README.md +5 -3
README.md CHANGED
@@ -79,13 +79,15 @@ result
 from optimum.onnxruntime import ORTModelForQuestionAnswering
 from transformers import AutoTokenizer, pipeline
 
-tokenizer = AutoTokenizer.from_pretrained("cmarkea/distilcamembert-base-qa")
-model = ORTModelForQuestionAnswering.from_pretrained("cmarkea/distilcamembert-base-qa")
+HUB_MODEL = "cmarkea/distilcamembert-base-qa"
+
+tokenizer = AutoTokenizer.from_pretrained(HUB_MODEL)
+model = ORTModelForQuestionAnswering.from_pretrained(HUB_MODEL)
 onnx_qa = pipeline("question-answering", model=model, tokenizer=tokenizer)
 
 # Quantized onnx model
 quantized_model = ORTModelForQuestionAnswering.from_pretrained(
-"cmarkea/distilcamembert-base-qa", file_name="model_quantized.onnx"
+HUB_MODEL, file_name="model_quantized.onnx"
 )
 ```
 
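For context, a minimal end-to-end sketch of how the updated snippet might be exercised with the quantized model; the `question` and `context` strings below are illustrative placeholders, not taken from the model card.

```python
# Minimal sketch: load the quantized ONNX model and run the QA pipeline.
# The question/context strings are illustrative placeholders.
from optimum.onnxruntime import ORTModelForQuestionAnswering
from transformers import AutoTokenizer, pipeline

HUB_MODEL = "cmarkea/distilcamembert-base-qa"

tokenizer = AutoTokenizer.from_pretrained(HUB_MODEL)
quantized_model = ORTModelForQuestionAnswering.from_pretrained(
    HUB_MODEL, file_name="model_quantized.onnx"
)
quantized_qa = pipeline("question-answering", model=quantized_model, tokenizer=tokenizer)

result = quantized_qa(
    question="Quel est le nom du modèle ?",
    context="DistilCamemBERT est un modèle de langue français distillé depuis CamemBERT.",
)
print(result)  # dict with 'score', 'start', 'end', 'answer'
```

Factoring the repo id into the `HUB_MODEL` constant, as this commit does, keeps the three `from_pretrained` calls pointing at the same Hub repository.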