runtime error
._bootstrap>", line 1050, in _gcd_import
  File "<frozen importlib._bootstrap>", line 1027, in _find_and_load
  File "<frozen importlib._bootstrap>", line 1006, in _find_and_load_unlocked
  File "<frozen importlib._bootstrap>", line 688, in _load_unlocked
  File "<frozen importlib._bootstrap_external>", line 883, in exec_module
  File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
  File "/home/user/.local/lib/python3.10/site-packages/optimum/onnxruntime/configuration.py", line 27, in <module>
    from onnxruntime.quantization import CalibraterBase, CalibrationMethod, QuantFormat, QuantizationMode, QuantType
  File "/home/user/.local/lib/python3.10/site-packages/onnxruntime/quantization/__init__.py", line 1, in <module>
    from .calibrate import (  # noqa: F401
  File "/home/user/.local/lib/python3.10/site-packages/onnxruntime/quantization/calibrate.py", line 21, in <module>
    from .quant_utils import apply_plot, load_model_with_shape_infer, smooth_distribution
  File "/home/user/.local/lib/python3.10/site-packages/onnxruntime/quantization/quant_utils.py", line 115, in <module>
    onnx_proto.TensorProto.FLOAT8E4M3FN: float8e4m3fn,
AttributeError: FLOAT8E4M3FN

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/user/app/app.py", line 4, in <module>
    from infer import get_model_and_tokenizer, batch_embed
  File "/home/user/app/infer.py", line 14, in <module>
    from optimum.onnxruntime import (
  File "<frozen importlib._bootstrap>", line 1075, in _handle_fromlist
  File "/home/user/.local/lib/python3.10/site-packages/transformers/utils/import_utils.py", line 1074, in __getattr__
    module = self._get_module(self._class_to_module[name])
  File "/home/user/.local/lib/python3.10/site-packages/transformers/utils/import_utils.py", line 1086, in _get_module
    raise RuntimeError(
RuntimeError: Failed to import optimum.onnxruntime.configuration because of the following error (look up to see its traceback):
FLOAT8E4M3FN
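For context, this AttributeError usually indicates a version mismatch between the installed onnx and onnxruntime packages: the onnxruntime quantization code in the traceback references TensorProto.FLOAT8E4M3FN, an enum value that only exists in more recent onnx releases, so an older onnx install fails at import time. A minimal sketch to confirm that, assuming the mismatch is the cause (the version bounds and requirements.txt change mentioned in the comments are suggestions, not verified against this environment):

```python
# Sanity check for the onnx / onnxruntime mismatch suspected above.
import onnx
import onnxruntime
from onnx import TensorProto

print("onnx:", onnx.__version__)
print("onnxruntime:", onnxruntime.__version__)
# False here reproduces the AttributeError raised in quant_utils.py.
print("has FLOAT8E4M3FN:", hasattr(TensorProto, "FLOAT8E4M3FN"))

# If it prints False, upgrading onnx (e.g. adding "onnx>=1.14.0" to requirements.txt)
# or pinning an older onnxruntime should let the optimum.onnxruntime import succeed.
```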