Upload 20 files
- .gitattributes +1 -0
- Stage Elements ONNX/example/README.md +42 -0
- Stage Elements ONNX/example/onnx_example.py +117 -0
- Stage Elements ONNX/example/requirements.txt +3 -0
- Stage Elements ONNX/labels.txt +8 -0
- Stage Elements ONNX/model.onnx +3 -0
- Stage Elements ONNX/signature.json +1 -0
- Stage Elements TFLite/example/README.md +49 -0
- Stage Elements TFLite/example/requirements.txt +17 -0
- Stage Elements TFLite/example/tflite_example.py +129 -0
- Stage Elements TFLite/labels.txt +8 -0
- Stage Elements TFLite/saved_model.tflite +3 -0
- Stage Elements TFLite/signature.json +1 -0
- Stage Elements TensorFlow/example/README.md +44 -0
- Stage Elements TensorFlow/example/requirements.txt +3 -0
- Stage Elements TensorFlow/example/tf_example.py +121 -0
- Stage Elements TensorFlow/labels.txt +8 -0
- Stage Elements TensorFlow/saved_model.pb +3 -0
- Stage Elements TensorFlow/signature.json +1 -0
- Stage Elements TensorFlow/variables/variables.data-00000-of-00001 +3 -0
- Stage Elements TensorFlow/variables/variables.index +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+Stage[[:space:]]Elements[[:space:]]TensorFlow/variables/variables.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
Stage Elements ONNX/example/README.md
ADDED
@@ -0,0 +1,42 @@
# Getting Started - ONNX

There are two key elements in the main export directory: your model in ONNX format (.onnx) and signature.json, which contains information about your Lobe project. With these, you are ready to use your model! If you want to see an example of how to use this model, there are instructions below for running a quick test script.

## Example Contents

`signature.json` is created by Lobe and contains information about the model, such as label names and the image size and shape the model expects.

`onnx_example.py` is a simple script to quickly test your exported model. It takes a path to an image on your file system, prepares the image, and returns the predicted class and confidence level.

`requirements.txt` is where the Python libraries and version information required to run the script are found.

## Run Example

You will need Python 3.6 and the path to an image on your machine to test.

Create a virtual environment

`python -m venv onnx-venv`

Activate the virtual environment

macOS `source onnx-venv/bin/activate`

Windows `onnx-venv\Scripts\activate`

Install the dependencies for the example

`python -m pip install --upgrade pip && pip install -r requirements.txt`

Run the example and see the model output

`python onnx_example.py path/to/image/for/testing`

### Notes

If you see the error "OSError: image file is truncated", you may need to add the following lines to the sample code due to an issue with PIL (the Python Imaging Library):

```python
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
```
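If you would rather call the model from your own Python code than from the command line, a minimal sketch reusing the `ONNXModel` class from `onnx_example.py` could look like the following (it assumes the snippet runs from inside this `example` directory, and the image path is hypothetical):

```python
# Minimal usage sketch: reuse the ONNXModel class from onnx_example.py.
# Assumes this runs from inside the example directory, so os.getcwd()'s
# parent directory is the export root that holds signature.json and model.onnx.
import os

from PIL import Image
from onnx_example import ONNXModel

model = ONNXModel(dir_path=os.getcwd())
model.load()  # creates the onnxruntime InferenceSession
result = model.predict(Image.open("my_test_image.jpg"))  # hypothetical image path
print(result["predictions"][0])  # top prediction: {"label": ..., "confidence": ...}
```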
Stage Elements ONNX/example/onnx_example.py
ADDED
@@ -0,0 +1,117 @@
# -------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# -------------------------------------------------------------
"""
Skeleton code showing how to load and run the ONNX export package from Lobe.
"""

import argparse
import json
import os

import numpy as np
from PIL import Image
import onnxruntime as rt

EXPORT_MODEL_VERSION = 1


class ONNXModel:
    def __init__(self, dir_path) -> None:
        """Method to get name of model file. Assumes model is in the parent directory for script."""
        model_dir = os.path.dirname(dir_path)
        with open(os.path.join(model_dir, "signature.json"), "r") as f:
            self.signature = json.load(f)
        self.model_file = os.path.join(model_dir, self.signature.get("filename"))
        if not os.path.isfile(self.model_file):
            raise FileNotFoundError("Model file does not exist")
        # get the signature for model inputs and outputs
        self.signature_inputs = self.signature.get("inputs")
        self.signature_outputs = self.signature.get("outputs")
        self.session = None
        if "Image" not in self.signature_inputs:
            raise ValueError("ONNX model doesn't have 'Image' input! Check signature.json, and please report issue to Lobe.")
        # Look for the version in the signature file.
        # If it's not found or doesn't match the expected version, print a message.
        version = self.signature.get("export_model_version")
        if version is None or version != EXPORT_MODEL_VERSION:
            print(
                f"There has been a change to the model format. Please use a model with a signature 'export_model_version' that matches {EXPORT_MODEL_VERSION}."
            )

    def load(self) -> None:
        """Load the model from path to model file"""
        # Load ONNX model as session.
        self.session = rt.InferenceSession(path_or_bytes=self.model_file)

    def predict(self, image: Image.Image) -> dict:
        """
        Predict with the ONNX session!
        """
        # process image to be compatible with the model
        img = self.process_image(image, self.signature_inputs.get("Image").get("shape"))
        # run the model!
        fetches = [(key, value.get("name")) for key, value in self.signature_outputs.items()]
        # make the image a batch of 1
        feed = {self.signature_inputs.get("Image").get("name"): [img]}
        outputs = self.session.run(output_names=[name for (_, name) in fetches], input_feed=feed)
        return self.process_output(fetches, outputs)

    def process_image(self, image: Image.Image, input_shape: list) -> np.ndarray:
        """
        Given a PIL Image, center square crop and resize to fit the expected model input, and convert from [0,255] to [0,1] values.
        """
        width, height = image.size
        # ensure image type is compatible with model and convert if not
        if image.mode != "RGB":
            image = image.convert("RGB")
        # center crop image (you can substitute any other method to make a square image, such as just resizing or padding edges with 0)
        if width != height:
            square_size = min(width, height)
            left = (width - square_size) / 2
            top = (height - square_size) / 2
            right = (width + square_size) / 2
            bottom = (height + square_size) / 2
            # Crop the center of the image
            image = image.crop((left, top, right, bottom))
        # now the image is square, resize it to be the right shape for the model input
        input_width, input_height = input_shape[1:3]
        if image.width != input_width or image.height != input_height:
            image = image.resize((input_width, input_height))

        # make 0-1 float instead of 0-255 int (that PIL Image loads by default)
        image = np.asarray(image) / 255.0
        # format input as model expects
        return image.astype(np.float32)

    def process_output(self, fetches: list, outputs: list) -> dict:
        # un-batch since we ran an image with batch size of 1,
        # convert to normal python types with tolist(), and convert any byte strings to normal strings with .decode()
        out_keys = ["label", "confidence"]
        results = {}
        for i, (key, _) in enumerate(fetches):
            val = outputs[i].tolist()[0]
            if isinstance(val, bytes):
                val = val.decode()
            results[key] = val
        confs = results["Confidences"]
        labels = self.signature.get("classes").get("Label")
        output = [dict(zip(out_keys, group)) for group in zip(labels, confs)]
        sorted_output = {"predictions": sorted(output, key=lambda k: k["confidence"], reverse=True)}
        return sorted_output


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Predict a label for an image.")
    parser.add_argument("image", help="Path to your image file.")
    args = parser.parse_args()
    dir_path = os.getcwd()

    if os.path.isfile(args.image):
        image = Image.open(args.image)
        model = ONNXModel(dir_path=dir_path)
        model.load()
        outputs = model.predict(image)
        print(f"Predicted: {outputs}")
    else:
        print(f"Couldn't find image file {args.image}")
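For reference, `process_output` zips the label list with the per-class confidences and sorts by confidence, so the script prints a dictionary shaped like the sketch below (the confidence values here are purely illustrative):

```python
# Illustrative output shape only; the confidence numbers are made up.
example_output = {
    "predictions": [
        {"label": "Chemical Toilet", "confidence": 0.93},
        {"label": "K1 Bumper", "confidence": 0.04},
        # ... one entry per class in labels.txt, sorted by descending confidence
    ]
}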
Stage Elements ONNX/example/requirements.txt
ADDED
@@ -0,0 +1,3 @@
pillow==9.0.1
onnxruntime==1.6.0
numpy==1.19.5
Stage Elements ONNX/labels.txt
ADDED
@@ -0,0 +1,8 @@
Anti-Avalancha
Ballast 1000 liter tank
Chemical Toilet
K1 Bumper
LA Rack II
LA Rack II Bumper
LA Rack II Dolly Board
Portable Dreesing Room
Stage Elements ONNX/model.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:eab95dd6ccc215038c2e7e5faed9f08af2cf230ab1285967b6d159957e769057
size 99524084
Stage Elements ONNX/signature.json
ADDED
@@ -0,0 +1 @@
{"doc_id": "680d8281-8f48-4405-be9a-9a9a731bf332", "doc_name": "Stage Elements", "doc_version": "fd4eba4881fae5c6131c5100d6e15260", "format": "onnx", "version": 80, "inputs": {"Image": {"dtype": "float32", "shape": [null, 224, 224, 3], "name": "Image:0"}}, "outputs": {"Confidences": {"dtype": "float32", "shape": [null, 8], "name": "sequential/dense_2/Softmax:0"}}, "tags": [], "classes": {"Label": ["Anti-Avalancha", "Ballast 1000 liter tank", "Chemical Toilet", "K1 Bumper", "LA Rack II", "LA Rack II Bumper", "LA Rack II Dolly Board", "Portable Dreesing Room"]}, "filename": "model.onnx", "export_model_version": 1}
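The signature is plain JSON, so its fields are easy to inspect directly; a small sketch, assuming it is run from the directory containing `signature.json`:

```python
# Peek at the model metadata that the example scripts rely on.
import json

with open("signature.json", "r") as f:
    sig = json.load(f)

print(sig["inputs"]["Image"]["shape"])  # [None, 224, 224, 3] -> 224x224 RGB, unbound batch dim
print(sig["classes"]["Label"])          # the 8 class names, same order as labels.txt
print(sig["filename"])                  # model.onnx
```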
Stage Elements TFLite/example/README.md
ADDED
@@ -0,0 +1,49 @@
# Getting Started - TensorFlow Lite

There are two key elements in the main export directory: your model in TFLite format (.tflite) and signature.json, which contains information about your Lobe project. With these, you are ready to use your model! If you want to see an example of how to use this model, there are instructions below for running a quick test script.

## Example Contents

`signature.json` is created by Lobe and contains information about the model, such as label names and the image size and shape the model expects.

`tflite_example.py` is a simple script to quickly test your exported model. It takes a path to an image on your file system, prepares the image, and returns the predicted class and confidence level.

`requirements.txt` is where the Python libraries and version information required to run the script are found.

## Run Example

You will need Python 3.6 and the path to an image on your machine to test.

Create a virtual environment

`python -m venv tflite-venv`

Activate the virtual environment

macOS `source tflite-venv/bin/activate`

Windows `tflite-venv\Scripts\activate`

Install the dependencies for the example

`python -m pip install --upgrade pip && pip install -r requirements.txt`

Pip with the `requirements.txt` file should install the TensorFlow Lite runtime appropriate to your OS and Python version if you are on Windows, Mac, or Linux. Please double-check that `tflite_runtime` was installed via pip. If you have a different Linux platform (such as a Raspberry Pi), or find that the runtime was not installed from pip, then please install the appropriate [TensorFlow Lite runtime](https://www.tensorflow.org/lite/guide/python#install_just_the_tensorflow_lite_interpreter) wheel based on your OS and Python version. For example, if you are on Windows 10 with Python 3.6:

`pip install https://dl.google.com/coral/python/tflite_runtime-2.1.0.post1-cp36-cp36m-win_amd64.whl`

Finally, run the example and see the model output

`python tflite_example.py path/to/image/for/testing`

### Notes

If you see the error "OSError: image file is truncated", you may need to add the following lines to the sample code due to an issue with PIL (the Python Imaging Library):

```python
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
```
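To double-check which interpreter your environment ended up with (besides running `pip show tflite_runtime`), here is a quick sketch mirroring the import fallback that `tflite_example.py` itself uses:

```python
# Report which TFLite interpreter is importable in this environment.
try:
    import tflite_runtime.interpreter as tflite
    print("Using the standalone tflite_runtime package.")
except ImportError:
    from tensorflow import lite as tflite
    print("tflite_runtime not found; falling back to tensorflow.lite.")
```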
Stage Elements TFLite/example/requirements.txt
ADDED
@@ -0,0 +1,17 @@
pillow==9.0.1
# TF Lite runtime packages based on OS and python version: https://www.tensorflow.org/lite/guide/python#install_just_the_tensorflow_lite_interpreter
# windows
https://dl.google.com/coral/python/tflite_runtime-2.1.0.post1-cp35-cp35m-win_amd64.whl; sys_platform == 'win32' and python_version == '3.5'
https://dl.google.com/coral/python/tflite_runtime-2.1.0.post1-cp36-cp36m-win_amd64.whl; sys_platform == 'win32' and python_version == '3.6'
https://dl.google.com/coral/python/tflite_runtime-2.1.0.post1-cp37-cp37m-win_amd64.whl; sys_platform == 'win32' and python_version == '3.7'
# mac
https://dl.google.com/coral/python/tflite_runtime-2.1.0.post1-cp35-cp35m-macosx_10_14_x86_64.whl; sys_platform == 'darwin' and python_version == '3.5'
https://dl.google.com/coral/python/tflite_runtime-2.1.0.post1-cp36-cp36m-macosx_10_14_x86_64.whl; sys_platform == 'darwin' and python_version == '3.6'
https://dl.google.com/coral/python/tflite_runtime-2.1.0.post1-cp37-cp37m-macosx_10_14_x86_64.whl; sys_platform == 'darwin' and python_version == '3.7'
# linux
https://dl.google.com/coral/python/tflite_runtime-2.1.0.post1-cp35-cp35m-linux_x86_64.whl; sys_platform == 'linux' and platform_machine == 'x86_64' and python_version == '3.5'
https://dl.google.com/coral/python/tflite_runtime-2.1.0.post1-cp36-cp36m-linux_x86_64.whl; sys_platform == 'linux' and platform_machine == 'x86_64' and python_version == '3.6'
https://dl.google.com/coral/python/tflite_runtime-2.1.0.post1-cp37-cp37m-linux_x86_64.whl; sys_platform == 'linux' and platform_machine == 'x86_64' and python_version == '3.7'
https://dl.google.com/coral/python/tflite_runtime-2.1.0.post1-cp38-cp38-linux_x86_64.whl; sys_platform == 'linux' and platform_machine == 'x86_64' and python_version == '3.8'
# for other linux/raspberry pi, please see the link above to find the right version for your OS
numpy==1.19.5
Stage Elements TFLite/example/tflite_example.py
ADDED
@@ -0,0 +1,129 @@
# -------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# -------------------------------------------------------------
"""
Skeleton code showing how to load and run the TensorFlow Lite export package from Lobe.
"""

import argparse
import json
import os

import numpy as np
from PIL import Image

try:
    import tflite_runtime.interpreter as tflite
except ImportError:
    from tensorflow import lite as tflite

EXPORT_MODEL_VERSION = 1


class TFLiteModel:
    def __init__(self, dir_path) -> None:
        """Method to get name of model file. Assumes model is in the parent directory for script."""
        model_dir = os.path.dirname(dir_path)
        with open(os.path.join(model_dir, "signature.json"), "r") as f:
            self.signature = json.load(f)
        self.model_file = os.path.join(model_dir, self.signature.get("filename"))
        if not os.path.isfile(self.model_file):
            raise FileNotFoundError("Model file does not exist")
        self.interpreter = None
        self.signature_inputs = self.signature.get("inputs")
        self.signature_outputs = self.signature.get("outputs")
        # Look for the version in the signature file.
        # If it's not found or doesn't match the expected version, print a message.
        version = self.signature.get("export_model_version")
        if version is None or version != EXPORT_MODEL_VERSION:
            print(
                f"There has been a change to the model format. Please use a model with a signature 'export_model_version' that matches {EXPORT_MODEL_VERSION}."
            )

    def load(self) -> None:
        """Load the model from path to model file"""
        # Load TFLite model and allocate tensors.
        self.interpreter = tflite.Interpreter(model_path=self.model_file)
        self.interpreter.allocate_tensors()
        # Combine the information about the inputs and outputs from the signature.json file with the Interpreter runtime.
        # Note: the interpreter's details override same-named keys from the signature, so the concrete
        # [1, height, width, 3] input shape and the tensor index come from the runtime.
        input_details = {detail.get("name"): detail for detail in self.interpreter.get_input_details()}
        self.model_inputs = {key: {**sig, **input_details.get(sig.get("name"))} for key, sig in self.signature_inputs.items()}
        output_details = {detail.get("name"): detail for detail in self.interpreter.get_output_details()}
        self.model_outputs = {key: {**sig, **output_details.get(sig.get("name"))} for key, sig in self.signature_outputs.items()}
        if "Image" not in self.model_inputs:
            raise ValueError("Tensorflow Lite model doesn't have 'Image' input! Check signature.json, and please report issue to Lobe.")

    def predict(self, image) -> dict:
        """
        Predict with the TFLite interpreter!
        """
        if self.interpreter is None:
            self.load()

        # process image to be compatible with the model
        input_data = self.process_image(image, self.model_inputs.get("Image").get("shape"))
        # set the input to run
        self.interpreter.set_tensor(self.model_inputs.get("Image").get("index"), input_data)
        self.interpreter.invoke()

        # grab our desired outputs from the interpreter!
        # un-batch since we ran an image with batch size of 1, and convert to normal python types with tolist()
        outputs = {key: self.interpreter.get_tensor(value.get("index")).tolist()[0] for key, value in self.model_outputs.items()}
        return self.process_output(outputs)

    def process_image(self, image, input_shape) -> np.ndarray:
        """
        Given a PIL Image, center square crop and resize to fit the expected model input, and convert from [0,255] to [0,1] values.
        """
        width, height = image.size
        # ensure image type is compatible with model and convert if not
        if image.mode != "RGB":
            image = image.convert("RGB")
        # center crop image (you can substitute any other method to make a square image, such as just resizing or padding edges with 0)
        if width != height:
            square_size = min(width, height)
            left = (width - square_size) / 2
            top = (height - square_size) / 2
            right = (width + square_size) / 2
            bottom = (height + square_size) / 2
            # Crop the center of the image
            image = image.crop((left, top, right, bottom))
        # now the image is square, resize it to be the right shape for the model input
        input_width, input_height = input_shape[1:3]
        if image.width != input_width or image.height != input_height:
            image = image.resize((input_width, input_height))

        # make 0-1 float instead of 0-255 int (that PIL Image loads by default)
        image = np.asarray(image) / 255.0
        # format input as model expects: reshape to the interpreter's batched input shape
        return image.reshape(input_shape).astype(np.float32)

    def process_output(self, outputs) -> dict:
        # postprocessing! convert any byte strings to normal strings with .decode()
        out_keys = ["label", "confidence"]
        for key, val in outputs.items():
            if isinstance(val, bytes):
                outputs[key] = val.decode()

        # get list of confidences from prediction
        confs = list(outputs.values())[0]
        labels = self.signature.get("classes").get("Label")
        output = [dict(zip(out_keys, group)) for group in zip(labels, confs)]
        sorted_output = {"predictions": sorted(output, key=lambda k: k["confidence"], reverse=True)}
        return sorted_output


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Predict a label for an image.")
    parser.add_argument("image", help="Path to your image file.")
    args = parser.parse_args()
    dir_path = os.getcwd()

    if os.path.isfile(args.image):
        image = Image.open(args.image)
        model = TFLiteModel(dir_path=dir_path)
        model.load()
        outputs = model.predict(image)
        print(f"Predicted: {outputs}")
    else:
        print(f"Couldn't find image file {args.image}")
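Note that unlike the ONNX example, `TFLiteModel.predict` lazily calls `load()` when no interpreter exists yet, so the explicit `model.load()` in `__main__` is optional. A minimal sketch, assuming it runs from inside this `example` directory (the image path is hypothetical):

```python
# Minimal usage sketch: TFLiteModel auto-loads the interpreter on first predict().
import os

from PIL import Image
from tflite_example import TFLiteModel

model = TFLiteModel(dir_path=os.getcwd())  # parent directory holds signature.json
result = model.predict(Image.open("my_test_image.jpg"))  # hypothetical image path
print(result["predictions"][0])  # highest-confidence class first
```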
Stage Elements TFLite/labels.txt
ADDED
@@ -0,0 +1,8 @@
Anti-Avalancha
Ballast 1000 liter tank
Chemical Toilet
K1 Bumper
LA Rack II
LA Rack II Bumper
LA Rack II Dolly Board
Portable Dreesing Room
Stage Elements TFLite/saved_model.tflite
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f242601a147e6a559259bbb0c7714516deecead85eb99e3643dd38d59cf91334
size 99366228
Stage Elements TFLite/signature.json
ADDED
@@ -0,0 +1 @@
{"doc_id": "680d8281-8f48-4405-be9a-9a9a731bf332", "doc_name": "Stage Elements", "doc_version": "fd4eba4881fae5c6131c5100d6e15260", "format": "tf_lite", "version": 80, "inputs": {"Image": {"dtype": "float32", "shape": [null, 224, 224, 3], "name": "Image"}}, "outputs": {"Confidences": {"dtype": "float32", "shape": [null, 8], "name": "sequential/dense_2/Softmax"}}, "tags": [], "classes": {"Label": ["Anti-Avalancha", "Ballast 1000 liter tank", "Chemical Toilet", "K1 Bumper", "LA Rack II", "LA Rack II Bumper", "LA Rack II Dolly Board", "Portable Dreesing Room"]}, "filename": "saved_model.tflite", "export_model_version": 1}
Stage Elements TensorFlow/example/README.md
ADDED
@@ -0,0 +1,44 @@
# Getting Started - TensorFlow

In the main export directory there is the exported model (saved_model.pb) and a directory named variables, which together make up the TensorFlow SavedModel. There is also a file named signature.json, which contains information about your Lobe project. With these, you are ready to use your model! If you want to see an example of how to use this model, there are instructions below for running a quick test script.

## Example Contents

`signature.json` is created by Lobe and contains information about the model, such as label names and the image size and shape the model expects.

`tf_example.py` is a simple script to quickly test your exported model. It takes a path to an image on your file system, prepares the image, and returns the predicted class and confidence level.

`requirements.txt` is where the Python libraries and version information required to run the script are found.

## Run Example

You will need Python 3.6 and the path to an image on your machine to test.

Create a virtual environment

`python -m venv tf-venv`

Activate the virtual environment

macOS `source tf-venv/bin/activate`

Windows `tf-venv\Scripts\activate`

Install the dependencies for the example

`python -m pip install --upgrade pip && pip install -r requirements.txt`

Run the example and see the model output

`python tf_example.py path/to/image/for/testing`

### Notes

- We are using TensorFlow 2.9.0 (the version pinned in requirements.txt) to run the tf_example.py file. If you see any GPU errors or want to run the script on GPU, please refer to https://www.tensorflow.org/install/gpu

- If you see the error "OSError: image file is truncated", you may need to add the following lines to the sample code due to an issue with PIL (the Python Imaging Library)

```python
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
```
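As with the other exports, you can also reuse the `TFModel` class from your own code; one difference from the ONNX and TFLite examples is that the SavedModel is loaded in the constructor, so there is no separate `load()` step. A minimal sketch, assuming it runs from inside this `example` directory (the image path is hypothetical):

```python
# Minimal usage sketch: the TFModel constructor already loads the SavedModel.
import os

from PIL import Image
from tf_example import TFModel

model = TFModel(dir_path=os.getcwd())  # parent directory holds signature.json and saved_model.pb
result = model.predict(Image.open("my_test_image.jpg"))  # hypothetical image path
print(result["predictions"][0])
```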
Stage Elements TensorFlow/example/requirements.txt
ADDED
@@ -0,0 +1,3 @@
tensorflow==2.9.0
pillow==9.0.1
protobuf==3.19.4
Stage Elements TensorFlow/example/tf_example.py
ADDED
@@ -0,0 +1,121 @@
# -------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# -------------------------------------------------------------
"""
Skeleton code showing how to load and run the TensorFlow SavedModel export package from Lobe.
"""
import argparse
import os
import json
import numpy as np
from threading import Lock

# printing only warnings and error messages
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"

try:
    import tensorflow as tf
    from PIL import Image
except ImportError:
    raise ImportError("ERROR: Failed to import libraries. Please refer to the README.md file\n")

EXPORT_MODEL_VERSION = 1


class TFModel:
    def __init__(self, dir_path) -> None:
        # Assume model is in the parent directory for this file
        self.model_dir = os.path.dirname(dir_path)
        # make sure our exported SavedModel folder exists
        with open(os.path.join(self.model_dir, "signature.json"), "r") as f:
            self.signature = json.load(f)
        self.model_file = os.path.join(self.model_dir, self.signature.get("filename"))
        if not os.path.isfile(self.model_file):
            raise FileNotFoundError("Model file does not exist")
        self.inputs = self.signature.get("inputs")
        self.outputs = self.signature.get("outputs")
        self.lock = Lock()

        # loading the saved model
        self.model = tf.saved_model.load(tags=self.signature.get("tags"), export_dir=self.model_dir)
        self.predict_fn = self.model.signatures["serving_default"]

        # Look for the version in the signature file.
        # If it's not found or doesn't match the expected version, print a message.
        version = self.signature.get("export_model_version")
        if version is None or version != EXPORT_MODEL_VERSION:
            print(
                f"There has been a change to the model format. Please use a model with a signature 'export_model_version' that matches {EXPORT_MODEL_VERSION}."
            )

    def predict(self, image: Image.Image) -> dict:
        # pre-processing the image before passing to model
        image = self.process_image(image, self.inputs.get("Image").get("shape"))

        with self.lock:
            # create the feed dictionary that is the input to the model
            feed_dict = {}
            # first, add our image to the dictionary (comes from our signature.json file)
            feed_dict[list(self.inputs.keys())[0]] = tf.convert_to_tensor(image)
            # run the model!
            outputs = self.predict_fn(**feed_dict)
            # return the processed output
            return self.process_output(outputs)

    def process_image(self, image, input_shape) -> np.ndarray:
        """
        Given a PIL Image, center square crop and resize to fit the expected model input, and convert from [0,255] to [0,1] values.
        """
        width, height = image.size
        # ensure image type is compatible with model and convert if not
        if image.mode != "RGB":
            image = image.convert("RGB")
        # center crop image (you can substitute any other method to make a square image, such as just resizing or padding edges with 0)
        if width != height:
            square_size = min(width, height)
            left = (width - square_size) / 2
            top = (height - square_size) / 2
            right = (width + square_size) / 2
            bottom = (height + square_size) / 2
            # Crop the center of the image
            image = image.crop((left, top, right, bottom))
        # now the image is square, resize it to be the right shape for the model input
        input_width, input_height = input_shape[1:3]
        if image.width != input_width or image.height != input_height:
            image = image.resize((input_width, input_height))

        # make 0-1 float instead of 0-255 int (that PIL Image loads by default)
        image = np.asarray(image) / 255.0
        # pad with an extra batch dimension as expected by the model
        return np.expand_dims(image, axis=0).astype(np.float32)

    def process_output(self, outputs) -> dict:
        # do a bit of postprocessing
        out_keys = ["label", "confidence"]
        results = {}
        # since we actually ran on a batch of size 1, index out the items from the returned numpy arrays
        for key, tf_val in outputs.items():
            val = tf_val.numpy().tolist()[0]
            if isinstance(val, bytes):
                val = val.decode()
            results[key] = val
        confs = results["Confidences"]
        labels = self.signature.get("classes").get("Label")
        output = [dict(zip(out_keys, group)) for group in zip(labels, confs)]
        sorted_output = {"predictions": sorted(output, key=lambda k: k["confidence"], reverse=True)}
        return sorted_output


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Predict a label for an image.")
    parser.add_argument("image", help="Path to your image file.")
    args = parser.parse_args()
    dir_path = os.getcwd()

    if os.path.isfile(args.image):
        image = Image.open(args.image)
        model = TFModel(dir_path=dir_path)
        outputs = model.predict(image)
        print(f"Predicted: {outputs}")
    else:
        print(f"Couldn't find image file {args.image}")
Stage Elements TensorFlow/labels.txt
ADDED
@@ -0,0 +1,8 @@
Anti-Avalancha
Ballast 1000 liter tank
Chemical Toilet
K1 Bumper
LA Rack II
LA Rack II Bumper
LA Rack II Dolly Board
Portable Dreesing Room
Stage Elements TensorFlow/saved_model.pb
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bacc62994cbc9b7a862cbe197f36760c8d90f2cdb4c517d637bd31e4ebbdfa8d
size 2212095
Stage Elements TensorFlow/signature.json
ADDED
@@ -0,0 +1 @@
{"doc_id": "680d8281-8f48-4405-be9a-9a9a731bf332", "doc_name": "Stage Elements", "doc_version": "fd4eba4881fae5c6131c5100d6e15260", "format": "tf", "version": 80, "inputs": {"Image": {"dtype": "float32", "shape": [null, 224, 224, 3], "name": "Image:0"}}, "outputs": {"Confidences": {"dtype": "float32", "shape": [null, 8], "name": "sequential/dense_2/Softmax:0"}}, "tags": ["serve"], "classes": {"Label": ["Anti-Avalancha", "Ballast 1000 liter tank", "Chemical Toilet", "K1 Bumper", "LA Rack II", "LA Rack II Bumper", "LA Rack II Dolly Board", "Portable Dreesing Room"]}, "filename": "saved_model.pb", "export_model_version": 1}
Stage Elements TensorFlow/variables/variables.data-00000-of-00001
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fd3309de50979d581e929d57c85870129a0e28c5eac21ffdf0e8e545c61c7f60
size 99538976
Stage Elements TensorFlow/variables/variables.index
ADDED
Binary file (10.9 kB)