Upload README.md with huggingface_hub
Browse files
README.md
CHANGED
@@ -137,16 +137,16 @@ import qai_hub as hub
|
|
137 |
from qai_hub_models.models.whisper_tiny_en import WhisperEncoder, WhisperDecoder
|
138 |
|
139 |
# Load the model
|
140 |
-
|
141 |
-
|
142 |
-
|
143 |
-
|
144 |
|
145 |
# Device
|
146 |
device = hub.Device("Samsung Galaxy S23")
|
147 |
|
148 |
|
149 |
-
|
150 |
encoder_input_shape = encoder_model.get_input_spec()
|
151 |
encoder_sample_inputs = encoder_model.sample_inputs()
|
152 |
|
@@ -163,7 +163,7 @@ encoder_compile_job = hub.submit_compile_job(
|
|
163 |
encoder_target_model = encoder_compile_job.get_target_model()
|
164 |
|
165 |
|
166 |
-
|
167 |
decoder_input_shape = decoder_model.get_input_spec()
|
168 |
decoder_sample_inputs = decoder_model.sample_inputs()
|
169 |
|
@@ -191,10 +191,10 @@ provisioned in the cloud. Once the job is submitted, you can navigate to a
|
|
191 |
provided job URL to view a variety of on-device performance metrics.
|
192 |
```python
|
193 |
profile_job = hub.submit_profile_job(
|
194 |
-
|
195 |
-
|
196 |
-
)
|
197 |
-
|
198 |
```
|
199 |
|
200 |
Step 3: **Verify on-device accuracy**
|
@@ -203,13 +203,12 @@ To verify the accuracy of the model on-device, you can run on-device inference
|
|
203 |
on sample input data on the same cloud hosted device.
|
204 |
```python
|
205 |
input_data = torch_model.sample_inputs()
|
206 |
-
inference_job = hub.submit_inference_job(
|
207 |
-
|
208 |
-
|
209 |
-
|
210 |
-
)
|
211 |
-
|
212 |
-
on_device_output = inference_job.download_output_data()
|
213 |
|
214 |
```
|
215 |
With the output of the model, you can compute metrics like PSNR, relative errors or
|
|
|
137 |
from qai_hub_models.models.whisper_tiny_en import WhisperEncoder, WhisperDecoder
|
138 |
|
139 |
# Load the model
|
140 |
+
encoder_model = WhisperEncoder.from_pretrained()
|
141 |
+
|
142 |
+
decoder_model = WhisperDecoder.from_pretrained()
|
143 |
+
|
144 |
|
145 |
# Device
|
146 |
device = hub.Device("Samsung Galaxy S23")
|
147 |
|
148 |
|
149 |
+
# Trace model
|
150 |
encoder_input_shape = encoder_model.get_input_spec()
|
151 |
encoder_sample_inputs = encoder_model.sample_inputs()
|
152 |
|
|
|
163 |
encoder_target_model = encoder_compile_job.get_target_model()
|
164 |
|
165 |
|
166 |
+
# Trace model
|
167 |
decoder_input_shape = decoder_model.get_input_spec()
|
168 |
decoder_sample_inputs = decoder_model.sample_inputs()
|
169 |
|
|
|
191 |
provided job URL to view a variety of on-device performance metrics.
|
192 |
```python
|
193 |
profile_job = hub.submit_profile_job(
|
194 |
+
model=target_model,
|
195 |
+
device=device,
|
196 |
+
)
|
197 |
+
|
198 |
```
|
199 |
|
200 |
Step 3: **Verify on-device accuracy**
|
|
|
203 |
on sample input data on the same cloud hosted device.
|
204 |
```python
|
205 |
input_data = torch_model.sample_inputs()
|
206 |
+
inference_job = hub.submit_inference_job(
|
207 |
+
model=target_model,
|
208 |
+
device=device,
|
209 |
+
inputs=input_data,
|
210 |
+
)
|
211 |
+
on_device_output = inference_job.download_output_data()
|
|
|
212 |
|
213 |
```
|
214 |
With the output of the model, you can compute metrics like PSNR, relative errors or
|