whisperkittools-539b2f3b441b2ce4bba6be007f460017b5ca6796 generated files: openai_whisper-tiny.en
- openai_whisper-tiny.en/AudioEncoder.mlmodelc/analytics/coremldata.bin +1 -1
- openai_whisper-tiny.en/AudioEncoder.mlmodelc/coremldata.bin +1 -1
- openai_whisper-tiny.en/AudioEncoder.mlmodelc/metadata.json +1 -1
- openai_whisper-tiny.en/AudioEncoder.mlmodelc/model.mil +1 -1
- openai_whisper-tiny.en/AudioEncoder.mlmodelc/weights/weight.bin +1 -1
- openai_whisper-tiny.en/MelSpectrogram.mlmodelc/analytics/coremldata.bin +1 -1
- openai_whisper-tiny.en/MelSpectrogram.mlmodelc/coremldata.bin +1 -1
- openai_whisper-tiny.en/MelSpectrogram.mlmodelc/metadata.json +1 -1
- openai_whisper-tiny.en/MelSpectrogram.mlmodelc/model.mil +7 -7
- openai_whisper-tiny.en/MelSpectrogram.mlmodelc/weights/weight.bin +1 -1
- openai_whisper-tiny.en/TextDecoder.mlmodelc/analytics/coremldata.bin +1 -1
- openai_whisper-tiny.en/TextDecoder.mlmodelc/coremldata.bin +1 -1
- openai_whisper-tiny.en/TextDecoder.mlmodelc/metadata.json +1 -1
- openai_whisper-tiny.en/TextDecoder.mlmodelc/model.mil +1 -1
- openai_whisper-tiny.en/TextDecoder.mlmodelc/weights/weight.bin +1 -1
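In short: all three compiled Core ML bundles (MelSpectrogram, AudioEncoder, TextDecoder) were regenerated with the converter's PyTorch dependency at torch==2.2.0 (coremltools stays at 7.1), so every Git LFS pointer picks up a new oid while the payload sizes are unchanged.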
openai_whisper-tiny.en/AudioEncoder.mlmodelc/analytics/coremldata.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:7cda5508b558cd2a860042f95741a84c35ba1fee1902eb0255e2600108893790
 size 243
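Each *.bin entry in this commit is a Git LFS pointer, not the binary itself: a three-line text file recording the spec version, the sha256 of the real payload, and its byte size. A minimal sketch for checking a downloaded payload against its pointer (the local file names are illustrative, not part of the repo):

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> dict:
    """Parse a 3-line Git LFS pointer into its key/value fields."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields  # {'version': ..., 'oid': 'sha256:<hex>', 'size': '<bytes>'}

def verify_payload(pointer_path: str, payload_path: str) -> bool:
    """Check that a downloaded file matches the oid and size in its LFS pointer."""
    fields = parse_lfs_pointer(pointer_path)
    data = Path(payload_path).read_bytes()
    oid_ok = fields["oid"] == "sha256:" + hashlib.sha256(data).hexdigest()
    size_ok = int(fields["size"]) == len(data)
    return oid_ok and size_ok

# Hypothetical usage:
# verify_payload("weight.bin.pointer", "AudioEncoder.mlmodelc/weights/weight.bin")
```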
openai_whisper-tiny.en/AudioEncoder.mlmodelc/coremldata.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:1e9801fef395eda1b4d1c1be4e68458f81858bbfc5823caaa1d7c2681fdad8b0
 size 347
openai_whisper-tiny.en/AudioEncoder.mlmodelc/metadata.json
CHANGED
@@ -48,7 +48,7 @@
 },
 "userDefinedMetadata" : {
   "com.github.apple.coremltools.source_dialect" : "TorchScript",
-  "com.github.apple.coremltools.source" : "torch==2.
+  "com.github.apple.coremltools.source" : "torch==2.2.0",
   "com.github.apple.coremltools.version" : "7.1"
 },
 "inputSchema" : [
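The only metadata change is the coremltools source bump to torch==2.2.0; these userDefinedMetadata keys are stamped automatically by coremltools at conversion time. A rough sketch of the kind of call that produces them, with a stand-in module and shapes taken from the MIL signature below (the real export code lives in whisperkittools):

```python
import torch
import coremltools as ct

# Stand-in for the traced Whisper audio encoder; not whisperkittools' actual module.
class TinyEncoderStub(torch.nn.Module):
    def forward(self, melspectrogram_features):
        return melspectrogram_features * 1.0

traced = torch.jit.trace(TinyEncoderStub().eval(),
                         torch.zeros(1, 80, 1, 3000))

# coremltools records torch.__version__ ("2.2.0" here) and its own version ("7.1")
# into userDefinedMetadata; the <ios17> in model.mil comes from the deployment target.
mlmodel = ct.convert(
    traced,
    inputs=[ct.TensorType(name="melspectrogram_features", shape=(1, 80, 1, 3000))],
    minimum_deployment_target=ct.target.iOS17,
    compute_precision=ct.precision.FLOAT16,
)
mlmodel.save("AudioEncoder.mlpackage")  # compiled later into AudioEncoder.mlmodelc
```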
openai_whisper-tiny.en/AudioEncoder.mlmodelc/model.mil
CHANGED
@@ -1,5 +1,5 @@
 program(1.0)
-[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.
+[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.2.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.1"}})]
 {
 func main<ios17>(tensor<fp16, [1, 80, 1, 3000]> melspectrogram_features) {
 tensor<int32, [2]> var_34 = const()[name = tensor<string, []>("op_34"), val = tensor<int32, [2]>([1, 1])];
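Aside from the buildInfo line, the encoder program is unchanged: it still takes a single fp16 melspectrogram_features tensor of shape [1, 80, 1, 3000]. A hedged sketch of exercising the compiled bundle from Python with coremltools' CompiledMLModel (the output name is not shown in this diff; check the bundle's metadata.json):

```python
import numpy as np
import coremltools as ct

# coremltools 7 can load a compiled .mlmodelc bundle directly.
encoder = ct.models.CompiledMLModel("openai_whisper-tiny.en/AudioEncoder.mlmodelc")

mel = np.zeros((1, 80, 1, 3000), dtype=np.float16)  # 30 s of 80-bin mel frames
out = encoder.predict({"melspectrogram_features": mel})
print({name: v.shape for name, v in out.items()})
# The TextDecoder signature below implies an encoder output of shape [1, 384, 1, 1500].
```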
openai_whisper-tiny.en/AudioEncoder.mlmodelc/weights/weight.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:7a98382d1979dfa5008ba80cbd86534a8150daa5f82dddc8e035c7c859e91291
 size 16422784
openai_whisper-tiny.en/MelSpectrogram.mlmodelc/analytics/coremldata.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:1bed6767841a6989471a2fed1c45df57f59ed760ee16582db10af41ba3718108
 size 243
openai_whisper-tiny.en/MelSpectrogram.mlmodelc/coremldata.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:f4f7a3c93a17310fbce74eedea8d7eec69a5ba1a14c5780e454486c508170d31
 size 328
openai_whisper-tiny.en/MelSpectrogram.mlmodelc/metadata.json
CHANGED
@@ -50,7 +50,7 @@
 },
 "userDefinedMetadata" : {
   "com.github.apple.coremltools.source_dialect" : "TorchScript",
-  "com.github.apple.coremltools.source" : "torch==2.
+  "com.github.apple.coremltools.source" : "torch==2.2.0",
   "com.github.apple.coremltools.version" : "7.1"
 },
 "inputSchema" : [
openai_whisper-tiny.en/MelSpectrogram.mlmodelc/model.mil
CHANGED
@@ -1,15 +1,15 @@
 program(1.0)
-[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.
+[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.2.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.1"}})]
 {
 func main<ios17>(tensor<fp16, [480000]> audio) {
 tensor<int32, [3]> var_10 = const()[name = tensor<string, []>("op_10"), val = tensor<int32, [3]>([1, 1, 480000])];
-tensor<fp16, [1, 1, 480000]>
-tensor<int32, [6]>
-tensor<string, []>
-tensor<fp16, []>
-tensor<fp16, [1, 1, 480400]>
+tensor<fp16, [1, 1, 480000]> input_1_cast_fp16 = reshape(shape = var_10, x = audio)[name = tensor<string, []>("input_1_cast_fp16")];
+tensor<int32, [6]> input_3_pad_0 = const()[name = tensor<string, []>("input_3_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 200, 200])];
+tensor<string, []> input_3_mode_0 = const()[name = tensor<string, []>("input_3_mode_0"), val = tensor<string, []>("reflect")];
+tensor<fp16, []> input_3_constant_val_0_to_fp16 = const()[name = tensor<string, []>("input_3_constant_val_0_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
+tensor<fp16, [1, 1, 480400]> input_3_cast_fp16 = pad(constant_val = input_3_constant_val_0_to_fp16, mode = input_3_mode_0, pad = input_3_pad_0, x = input_1_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
 tensor<int32, [1]> var_22 = const()[name = tensor<string, []>("op_22"), val = tensor<int32, [1]>([480400])];
-tensor<fp16, [480400]> input_cast_fp16 = reshape(shape = var_22, x =
+tensor<fp16, [480400]> input_cast_fp16 = reshape(shape = var_22, x = input_3_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
 tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = tensor<string, []>("expand_dims_0_axes_0"), val = tensor<int32, [1]>([0])];
 tensor<fp16, [1, 480400]> expand_dims_0_cast_fp16 = expand_dims(axes = expand_dims_0_axes_0, x = input_cast_fp16)[name = tensor<string, []>("expand_dims_0_cast_fp16")];
 tensor<int32, [1]> expand_dims_3 = const()[name = tensor<string, []>("expand_dims_3"), val = tensor<int32, [1]>([160])];
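The newly emitted ops spell out the front-end's first steps: reshape the raw [480000] audio (30 s at 16 kHz) to [1, 1, 480000], reflect-pad by 200 samples on each side (n_fft/2 for Whisper's 400-point STFT) to [1, 1, 480400], then flatten back to 1-D before framing with hop 160. A minimal PyTorch sketch of the same steps, assuming Whisper's standard STFT parameters (n_fft=400, hop_length=160):

```python
import torch
import torch.nn.functional as F

audio = torch.zeros(480000)               # 30 s @ 16 kHz, as in the MIL signature

x = audio.reshape(1, 1, 480000)           # var_10 reshape
x = F.pad(x, (200, 200), mode="reflect")  # input_3: pad [..., 200, 200], reflect
x = x.reshape(480400)                     # var_22 reshape back to 1-D

# Framing with hop 160 (expand_dims_3 = [160] above); center=False because the
# reflect padding was already applied explicitly.
stft = torch.stft(x, n_fft=400, hop_length=160,
                  window=torch.hann_window(400),
                  center=False, return_complex=True)
print(stft.shape)  # (201, 3001): later reduced to 80 mel bins x 3000 frames
```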
openai_whisper-tiny.en/MelSpectrogram.mlmodelc/weights/weight.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:bf46592e84b4a5d8aa4b965a140b69fc825c213e0f7e8786586e004358a44a07
 size 354080
openai_whisper-tiny.en/TextDecoder.mlmodelc/analytics/coremldata.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:670deacb69e3ecf2687317eeab108187a1b7d1627c4f9a51f0fb7ac18e7b8e82
 size 243
openai_whisper-tiny.en/TextDecoder.mlmodelc/coremldata.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:c6aa7666fbb0d159ea29f065c01777ff2f67cfb163789161151e6c09a1605659
 size 593
openai_whisper-tiny.en/TextDecoder.mlmodelc/metadata.json
CHANGED
@@ -74,7 +74,7 @@
 },
 "userDefinedMetadata" : {
   "com.github.apple.coremltools.source_dialect" : "TorchScript",
-  "com.github.apple.coremltools.source" : "torch==2.
+  "com.github.apple.coremltools.source" : "torch==2.2.0",
   "com.github.apple.coremltools.version" : "7.1"
 },
 "inputSchema" : [
openai_whisper-tiny.en/TextDecoder.mlmodelc/model.mil
CHANGED
@@ -1,5 +1,5 @@
 program(1.0)
-[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.
+[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.2.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.1"}})]
 {
 func main<ios17>(tensor<int32, [1]> cache_length, tensor<fp16, [1, 448]> decoder_key_padding_mask, tensor<fp16, [1, 384, 1, 1500]> encoder_output_embeds, tensor<int32, [1]> input_ids, tensor<fp16, [1, 1536, 1, 448]> key_cache, tensor<fp16, [1, 448]> kv_cache_update_mask, tensor<fp16, [1, 1536, 1, 448]> value_cache) {
 tensor<int32, []> var_24_axis_0 = const()[name = tensor<string, []>("op_24_axis_0"), val = tensor<int32, []>(0)];
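Only the buildInfo line changed here too, but the signature is worth decoding: key_cache and value_cache of shape [1, 1536, 1, 448] are the stacked self-attention KV caches (4 decoder layers x 384-dim states = 1536) over Whisper's 448-token max text context, updated one token at a time via cache_length and kv_cache_update_mask. A hedged single-step driver follows; the input names and shapes come from the MIL signature above, but the token id, mask conventions, and output names are assumptions (the real decoding loop lives in WhisperKit):

```python
import numpy as np
import coremltools as ct

decoder = ct.models.CompiledMLModel("openai_whisper-tiny.en/TextDecoder.mlmodelc")

CTX = 448   # max decoded tokens
KV = 1536   # 4 layers x 384-dim keys (and likewise values)
step = 0    # first decoding step

inputs = {
    "input_ids": np.array([50257], dtype=np.int32),  # illustrative start token
    "cache_length": np.array([step], dtype=np.int32),
    "decoder_key_padding_mask": np.full((1, CTX), -np.inf, dtype=np.float16),
    "kv_cache_update_mask": np.zeros((1, CTX), dtype=np.float16),
    "encoder_output_embeds": np.zeros((1, 384, 1, 1500), dtype=np.float16),
    "key_cache": np.zeros((1, KV, 1, CTX), dtype=np.float16),
    "value_cache": np.zeros((1, KV, 1, CTX), dtype=np.float16),
}
# Unmask positions up to the current step and flag the cache slot to update;
# the exact mask semantics are WhisperKit's, assumed here.
inputs["decoder_key_padding_mask"][0, : step + 1] = 0.0
inputs["kv_cache_update_mask"][0, step] = 1.0

out = decoder.predict(inputs)  # expected: logits plus updated KV caches
```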
openai_whisper-tiny.en/TextDecoder.mlmodelc/weights/weight.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:c80c03c5dfefe746e9b8a571393a7230c4beaa540635228b86dd293e3377cca9
 size 59215664