aotrih committed
Commit 596b32b
1 Parent(s): 0ae83f5

Add TextDecoderContextPrefill

openai_whisper-large-v3_turbo/TextDecoderContextPrefill.mlmodelc/analytics/coremldata.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a9260e9b54eccf2870c2380de6f5844a1caf07d69c3cef1f7838de346766bd7a
+ oid sha256:3a25601f7f75bf3df5d409d7c868951386b9b9baf79c5af0983b9b219a5978d8
  size 243
openai_whisper-large-v3_turbo/TextDecoderContextPrefill.mlmodelc/model.mil CHANGED
@@ -10,7 +10,7 @@ program(1.0)
  tensor<int32, []> var_15_axis_0 = const()[name = tensor<string, []>("op_15_axis_0"), val = tensor<int32, []>(0)];
  tensor<int32, []> var_15_batch_dims_0 = const()[name = tensor<string, []>("op_15_batch_dims_0"), val = tensor<int32, []>(0)];
  tensor<bool, []> var_15_validate_indices_0 = const()[name = tensor<string, []>("op_15_validate_indices_0"), val = tensor<bool, []>(false)];
- tensor<fp16, [6, 122880]> key_cache_lut_weight_to_fp16 = const()[name = tensor<string, []>("key_cache_lut_weight_to_fp16"), val = tensor<fp16, [6, 122880]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
+ tensor<fp16, [200, 122880]> key_cache_lut_weight_to_fp16 = const()[name = tensor<string, []>("key_cache_lut_weight_to_fp16"), val = tensor<fp16, [200, 122880]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
  tensor<string, []> input_to_int16_dtype_0 = const()[name = tensor<string, []>("input_to_int16_dtype_0"), val = tensor<string, []>("int16")];
  tensor<int16, [1]> cast_6 = cast(dtype = input_to_int16_dtype_0, x = input)[name = tensor<string, []>("cast_6")];
  tensor<fp16, [1, 122880]> var_15_cast_fp16_cast_int16 = gather(axis = var_15_axis_0, batch_dims = var_15_batch_dims_0, indices = cast_6, validate_indices = var_15_validate_indices_0, x = key_cache_lut_weight_to_fp16)[name = tensor<string, []>("op_15_cast_fp16_cast_int16")];
@@ -19,7 +19,7 @@ program(1.0)
  tensor<int32, []> var_25_axis_0 = const()[name = tensor<string, []>("op_25_axis_0"), val = tensor<int32, []>(0)];
  tensor<int32, []> var_25_batch_dims_0 = const()[name = tensor<string, []>("op_25_batch_dims_0"), val = tensor<int32, []>(0)];
  tensor<bool, []> var_25_validate_indices_0 = const()[name = tensor<string, []>("op_25_validate_indices_0"), val = tensor<bool, []>(false)];
- tensor<fp16, [6, 122880]> value_cache_lut_weight_to_fp16 = const()[name = tensor<string, []>("value_cache_lut_weight_to_fp16"), val = tensor<fp16, [6, 122880]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1474688)))];
+ tensor<fp16, [200, 122880]> value_cache_lut_weight_to_fp16 = const()[name = tensor<string, []>("value_cache_lut_weight_to_fp16"), val = tensor<fp16, [200, 122880]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(49152128)))];
  tensor<fp16, [1, 122880]> var_25_cast_fp16_cast_int16 = gather(axis = var_25_axis_0, batch_dims = var_25_batch_dims_0, indices = cast_6, validate_indices = var_25_validate_indices_0, x = value_cache_lut_weight_to_fp16)[name = tensor<string, []>("op_25_cast_fp16_cast_int16")];
  tensor<int32, [4]> var_30 = const()[name = tensor<string, []>("op_30"), val = tensor<int32, [4]>([1, 40960, 1, 3])];
  tensor<fp16, [1, 40960, 1, 3]> value_cache_prefill = reshape(shape = var_30, x = var_25_cast_fp16_cast_int16)[name = tensor<string, []>("op_31_cast_fp16")];
openai_whisper-large-v3_turbo/TextDecoderContextPrefill.mlmodelc/weights/weight.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c5ca46b0f7d143543d60f041b247acd38faac9bdd6db8cd554fc2eac51d090ab
- size 2949312
+ oid sha256:bd4457ee28e369c2f1d7858370352085554a1fb6def48db64f4f4ab27c7015bc
+ size 98304192
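
A quick consistency check on the numbers above (not part of the commit): the new weight.bin size follows from the two fp16 lookup tables growing from [6, 122880] to [200, 122880] in model.mil. The 64-byte padding around each blob below is an assumption inferred from the offsets (64 and 49152128) and the old file size (2949312), not from the Core ML spec; a minimal sketch:

```python
# Sanity check, assuming weight.bin is laid out as:
#   64-byte header + key LUT blob + 64-byte gap + value LUT blob + 64-byte trailer
# (the 64-byte figures are inferred from the offsets and sizes in this diff).
FP16_BYTES = 2
PAD = 64

def weight_bin_size(rows: int, cols: int) -> int:
    table = rows * cols * FP16_BYTES          # bytes per LUT table
    return PAD + table + PAD + table + PAD    # key table + value table

assert weight_bin_size(6, 122880) == 2_949_312      # old size in the LFS pointer
assert weight_bin_size(200, 122880) == 98_304_192   # new size in the LFS pointer
assert PAD + 200 * 122880 * FP16_BYTES + PAD == 49_152_128  # new value_cache_lut offset
```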