smpanaro committed
Commit e97e085
1 parent: e8fcc94

Add models

Files changed (38)
  1. Llama-3.2-1B-Instruct_chunk1.mlmodelc/analytics/coremldata.bin +3 -0
  2. Llama-3.2-1B-Instruct_chunk1.mlmodelc/coremldata.bin +3 -0
  3. Llama-3.2-1B-Instruct_chunk1.mlmodelc/metadata.json +105 -0
  4. Llama-3.2-1B-Instruct_chunk1.mlmodelc/model.mil +50 -0
  5. Llama-3.2-1B-Instruct_chunk1.mlmodelc/weights/weight.bin +3 -0
  6. Llama-3.2-1B-Instruct_chunk2.mlmodelc/analytics/coremldata.bin +3 -0
  7. Llama-3.2-1B-Instruct_chunk2.mlmodelc/coremldata.bin +3 -0
  8. Llama-3.2-1B-Instruct_chunk2.mlmodelc/metadata.json +258 -0
  9. Llama-3.2-1B-Instruct_chunk2.mlmodelc/model.mil +0 -0
  10. Llama-3.2-1B-Instruct_chunk2.mlmodelc/weights/weight.bin +3 -0
  11. Llama-3.2-1B-Instruct_chunk3.mlmodelc/analytics/coremldata.bin +3 -0
  12. Llama-3.2-1B-Instruct_chunk3.mlmodelc/coremldata.bin +3 -0
  13. Llama-3.2-1B-Instruct_chunk3.mlmodelc/metadata.json +258 -0
  14. Llama-3.2-1B-Instruct_chunk3.mlmodelc/model.mil +0 -0
  15. Llama-3.2-1B-Instruct_chunk3.mlmodelc/weights/weight.bin +3 -0
  16. Llama-3.2-1B-Instruct_chunk4.mlmodelc/analytics/coremldata.bin +3 -0
  17. Llama-3.2-1B-Instruct_chunk4.mlmodelc/coremldata.bin +3 -0
  18. Llama-3.2-1B-Instruct_chunk4.mlmodelc/metadata.json +258 -0
  19. Llama-3.2-1B-Instruct_chunk4.mlmodelc/model.mil +0 -0
  20. Llama-3.2-1B-Instruct_chunk4.mlmodelc/weights/weight.bin +3 -0
  21. Llama-3.2-1B-Instruct_chunk5.mlmodelc/analytics/coremldata.bin +3 -0
  22. Llama-3.2-1B-Instruct_chunk5.mlmodelc/coremldata.bin +3 -0
  23. Llama-3.2-1B-Instruct_chunk5.mlmodelc/metadata.json +258 -0
  24. Llama-3.2-1B-Instruct_chunk5.mlmodelc/model.mil +0 -0
  25. Llama-3.2-1B-Instruct_chunk5.mlmodelc/weights/weight.bin +3 -0
  26. Llama-3.2-1B-Instruct_chunk6.mlmodelc/analytics/coremldata.bin +3 -0
  27. Llama-3.2-1B-Instruct_chunk6.mlmodelc/coremldata.bin +3 -0
  28. Llama-3.2-1B-Instruct_chunk6.mlmodelc/metadata.json +64 -0
  29. Llama-3.2-1B-Instruct_chunk6.mlmodelc/model.mil +77 -0
  30. Llama-3.2-1B-Instruct_chunk6.mlmodelc/weights/weight.bin +3 -0
  31. cache-processor.mlmodelc/analytics/coremldata.bin +3 -0
  32. cache-processor.mlmodelc/coremldata.bin +3 -0
  33. cache-processor.mlmodelc/metadata.json +109 -0
  34. cache-processor.mlmodelc/model.mil +24 -0
  35. logit-processor.mlmodelc/analytics/coremldata.bin +3 -0
  36. logit-processor.mlmodelc/coremldata.bin +3 -0
  37. logit-processor.mlmodelc/metadata.json +58 -0
  38. logit-processor.mlmodelc/model.mil +9 -0
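Taken together, these 38 files make up a chunked Core ML pipeline for Llama 3.2 1B Instruct: six compiled transformer chunks (chunk1 through chunk6 .mlmodelc bundles) plus small cache-processor and logit-processor models, with the large binaries tracked through Git LFS. As a rough orientation before the per-file diffs below, here is a minimal loading sketch using coremltools; it assumes the repository is checked out locally with the LFS objects materialized and a recent coremltools installed (CompiledMLModel loads .mlmodelc bundles directly). The compute-unit choice is only a plausible default, not something this commit specifies.

```python
# Hedged sketch: load the six compiled chunks and the two helper models.
# Paths follow the file list above; compute_units is an assumption.
import coremltools as ct

chunk_paths = [f"Llama-3.2-1B-Instruct_chunk{i}.mlmodelc" for i in range(1, 7)]
chunks = [
    ct.models.CompiledMLModel(path, compute_units=ct.ComputeUnit.CPU_AND_NE)
    for path in chunk_paths
]
logit_processor = ct.models.CompiledMLModel("logit-processor.mlmodelc")
cache_processor = ct.models.CompiledMLModel("cache-processor.mlmodelc")
```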
Llama-3.2-1B-Instruct_chunk1.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c1fdd81f7aa992caa1f946bcb46d36fce0df7cd32f7ad9ca046e84e0bcfa9fa7
+ size 243
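This three-line file is a Git LFS pointer (spec version, sha256 oid, byte size), not the binary itself; the same pattern repeats for every coremldata.bin and weight.bin below, so the actual blobs have to be fetched before the models can run. A hedged example using huggingface_hub follows; the repo_id is a placeholder of my own, since the commit page does not name the repository.

```python
# Hedged sketch: download the real LFS-backed files instead of the pointers.
# REPO_ID is hypothetical -- substitute the actual Hugging Face repository id.
from huggingface_hub import snapshot_download

REPO_ID = "smpanaro/Llama-3.2-1B-Instruct-CoreML"  # placeholder
local_dir = snapshot_download(repo_id=REPO_ID, allow_patterns=["*.mlmodelc/*"])
print("Models downloaded to", local_dir)
```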
Llama-3.2-1B-Instruct_chunk1.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:029f313416e6f6e57a6e277e61397fb98e2c1b3c88aaf17c11507cb80f63b720
+ size 407
Llama-3.2-1B-Instruct_chunk1.mlmodelc/metadata.json ADDED
@@ -0,0 +1,105 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "storagePrecision" : "Float16",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 2048 × 8 × 8)",
+ "shortDescription" : "",
+ "shape" : "[1, 2048, 8, 8]",
+ "name" : "x",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 64 × 64)",
+ "shortDescription" : "",
+ "shape" : "[64, 64]",
+ "name" : "cos",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 64 × 64)",
+ "shortDescription" : "",
+ "shape" : "[64, 64]",
+ "name" : "sin",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 64)",
+ "shortDescription" : "",
+ "shape" : "[1, 512, 1, 64]",
+ "name" : "mask",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 7,
+ "mlProgramOperationTypeHistogram" : {
+ "Select" : 2,
+ "Tile" : 2,
+ "Ios16.sub" : 3,
+ "Transpose" : 2,
+ "Ios16.gather" : 3,
+ "ExpandDims" : 3,
+ "Ios16.reshape" : 1,
+ "Ios16.maximum" : 1,
+ "Ios16.less" : 2
+ },
+ "computePrecision" : "Mixed (Float16, Int32)",
+ "isUpdatable" : "0",
+ "availability" : {
+ "macOS" : "13.0",
+ "tvOS" : "16.0",
+ "visionOS" : "1.0",
+ "watchOS" : "9.0",
+ "iOS" : "16.0",
+ "macCatalyst" : "16.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
+ "com.github.apple.coremltools.source" : "torch==2.1.0",
+ "com.github.apple.coremltools.version" : "8.0b1"
+ },
+ "inputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Int32",
+ "formattedType" : "MultiArray (Int32 1 × 64)",
+ "shortDescription" : "",
+ "shape" : "[1, 64]",
+ "name" : "input_ids",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Int32",
+ "formattedType" : "MultiArray (Int32 1)",
+ "shortDescription" : "",
+ "shape" : "[1]",
+ "name" : "full_sequence_length",
+ "type" : "MultiArray"
+ }
+ ],
+ "generatedClassName" : "Llama_3_2_1B_Instruct_2024_10_10_23_56_41_chunk1",
+ "method" : "predict"
+ }
+ ]
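Per this schema, chunk 1 is the front end of the pipeline: it takes input_ids (Int32, 1 × 64) and full_sequence_length (Int32, 1) and produces the hidden state x (1 × 2048 × 8 × 8) along with the cos/sin rotary tables and the attention mask consumed by the later chunks. The model.mil below clamps positions against 64 − full_sequence_length, which suggests the live tokens sit right-aligned in the 64-slot window. A minimal prediction sketch under that assumption (token values are dummies):

```python
# Hedged sketch: one call to chunk 1 using the schema documented above.
# Token ids are placeholders; real ids come from the Llama 3.2 tokenizer.
import numpy as np
import coremltools as ct

chunk1 = ct.models.CompiledMLModel("Llama-3.2-1B-Instruct_chunk1.mlmodelc")

tokens = [128000, 9906, 1917]                        # placeholder token ids
input_ids = np.zeros((1, 64), dtype=np.int32)
input_ids[0, -len(tokens):] = tokens                 # right-align in the window
full_sequence_length = np.array([len(tokens)], dtype=np.int32)

out = chunk1.predict({
    "input_ids": input_ids,
    "full_sequence_length": full_sequence_length,
})
print(out["x"].shape, out["cos"].shape, out["sin"].shape, out["mask"].shape)
# Expected per the schema: (1, 2048, 8, 8) (64, 64) (64, 64) (1, 512, 1, 64)
```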
Llama-3.2-1B-Instruct_chunk1.mlmodelc/model.mil ADDED
@@ -0,0 +1,50 @@
1
+ program(1.0)
2
+ [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3304.5.2"}, {"coremlc-version", "3304.6.2"}, {"coremltools-component-torch", "2.1.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0b1"}})]
3
+ {
4
+ func main<ios16>(tensor<int32, [1]> full_sequence_length, tensor<int32, [1, 64]> input_ids) {
5
+ tensor<int32, [1]> T = const()[name = tensor<string, []>("T"), val = tensor<int32, [1]>([64])];
6
+ tensor<int32, []> x_1_axis_0 = const()[name = tensor<string, []>("x_1_axis_0"), val = tensor<int32, []>(0)];
7
+ tensor<int32, []> x_1_batch_dims_0 = const()[name = tensor<string, []>("x_1_batch_dims_0"), val = tensor<int32, []>(0)];
8
+ tensor<fp16, [128256, 2048]> wte_weight_to_fp16 = const()[name = tensor<string, []>("wte_weight_to_fp16"), val = tensor<fp16, [128256, 2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
9
+ tensor<fp16, [1, 64, 2048]> x_1_cast_fp16 = gather(axis = x_1_axis_0, batch_dims = x_1_batch_dims_0, indices = input_ids, x = wte_weight_to_fp16)[name = tensor<string, []>("x_1_cast_fp16")];
10
+ tensor<int32, [3]> x_perm_0 = const()[name = tensor<string, []>("x_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
11
+ tensor<int32, [4]> var_27 = const()[name = tensor<string, []>("op_27"), val = tensor<int32, [4]>([1, 2048, -1, 8])];
12
+ tensor<fp16, [1, 2048, 64]> x_cast_fp16 = transpose(perm = x_perm_0, x = x_1_cast_fp16)[name = tensor<string, []>("transpose_1")];
13
+ tensor<fp16, [1, 2048, 8, 8]> x = reshape(shape = var_27, x = x_cast_fp16)[name = tensor<string, []>("op_28_cast_fp16")];
14
+ tensor<int32, [1]> pos_offset = sub(x = T, y = full_sequence_length)[name = tensor<string, []>("pos_offset")];
15
+ tensor<int32, [64]> var_36 = const()[name = tensor<string, []>("op_36"), val = tensor<int32, [64]>([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63])];
16
+ tensor<int32, [64]> input_pos_1 = sub(x = var_36, y = pos_offset)[name = tensor<string, []>("input_pos_1")];
17
+ tensor<int32, [64]> var_44 = const()[name = tensor<string, []>("op_44"), val = tensor<int32, [64]>([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])];
18
+ tensor<int32, [64]> input_pos = maximum(x = input_pos_1, y = var_44)[name = tensor<string, []>("input_pos")];
19
+ tensor<int32, []> var_55 = const()[name = tensor<string, []>("op_55"), val = tensor<int32, []>(1)];
20
+ tensor<int32, []> cos_batch_dims_0 = const()[name = tensor<string, []>("cos_batch_dims_0"), val = tensor<int32, []>(0)];
21
+ tensor<fp16, [64, 512]> var_54_to_fp16 = const()[name = tensor<string, []>("op_54_to_fp16"), val = tensor<fp16, [64, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(525336704)))];
22
+ tensor<fp16, [64, 64]> cos = gather(axis = var_55, batch_dims = cos_batch_dims_0, indices = input_pos, x = var_54_to_fp16)[name = tensor<string, []>("cos_cast_fp16")];
23
+ tensor<int32, []> var_66 = const()[name = tensor<string, []>("op_66"), val = tensor<int32, []>(1)];
24
+ tensor<int32, []> sin_batch_dims_0 = const()[name = tensor<string, []>("sin_batch_dims_0"), val = tensor<int32, []>(0)];
25
+ tensor<fp16, [64, 512]> var_65_to_fp16 = const()[name = tensor<string, []>("op_65_to_fp16"), val = tensor<fp16, [64, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(525402304)))];
26
+ tensor<fp16, [64, 64]> sin = gather(axis = var_66, batch_dims = sin_batch_dims_0, indices = input_pos, x = var_65_to_fp16)[name = tensor<string, []>("sin_cast_fp16")];
27
+ tensor<int32, [64, 1]> var_102 = const()[name = tensor<string, []>("op_102"), val = tensor<int32, [64, 1]>([[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13], [14], [15], [16], [17], [18], [19], [20], [21], [22], [23], [24], [25], [26], [27], [28], [29], [30], [31], [32], [33], [34], [35], [36], [37], [38], [39], [40], [41], [42], [43], [44], [45], [46], [47], [48], [49], [50], [51], [52], [53], [54], [55], [56], [57], [58], [59], [60], [61], [62], [63]])];
28
+ tensor<bool, [64, 1]> var_105 = less(x = var_102, y = pos_offset)[name = tensor<string, []>("op_105")];
29
+ tensor<int32, [2]> var_105_after_broadcast_reps_0 = const()[name = tensor<string, []>("op_105_after_broadcast_reps_0"), val = tensor<int32, [2]>([1, 512])];
30
+ tensor<bool, [64, 512]> var_105_after_broadcast = tile(reps = var_105_after_broadcast_reps_0, x = var_105)[name = tensor<string, []>("op_105_after_broadcast")];
31
+ tensor<fp16, [64, 512]> all_mask_to_fp16 = const()[name = tensor<string, []>("all_mask_to_fp16"), val = tensor<fp16, [64, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(525467904)))];
32
+ tensor<fp16, [64, 512]> m_1_to_fp16 = const()[name = tensor<string, []>("m_1_to_fp16"), val = tensor<fp16, [64, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(525533504)))];
33
+ tensor<fp16, [64, 512]> m_3_cast_fp16 = select(a = all_mask_to_fp16, b = m_1_to_fp16, cond = var_105_after_broadcast)[name = tensor<string, []>("m_3_cast_fp16")];
34
+ tensor<int32, [512]> var_115 = const()[name = tensor<string, []>("op_115"), val = tensor<int32, [512]>([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511])];
35
+ tensor<int32, []> var_116 = const()[name = tensor<string, []>("op_116"), val = tensor<int32, []>(512)];
36
+ tensor<int32, [1]> var_118 = sub(x = var_116, y = full_sequence_length)[name = tensor<string, []>("op_118")];
37
+ tensor<bool, [512]> var_119 = less(x = var_115, y = var_118)[name = tensor<string, []>("op_119")];
38
+ tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = tensor<string, []>("expand_dims_0_axes_0"), val = tensor<int32, [1]>([0])];
39
+ tensor<bool, [1, 512]> expand_dims_0 = expand_dims(axes = expand_dims_0_axes_0, x = var_119)[name = tensor<string, []>("expand_dims_0")];
40
+ tensor<int32, [2]> var_119_after_broadcast_reps_0 = const()[name = tensor<string, []>("op_119_after_broadcast_reps_0"), val = tensor<int32, [2]>([64, 1])];
41
+ tensor<bool, [64, 512]> var_119_after_broadcast = tile(reps = var_119_after_broadcast_reps_0, x = expand_dims_0)[name = tensor<string, []>("op_119_after_broadcast")];
42
+ tensor<fp16, [64, 512]> m_cast_fp16 = select(a = all_mask_to_fp16, b = m_3_cast_fp16, cond = var_119_after_broadcast)[name = tensor<string, []>("m_cast_fp16")];
43
+ tensor<int32, [1]> var_122_axes_0 = const()[name = tensor<string, []>("op_122_axes_0"), val = tensor<int32, [1]>([0])];
44
+ tensor<fp16, [1, 64, 512]> var_122_cast_fp16 = expand_dims(axes = var_122_axes_0, x = m_cast_fp16)[name = tensor<string, []>("op_122_cast_fp16")];
45
+ tensor<int32, [1]> mask_axes_0 = const()[name = tensor<string, []>("mask_axes_0"), val = tensor<int32, [1]>([0])];
46
+ tensor<fp16, [1, 1, 64, 512]> mask_cast_fp16 = expand_dims(axes = mask_axes_0, x = var_122_cast_fp16)[name = tensor<string, []>("mask_cast_fp16")];
47
+ tensor<int32, [4]> var_129 = const()[name = tensor<string, []>("op_129"), val = tensor<int32, [4]>([0, 3, 1, 2])];
48
+ tensor<fp16, [1, 512, 1, 64]> mask = transpose(perm = var_129, x = mask_cast_fp16)[name = tensor<string, []>("transpose_0")];
49
+ } -> (x, cos, sin, mask);
50
+ }
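The program above does three things in one graph: a token-embedding gather from the 128256 × 2048 wte table, a position computation that right-aligns the live tokens against full_sequence_length, and assembly of the attention mask by selecting between two precomputed 64 × 512 mask tables. The position logic (ops op_36 through input_pos) reduces to a few lines of array math; the numpy restatement below is my reading of the MIL, not code shipped with the model.

```python
# Hedged numpy restatement of chunk 1's position math:
# pos_offset = 64 - full_sequence_length; positions are arange(64) - pos_offset,
# clamped at 0 so the padding slots all map to position 0.
import numpy as np

T = 64
full_sequence_length = 3                     # example value
pos_offset = T - full_sequence_length        # leading slots are padding
input_pos = np.maximum(np.arange(T) - pos_offset, 0)
print(input_pos[-5:])                        # -> [0 0 0 1 2]
```

The same positions then index the 64 × 512 rotary tables (the two gathers in the program) to produce the cos and sin outputs.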
Llama-3.2-1B-Instruct_chunk1.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5080c5c470cc23733c394cdef3dc1da1f940a45cce75d6bf9587d70f7933ae8e
+ size 525599104
Llama-3.2-1B-Instruct_chunk2.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa232688f37d1b639e14da601159147837ea3a6d7861e440adc8913fb9b3246f
+ size 243
Llama-3.2-1B-Instruct_chunk2.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbed3683f2e4450c8c82941040938b8db75f66e3348e5f757e67745654a5b4e4
+ size 931
Llama-3.2-1B-Instruct_chunk2.mlmodelc/metadata.json ADDED
@@ -0,0 +1,258 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "storagePrecision" : "Float16",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float16",
10
+ "formattedType" : "MultiArray (Float16 1 × 2048 × 8 × 8)",
11
+ "shortDescription" : "",
12
+ "shape" : "[1, 2048, 8, 8]",
13
+ "name" : "new_x",
14
+ "type" : "MultiArray"
15
+ },
16
+ {
17
+ "hasShapeFlexibility" : "0",
18
+ "isOptional" : "0",
19
+ "dataType" : "Float16",
20
+ "formattedType" : "MultiArray (Float16 1 × 64 × 1 × 512)",
21
+ "shortDescription" : "",
22
+ "shape" : "[1, 64, 1, 512]",
23
+ "name" : "new_k_cache_0",
24
+ "type" : "MultiArray"
25
+ },
26
+ {
27
+ "hasShapeFlexibility" : "0",
28
+ "isOptional" : "0",
29
+ "dataType" : "Float16",
30
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 64)",
31
+ "shortDescription" : "",
32
+ "shape" : "[1, 512, 1, 64]",
33
+ "name" : "new_v_cache_0",
34
+ "type" : "MultiArray"
35
+ },
36
+ {
37
+ "hasShapeFlexibility" : "0",
38
+ "isOptional" : "0",
39
+ "dataType" : "Float16",
40
+ "formattedType" : "MultiArray (Float16 1 × 64 × 1 × 512)",
41
+ "shortDescription" : "",
42
+ "shape" : "[1, 64, 1, 512]",
43
+ "name" : "new_k_cache_1",
44
+ "type" : "MultiArray"
45
+ },
46
+ {
47
+ "hasShapeFlexibility" : "0",
48
+ "isOptional" : "0",
49
+ "dataType" : "Float16",
50
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 64)",
51
+ "shortDescription" : "",
52
+ "shape" : "[1, 512, 1, 64]",
53
+ "name" : "new_v_cache_1",
54
+ "type" : "MultiArray"
55
+ },
56
+ {
57
+ "hasShapeFlexibility" : "0",
58
+ "isOptional" : "0",
59
+ "dataType" : "Float16",
60
+ "formattedType" : "MultiArray (Float16 1 × 64 × 1 × 512)",
61
+ "shortDescription" : "",
62
+ "shape" : "[1, 64, 1, 512]",
63
+ "name" : "new_k_cache_2",
64
+ "type" : "MultiArray"
65
+ },
66
+ {
67
+ "hasShapeFlexibility" : "0",
68
+ "isOptional" : "0",
69
+ "dataType" : "Float16",
70
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 64)",
71
+ "shortDescription" : "",
72
+ "shape" : "[1, 512, 1, 64]",
73
+ "name" : "new_v_cache_2",
74
+ "type" : "MultiArray"
75
+ },
76
+ {
77
+ "hasShapeFlexibility" : "0",
78
+ "isOptional" : "0",
79
+ "dataType" : "Float16",
80
+ "formattedType" : "MultiArray (Float16 1 × 64 × 1 × 512)",
81
+ "shortDescription" : "",
82
+ "shape" : "[1, 64, 1, 512]",
83
+ "name" : "new_k_cache_3",
84
+ "type" : "MultiArray"
85
+ },
86
+ {
87
+ "hasShapeFlexibility" : "0",
88
+ "isOptional" : "0",
89
+ "dataType" : "Float16",
90
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 64)",
91
+ "shortDescription" : "",
92
+ "shape" : "[1, 512, 1, 64]",
93
+ "name" : "new_v_cache_3",
94
+ "type" : "MultiArray"
95
+ }
96
+ ],
97
+ "modelParameters" : [
98
+
99
+ ],
100
+ "specificationVersion" : 7,
101
+ "mlProgramOperationTypeHistogram" : {
102
+ "Concat" : 28,
103
+ "Ios16.mul" : 172,
104
+ "SliceByIndex" : 208,
105
+ "Transpose" : 4,
106
+ "Ios16.einsum" : 256,
107
+ "Ios16.conv" : 28,
108
+ "Ios16.add" : 144,
109
+ "Ios16.realDiv" : 8,
110
+ "Ios16.softmax" : 128,
111
+ "Ios16.reduceL2Norm" : 8,
112
+ "Ios16.reshape" : 28,
113
+ "Ios16.silu" : 4
114
+ },
115
+ "computePrecision" : "Mixed (Float16, Int32)",
116
+ "isUpdatable" : "0",
117
+ "availability" : {
118
+ "macOS" : "13.0",
119
+ "tvOS" : "16.0",
120
+ "visionOS" : "1.0",
121
+ "watchOS" : "9.0",
122
+ "iOS" : "16.0",
123
+ "macCatalyst" : "16.0"
124
+ },
125
+ "modelType" : {
126
+ "name" : "MLModelType_mlProgram"
127
+ },
128
+ "userDefinedMetadata" : {
129
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
130
+ "com.github.apple.coremltools.source" : "torch==2.1.0",
131
+ "com.github.apple.coremltools.version" : "8.0b1"
132
+ },
133
+ "inputSchema" : [
134
+ {
135
+ "hasShapeFlexibility" : "0",
136
+ "isOptional" : "0",
137
+ "dataType" : "Float16",
138
+ "formattedType" : "MultiArray (Float16 1 × 2048 × 8 × 8)",
139
+ "shortDescription" : "",
140
+ "shape" : "[1, 2048, 8, 8]",
141
+ "name" : "x",
142
+ "type" : "MultiArray"
143
+ },
144
+ {
145
+ "hasShapeFlexibility" : "0",
146
+ "isOptional" : "0",
147
+ "dataType" : "Float16",
148
+ "formattedType" : "MultiArray (Float16 64 × 64)",
149
+ "shortDescription" : "",
150
+ "shape" : "[64, 64]",
151
+ "name" : "cos",
152
+ "type" : "MultiArray"
153
+ },
154
+ {
155
+ "hasShapeFlexibility" : "0",
156
+ "isOptional" : "0",
157
+ "dataType" : "Float16",
158
+ "formattedType" : "MultiArray (Float16 64 × 64)",
159
+ "shortDescription" : "",
160
+ "shape" : "[64, 64]",
161
+ "name" : "sin",
162
+ "type" : "MultiArray"
163
+ },
164
+ {
165
+ "hasShapeFlexibility" : "0",
166
+ "isOptional" : "0",
167
+ "dataType" : "Float16",
168
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 64)",
169
+ "shortDescription" : "",
170
+ "shape" : "[1, 512, 1, 64]",
171
+ "name" : "mask",
172
+ "type" : "MultiArray"
173
+ },
174
+ {
175
+ "hasShapeFlexibility" : "0",
176
+ "isOptional" : "1",
177
+ "dataType" : "Float16",
178
+ "formattedType" : "MultiArray (Float16 1 × 448 × 1 × 512)?",
179
+ "shortDescription" : "",
180
+ "shape" : "[1, 448, 1, 512]",
181
+ "name" : "k_cache_0",
182
+ "type" : "MultiArray"
183
+ },
184
+ {
185
+ "hasShapeFlexibility" : "0",
186
+ "isOptional" : "1",
187
+ "dataType" : "Float16",
188
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 448)?",
189
+ "shortDescription" : "",
190
+ "shape" : "[1, 512, 1, 448]",
191
+ "name" : "v_cache_0",
192
+ "type" : "MultiArray"
193
+ },
194
+ {
195
+ "hasShapeFlexibility" : "0",
196
+ "isOptional" : "1",
197
+ "dataType" : "Float16",
198
+ "formattedType" : "MultiArray (Float16 1 × 448 × 1 × 512)?",
199
+ "shortDescription" : "",
200
+ "shape" : "[1, 448, 1, 512]",
201
+ "name" : "k_cache_1",
202
+ "type" : "MultiArray"
203
+ },
204
+ {
205
+ "hasShapeFlexibility" : "0",
206
+ "isOptional" : "1",
207
+ "dataType" : "Float16",
208
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 448)?",
209
+ "shortDescription" : "",
210
+ "shape" : "[1, 512, 1, 448]",
211
+ "name" : "v_cache_1",
212
+ "type" : "MultiArray"
213
+ },
214
+ {
215
+ "hasShapeFlexibility" : "0",
216
+ "isOptional" : "1",
217
+ "dataType" : "Float16",
218
+ "formattedType" : "MultiArray (Float16 1 × 448 × 1 × 512)?",
219
+ "shortDescription" : "",
220
+ "shape" : "[1, 448, 1, 512]",
221
+ "name" : "k_cache_2",
222
+ "type" : "MultiArray"
223
+ },
224
+ {
225
+ "hasShapeFlexibility" : "0",
226
+ "isOptional" : "1",
227
+ "dataType" : "Float16",
228
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 448)?",
229
+ "shortDescription" : "",
230
+ "shape" : "[1, 512, 1, 448]",
231
+ "name" : "v_cache_2",
232
+ "type" : "MultiArray"
233
+ },
234
+ {
235
+ "hasShapeFlexibility" : "0",
236
+ "isOptional" : "1",
237
+ "dataType" : "Float16",
238
+ "formattedType" : "MultiArray (Float16 1 × 448 × 1 × 512)?",
239
+ "shortDescription" : "",
240
+ "shape" : "[1, 448, 1, 512]",
241
+ "name" : "k_cache_3",
242
+ "type" : "MultiArray"
243
+ },
244
+ {
245
+ "hasShapeFlexibility" : "0",
246
+ "isOptional" : "1",
247
+ "dataType" : "Float16",
248
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 448)?",
249
+ "shortDescription" : "",
250
+ "shape" : "[1, 512, 1, 448]",
251
+ "name" : "v_cache_3",
252
+ "type" : "MultiArray"
253
+ }
254
+ ],
255
+ "generatedClassName" : "Llama_3_2_1B_Instruct_2024_10_10_23_56_41_chunk2",
256
+ "method" : "predict"
257
+ }
258
+ ]
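Chunks 2 through 5 all share this interface: each takes x, cos, sin, and mask plus optional per-layer caches k_cache_i (1 × 448 × 1 × 512) and v_cache_i (1 × 512 × 1 × 448), and returns new_x together with new_k_cache_i / new_v_cache_i covering only the 64 freshly processed positions. Merging those new entries back into the 448-slot caches is presumably the job of the cache-processor model listed above, whose schema is not expanded in this commit; the sketch below stands in for that step with a plain concatenate-and-trim in numpy, which is an assumption on my part.

```python
# Hedged sketch: one step through chunk 2, carrying its four per-layer caches
# outside the model. The concatenate-and-trim update is a stand-in for the
# shipped cache-processor model, whose interface is not shown in this diff.
import numpy as np
import coremltools as ct

chunk2 = ct.models.CompiledMLModel("Llama-3.2-1B-Instruct_chunk2.mlmodelc")

def chunk2_step(x, cos, sin, mask, caches=None):
    inputs = {"x": x, "cos": cos, "sin": sin, "mask": mask}
    if caches:
        inputs.update(caches)                      # k_cache_0..3 / v_cache_0..3
    out = chunk2.predict(inputs)

    updated = {}
    for i in range(4):
        k_old = caches[f"k_cache_{i}"] if caches else np.zeros((1, 448, 1, 512), np.float16)
        v_old = caches[f"v_cache_{i}"] if caches else np.zeros((1, 512, 1, 448), np.float16)
        # keep the most recent 448 positions (K caches on axis 1, V caches on axis 3)
        updated[f"k_cache_{i}"] = np.concatenate([k_old, out[f"new_k_cache_{i}"]], axis=1)[:, -448:]
        updated[f"v_cache_{i}"] = np.concatenate([v_old, out[f"new_v_cache_{i}"]], axis=3)[..., -448:]
    return out["new_x"], updated
```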
Llama-3.2-1B-Instruct_chunk2.mlmodelc/model.mil ADDED
The diff for this file is too large to render.
Llama-3.2-1B-Instruct_chunk2.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23c3f4ed69a73c80f959d0e977ff7677c380a3e8f06a8af84f2003f5cdd81c5b
+ size 486575936
Llama-3.2-1B-Instruct_chunk3.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa232688f37d1b639e14da601159147837ea3a6d7861e440adc8913fb9b3246f
+ size 243
Llama-3.2-1B-Instruct_chunk3.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:507886ccf77d43d177e993f226218bda5b20448ffeb13ac2e9f6a768ffb75b70
+ size 931
Llama-3.2-1B-Instruct_chunk3.mlmodelc/metadata.json ADDED
@@ -0,0 +1,258 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "storagePrecision" : "Float16",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float16",
10
+ "formattedType" : "MultiArray (Float16 1 × 2048 × 8 × 8)",
11
+ "shortDescription" : "",
12
+ "shape" : "[1, 2048, 8, 8]",
13
+ "name" : "new_x",
14
+ "type" : "MultiArray"
15
+ },
16
+ {
17
+ "hasShapeFlexibility" : "0",
18
+ "isOptional" : "0",
19
+ "dataType" : "Float16",
20
+ "formattedType" : "MultiArray (Float16 1 × 64 × 1 × 512)",
21
+ "shortDescription" : "",
22
+ "shape" : "[1, 64, 1, 512]",
23
+ "name" : "new_k_cache_0",
24
+ "type" : "MultiArray"
25
+ },
26
+ {
27
+ "hasShapeFlexibility" : "0",
28
+ "isOptional" : "0",
29
+ "dataType" : "Float16",
30
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 64)",
31
+ "shortDescription" : "",
32
+ "shape" : "[1, 512, 1, 64]",
33
+ "name" : "new_v_cache_0",
34
+ "type" : "MultiArray"
35
+ },
36
+ {
37
+ "hasShapeFlexibility" : "0",
38
+ "isOptional" : "0",
39
+ "dataType" : "Float16",
40
+ "formattedType" : "MultiArray (Float16 1 × 64 × 1 × 512)",
41
+ "shortDescription" : "",
42
+ "shape" : "[1, 64, 1, 512]",
43
+ "name" : "new_k_cache_1",
44
+ "type" : "MultiArray"
45
+ },
46
+ {
47
+ "hasShapeFlexibility" : "0",
48
+ "isOptional" : "0",
49
+ "dataType" : "Float16",
50
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 64)",
51
+ "shortDescription" : "",
52
+ "shape" : "[1, 512, 1, 64]",
53
+ "name" : "new_v_cache_1",
54
+ "type" : "MultiArray"
55
+ },
56
+ {
57
+ "hasShapeFlexibility" : "0",
58
+ "isOptional" : "0",
59
+ "dataType" : "Float16",
60
+ "formattedType" : "MultiArray (Float16 1 × 64 × 1 × 512)",
61
+ "shortDescription" : "",
62
+ "shape" : "[1, 64, 1, 512]",
63
+ "name" : "new_k_cache_2",
64
+ "type" : "MultiArray"
65
+ },
66
+ {
67
+ "hasShapeFlexibility" : "0",
68
+ "isOptional" : "0",
69
+ "dataType" : "Float16",
70
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 64)",
71
+ "shortDescription" : "",
72
+ "shape" : "[1, 512, 1, 64]",
73
+ "name" : "new_v_cache_2",
74
+ "type" : "MultiArray"
75
+ },
76
+ {
77
+ "hasShapeFlexibility" : "0",
78
+ "isOptional" : "0",
79
+ "dataType" : "Float16",
80
+ "formattedType" : "MultiArray (Float16 1 × 64 × 1 × 512)",
81
+ "shortDescription" : "",
82
+ "shape" : "[1, 64, 1, 512]",
83
+ "name" : "new_k_cache_3",
84
+ "type" : "MultiArray"
85
+ },
86
+ {
87
+ "hasShapeFlexibility" : "0",
88
+ "isOptional" : "0",
89
+ "dataType" : "Float16",
90
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 64)",
91
+ "shortDescription" : "",
92
+ "shape" : "[1, 512, 1, 64]",
93
+ "name" : "new_v_cache_3",
94
+ "type" : "MultiArray"
95
+ }
96
+ ],
97
+ "modelParameters" : [
98
+
99
+ ],
100
+ "specificationVersion" : 7,
101
+ "mlProgramOperationTypeHistogram" : {
102
+ "Concat" : 28,
103
+ "Ios16.mul" : 172,
104
+ "SliceByIndex" : 208,
105
+ "Transpose" : 4,
106
+ "Ios16.einsum" : 256,
107
+ "Ios16.conv" : 28,
108
+ "Ios16.add" : 144,
109
+ "Ios16.realDiv" : 8,
110
+ "Ios16.softmax" : 128,
111
+ "Ios16.reduceL2Norm" : 8,
112
+ "Ios16.reshape" : 28,
113
+ "Ios16.silu" : 4
114
+ },
115
+ "computePrecision" : "Mixed (Float16, Int32)",
116
+ "isUpdatable" : "0",
117
+ "availability" : {
118
+ "macOS" : "13.0",
119
+ "tvOS" : "16.0",
120
+ "visionOS" : "1.0",
121
+ "watchOS" : "9.0",
122
+ "iOS" : "16.0",
123
+ "macCatalyst" : "16.0"
124
+ },
125
+ "modelType" : {
126
+ "name" : "MLModelType_mlProgram"
127
+ },
128
+ "userDefinedMetadata" : {
129
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
130
+ "com.github.apple.coremltools.source" : "torch==2.1.0",
131
+ "com.github.apple.coremltools.version" : "8.0b1"
132
+ },
133
+ "inputSchema" : [
134
+ {
135
+ "hasShapeFlexibility" : "0",
136
+ "isOptional" : "0",
137
+ "dataType" : "Float16",
138
+ "formattedType" : "MultiArray (Float16 1 × 2048 × 8 × 8)",
139
+ "shortDescription" : "",
140
+ "shape" : "[1, 2048, 8, 8]",
141
+ "name" : "x",
142
+ "type" : "MultiArray"
143
+ },
144
+ {
145
+ "hasShapeFlexibility" : "0",
146
+ "isOptional" : "0",
147
+ "dataType" : "Float16",
148
+ "formattedType" : "MultiArray (Float16 64 × 64)",
149
+ "shortDescription" : "",
150
+ "shape" : "[64, 64]",
151
+ "name" : "cos",
152
+ "type" : "MultiArray"
153
+ },
154
+ {
155
+ "hasShapeFlexibility" : "0",
156
+ "isOptional" : "0",
157
+ "dataType" : "Float16",
158
+ "formattedType" : "MultiArray (Float16 64 × 64)",
159
+ "shortDescription" : "",
160
+ "shape" : "[64, 64]",
161
+ "name" : "sin",
162
+ "type" : "MultiArray"
163
+ },
164
+ {
165
+ "hasShapeFlexibility" : "0",
166
+ "isOptional" : "0",
167
+ "dataType" : "Float16",
168
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 64)",
169
+ "shortDescription" : "",
170
+ "shape" : "[1, 512, 1, 64]",
171
+ "name" : "mask",
172
+ "type" : "MultiArray"
173
+ },
174
+ {
175
+ "hasShapeFlexibility" : "0",
176
+ "isOptional" : "1",
177
+ "dataType" : "Float16",
178
+ "formattedType" : "MultiArray (Float16 1 × 448 × 1 × 512)?",
179
+ "shortDescription" : "",
180
+ "shape" : "[1, 448, 1, 512]",
181
+ "name" : "k_cache_0",
182
+ "type" : "MultiArray"
183
+ },
184
+ {
185
+ "hasShapeFlexibility" : "0",
186
+ "isOptional" : "1",
187
+ "dataType" : "Float16",
188
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 448)?",
189
+ "shortDescription" : "",
190
+ "shape" : "[1, 512, 1, 448]",
191
+ "name" : "v_cache_0",
192
+ "type" : "MultiArray"
193
+ },
194
+ {
195
+ "hasShapeFlexibility" : "0",
196
+ "isOptional" : "1",
197
+ "dataType" : "Float16",
198
+ "formattedType" : "MultiArray (Float16 1 × 448 × 1 × 512)?",
199
+ "shortDescription" : "",
200
+ "shape" : "[1, 448, 1, 512]",
201
+ "name" : "k_cache_1",
202
+ "type" : "MultiArray"
203
+ },
204
+ {
205
+ "hasShapeFlexibility" : "0",
206
+ "isOptional" : "1",
207
+ "dataType" : "Float16",
208
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 448)?",
209
+ "shortDescription" : "",
210
+ "shape" : "[1, 512, 1, 448]",
211
+ "name" : "v_cache_1",
212
+ "type" : "MultiArray"
213
+ },
214
+ {
215
+ "hasShapeFlexibility" : "0",
216
+ "isOptional" : "1",
217
+ "dataType" : "Float16",
218
+ "formattedType" : "MultiArray (Float16 1 × 448 × 1 × 512)?",
219
+ "shortDescription" : "",
220
+ "shape" : "[1, 448, 1, 512]",
221
+ "name" : "k_cache_2",
222
+ "type" : "MultiArray"
223
+ },
224
+ {
225
+ "hasShapeFlexibility" : "0",
226
+ "isOptional" : "1",
227
+ "dataType" : "Float16",
228
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 448)?",
229
+ "shortDescription" : "",
230
+ "shape" : "[1, 512, 1, 448]",
231
+ "name" : "v_cache_2",
232
+ "type" : "MultiArray"
233
+ },
234
+ {
235
+ "hasShapeFlexibility" : "0",
236
+ "isOptional" : "1",
237
+ "dataType" : "Float16",
238
+ "formattedType" : "MultiArray (Float16 1 × 448 × 1 × 512)?",
239
+ "shortDescription" : "",
240
+ "shape" : "[1, 448, 1, 512]",
241
+ "name" : "k_cache_3",
242
+ "type" : "MultiArray"
243
+ },
244
+ {
245
+ "hasShapeFlexibility" : "0",
246
+ "isOptional" : "1",
247
+ "dataType" : "Float16",
248
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 448)?",
249
+ "shortDescription" : "",
250
+ "shape" : "[1, 512, 1, 448]",
251
+ "name" : "v_cache_3",
252
+ "type" : "MultiArray"
253
+ }
254
+ ],
255
+ "generatedClassName" : "Llama_3_2_1B_Instruct_2024_10_10_23_56_41_chunk3",
256
+ "method" : "predict"
257
+ }
258
+ ]
Llama-3.2-1B-Instruct_chunk3.mlmodelc/model.mil ADDED
The diff for this file is too large to render.
Llama-3.2-1B-Instruct_chunk3.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eddf0e85ab72700254696c13b5cb50e1fd525167a03d6d99aed4ec0c11867a4e
+ size 486575936
Llama-3.2-1B-Instruct_chunk4.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa232688f37d1b639e14da601159147837ea3a6d7861e440adc8913fb9b3246f
+ size 243
Llama-3.2-1B-Instruct_chunk4.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:507886ccf77d43d177e993f226218bda5b20448ffeb13ac2e9f6a768ffb75b70
+ size 931
Llama-3.2-1B-Instruct_chunk4.mlmodelc/metadata.json ADDED
@@ -0,0 +1,258 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "storagePrecision" : "Float16",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float16",
10
+ "formattedType" : "MultiArray (Float16 1 × 2048 × 8 × 8)",
11
+ "shortDescription" : "",
12
+ "shape" : "[1, 2048, 8, 8]",
13
+ "name" : "new_x",
14
+ "type" : "MultiArray"
15
+ },
16
+ {
17
+ "hasShapeFlexibility" : "0",
18
+ "isOptional" : "0",
19
+ "dataType" : "Float16",
20
+ "formattedType" : "MultiArray (Float16 1 × 64 × 1 × 512)",
21
+ "shortDescription" : "",
22
+ "shape" : "[1, 64, 1, 512]",
23
+ "name" : "new_k_cache_0",
24
+ "type" : "MultiArray"
25
+ },
26
+ {
27
+ "hasShapeFlexibility" : "0",
28
+ "isOptional" : "0",
29
+ "dataType" : "Float16",
30
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 64)",
31
+ "shortDescription" : "",
32
+ "shape" : "[1, 512, 1, 64]",
33
+ "name" : "new_v_cache_0",
34
+ "type" : "MultiArray"
35
+ },
36
+ {
37
+ "hasShapeFlexibility" : "0",
38
+ "isOptional" : "0",
39
+ "dataType" : "Float16",
40
+ "formattedType" : "MultiArray (Float16 1 × 64 × 1 × 512)",
41
+ "shortDescription" : "",
42
+ "shape" : "[1, 64, 1, 512]",
43
+ "name" : "new_k_cache_1",
44
+ "type" : "MultiArray"
45
+ },
46
+ {
47
+ "hasShapeFlexibility" : "0",
48
+ "isOptional" : "0",
49
+ "dataType" : "Float16",
50
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 64)",
51
+ "shortDescription" : "",
52
+ "shape" : "[1, 512, 1, 64]",
53
+ "name" : "new_v_cache_1",
54
+ "type" : "MultiArray"
55
+ },
56
+ {
57
+ "hasShapeFlexibility" : "0",
58
+ "isOptional" : "0",
59
+ "dataType" : "Float16",
60
+ "formattedType" : "MultiArray (Float16 1 × 64 × 1 × 512)",
61
+ "shortDescription" : "",
62
+ "shape" : "[1, 64, 1, 512]",
63
+ "name" : "new_k_cache_2",
64
+ "type" : "MultiArray"
65
+ },
66
+ {
67
+ "hasShapeFlexibility" : "0",
68
+ "isOptional" : "0",
69
+ "dataType" : "Float16",
70
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 64)",
71
+ "shortDescription" : "",
72
+ "shape" : "[1, 512, 1, 64]",
73
+ "name" : "new_v_cache_2",
74
+ "type" : "MultiArray"
75
+ },
76
+ {
77
+ "hasShapeFlexibility" : "0",
78
+ "isOptional" : "0",
79
+ "dataType" : "Float16",
80
+ "formattedType" : "MultiArray (Float16 1 × 64 × 1 × 512)",
81
+ "shortDescription" : "",
82
+ "shape" : "[1, 64, 1, 512]",
83
+ "name" : "new_k_cache_3",
84
+ "type" : "MultiArray"
85
+ },
86
+ {
87
+ "hasShapeFlexibility" : "0",
88
+ "isOptional" : "0",
89
+ "dataType" : "Float16",
90
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 64)",
91
+ "shortDescription" : "",
92
+ "shape" : "[1, 512, 1, 64]",
93
+ "name" : "new_v_cache_3",
94
+ "type" : "MultiArray"
95
+ }
96
+ ],
97
+ "modelParameters" : [
98
+
99
+ ],
100
+ "specificationVersion" : 7,
101
+ "mlProgramOperationTypeHistogram" : {
102
+ "Concat" : 28,
103
+ "Ios16.mul" : 172,
104
+ "SliceByIndex" : 208,
105
+ "Transpose" : 4,
106
+ "Ios16.einsum" : 256,
107
+ "Ios16.conv" : 28,
108
+ "Ios16.add" : 144,
109
+ "Ios16.realDiv" : 8,
110
+ "Ios16.softmax" : 128,
111
+ "Ios16.reduceL2Norm" : 8,
112
+ "Ios16.reshape" : 28,
113
+ "Ios16.silu" : 4
114
+ },
115
+ "computePrecision" : "Mixed (Float16, Int32)",
116
+ "isUpdatable" : "0",
117
+ "availability" : {
118
+ "macOS" : "13.0",
119
+ "tvOS" : "16.0",
120
+ "visionOS" : "1.0",
121
+ "watchOS" : "9.0",
122
+ "iOS" : "16.0",
123
+ "macCatalyst" : "16.0"
124
+ },
125
+ "modelType" : {
126
+ "name" : "MLModelType_mlProgram"
127
+ },
128
+ "userDefinedMetadata" : {
129
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
130
+ "com.github.apple.coremltools.version" : "8.0b1",
131
+ "com.github.apple.coremltools.source" : "torch==2.1.0"
132
+ },
133
+ "inputSchema" : [
134
+ {
135
+ "hasShapeFlexibility" : "0",
136
+ "isOptional" : "0",
137
+ "dataType" : "Float16",
138
+ "formattedType" : "MultiArray (Float16 1 × 2048 × 8 × 8)",
139
+ "shortDescription" : "",
140
+ "shape" : "[1, 2048, 8, 8]",
141
+ "name" : "x",
142
+ "type" : "MultiArray"
143
+ },
144
+ {
145
+ "hasShapeFlexibility" : "0",
146
+ "isOptional" : "0",
147
+ "dataType" : "Float16",
148
+ "formattedType" : "MultiArray (Float16 64 × 64)",
149
+ "shortDescription" : "",
150
+ "shape" : "[64, 64]",
151
+ "name" : "cos",
152
+ "type" : "MultiArray"
153
+ },
154
+ {
155
+ "hasShapeFlexibility" : "0",
156
+ "isOptional" : "0",
157
+ "dataType" : "Float16",
158
+ "formattedType" : "MultiArray (Float16 64 × 64)",
159
+ "shortDescription" : "",
160
+ "shape" : "[64, 64]",
161
+ "name" : "sin",
162
+ "type" : "MultiArray"
163
+ },
164
+ {
165
+ "hasShapeFlexibility" : "0",
166
+ "isOptional" : "0",
167
+ "dataType" : "Float16",
168
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 64)",
169
+ "shortDescription" : "",
170
+ "shape" : "[1, 512, 1, 64]",
171
+ "name" : "mask",
172
+ "type" : "MultiArray"
173
+ },
174
+ {
175
+ "hasShapeFlexibility" : "0",
176
+ "isOptional" : "1",
177
+ "dataType" : "Float16",
178
+ "formattedType" : "MultiArray (Float16 1 × 448 × 1 × 512)?",
179
+ "shortDescription" : "",
180
+ "shape" : "[1, 448, 1, 512]",
181
+ "name" : "k_cache_0",
182
+ "type" : "MultiArray"
183
+ },
184
+ {
185
+ "hasShapeFlexibility" : "0",
186
+ "isOptional" : "1",
187
+ "dataType" : "Float16",
188
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 448)?",
189
+ "shortDescription" : "",
190
+ "shape" : "[1, 512, 1, 448]",
191
+ "name" : "v_cache_0",
192
+ "type" : "MultiArray"
193
+ },
194
+ {
195
+ "hasShapeFlexibility" : "0",
196
+ "isOptional" : "1",
197
+ "dataType" : "Float16",
198
+ "formattedType" : "MultiArray (Float16 1 × 448 × 1 × 512)?",
199
+ "shortDescription" : "",
200
+ "shape" : "[1, 448, 1, 512]",
201
+ "name" : "k_cache_1",
202
+ "type" : "MultiArray"
203
+ },
204
+ {
205
+ "hasShapeFlexibility" : "0",
206
+ "isOptional" : "1",
207
+ "dataType" : "Float16",
208
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 448)?",
209
+ "shortDescription" : "",
210
+ "shape" : "[1, 512, 1, 448]",
211
+ "name" : "v_cache_1",
212
+ "type" : "MultiArray"
213
+ },
214
+ {
215
+ "hasShapeFlexibility" : "0",
216
+ "isOptional" : "1",
217
+ "dataType" : "Float16",
218
+ "formattedType" : "MultiArray (Float16 1 × 448 × 1 × 512)?",
219
+ "shortDescription" : "",
220
+ "shape" : "[1, 448, 1, 512]",
221
+ "name" : "k_cache_2",
222
+ "type" : "MultiArray"
223
+ },
224
+ {
225
+ "hasShapeFlexibility" : "0",
226
+ "isOptional" : "1",
227
+ "dataType" : "Float16",
228
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 448)?",
229
+ "shortDescription" : "",
230
+ "shape" : "[1, 512, 1, 448]",
231
+ "name" : "v_cache_2",
232
+ "type" : "MultiArray"
233
+ },
234
+ {
235
+ "hasShapeFlexibility" : "0",
236
+ "isOptional" : "1",
237
+ "dataType" : "Float16",
238
+ "formattedType" : "MultiArray (Float16 1 × 448 × 1 × 512)?",
239
+ "shortDescription" : "",
240
+ "shape" : "[1, 448, 1, 512]",
241
+ "name" : "k_cache_3",
242
+ "type" : "MultiArray"
243
+ },
244
+ {
245
+ "hasShapeFlexibility" : "0",
246
+ "isOptional" : "1",
247
+ "dataType" : "Float16",
248
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 448)?",
249
+ "shortDescription" : "",
250
+ "shape" : "[1, 512, 1, 448]",
251
+ "name" : "v_cache_3",
252
+ "type" : "MultiArray"
253
+ }
254
+ ],
255
+ "generatedClassName" : "Llama_3_2_1B_Instruct_2024_10_10_23_56_41_chunk4",
256
+ "method" : "predict"
257
+ }
258
+ ]
Llama-3.2-1B-Instruct_chunk4.mlmodelc/model.mil ADDED
The diff for this file is too large to render.
Llama-3.2-1B-Instruct_chunk4.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d882d8368974eb82dcef65c0fa5664b29da2ec73bdaf1d2798f5f923761197b
+ size 486575936
Llama-3.2-1B-Instruct_chunk5.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa232688f37d1b639e14da601159147837ea3a6d7861e440adc8913fb9b3246f
+ size 243
Llama-3.2-1B-Instruct_chunk5.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbed3683f2e4450c8c82941040938b8db75f66e3348e5f757e67745654a5b4e4
+ size 931
Llama-3.2-1B-Instruct_chunk5.mlmodelc/metadata.json ADDED
@@ -0,0 +1,258 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "storagePrecision" : "Float16",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float16",
10
+ "formattedType" : "MultiArray (Float16 1 × 2048 × 8 × 8)",
11
+ "shortDescription" : "",
12
+ "shape" : "[1, 2048, 8, 8]",
13
+ "name" : "new_x",
14
+ "type" : "MultiArray"
15
+ },
16
+ {
17
+ "hasShapeFlexibility" : "0",
18
+ "isOptional" : "0",
19
+ "dataType" : "Float16",
20
+ "formattedType" : "MultiArray (Float16 1 × 64 × 1 × 512)",
21
+ "shortDescription" : "",
22
+ "shape" : "[1, 64, 1, 512]",
23
+ "name" : "new_k_cache_0",
24
+ "type" : "MultiArray"
25
+ },
26
+ {
27
+ "hasShapeFlexibility" : "0",
28
+ "isOptional" : "0",
29
+ "dataType" : "Float16",
30
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 64)",
31
+ "shortDescription" : "",
32
+ "shape" : "[1, 512, 1, 64]",
33
+ "name" : "new_v_cache_0",
34
+ "type" : "MultiArray"
35
+ },
36
+ {
37
+ "hasShapeFlexibility" : "0",
38
+ "isOptional" : "0",
39
+ "dataType" : "Float16",
40
+ "formattedType" : "MultiArray (Float16 1 × 64 × 1 × 512)",
41
+ "shortDescription" : "",
42
+ "shape" : "[1, 64, 1, 512]",
43
+ "name" : "new_k_cache_1",
44
+ "type" : "MultiArray"
45
+ },
46
+ {
47
+ "hasShapeFlexibility" : "0",
48
+ "isOptional" : "0",
49
+ "dataType" : "Float16",
50
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 64)",
51
+ "shortDescription" : "",
52
+ "shape" : "[1, 512, 1, 64]",
53
+ "name" : "new_v_cache_1",
54
+ "type" : "MultiArray"
55
+ },
56
+ {
57
+ "hasShapeFlexibility" : "0",
58
+ "isOptional" : "0",
59
+ "dataType" : "Float16",
60
+ "formattedType" : "MultiArray (Float16 1 × 64 × 1 × 512)",
61
+ "shortDescription" : "",
62
+ "shape" : "[1, 64, 1, 512]",
63
+ "name" : "new_k_cache_2",
64
+ "type" : "MultiArray"
65
+ },
66
+ {
67
+ "hasShapeFlexibility" : "0",
68
+ "isOptional" : "0",
69
+ "dataType" : "Float16",
70
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 64)",
71
+ "shortDescription" : "",
72
+ "shape" : "[1, 512, 1, 64]",
73
+ "name" : "new_v_cache_2",
74
+ "type" : "MultiArray"
75
+ },
76
+ {
77
+ "hasShapeFlexibility" : "0",
78
+ "isOptional" : "0",
79
+ "dataType" : "Float16",
80
+ "formattedType" : "MultiArray (Float16 1 × 64 × 1 × 512)",
81
+ "shortDescription" : "",
82
+ "shape" : "[1, 64, 1, 512]",
83
+ "name" : "new_k_cache_3",
84
+ "type" : "MultiArray"
85
+ },
86
+ {
87
+ "hasShapeFlexibility" : "0",
88
+ "isOptional" : "0",
89
+ "dataType" : "Float16",
90
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 64)",
91
+ "shortDescription" : "",
92
+ "shape" : "[1, 512, 1, 64]",
93
+ "name" : "new_v_cache_3",
94
+ "type" : "MultiArray"
95
+ }
96
+ ],
97
+ "modelParameters" : [
98
+
99
+ ],
100
+ "specificationVersion" : 7,
101
+ "mlProgramOperationTypeHistogram" : {
102
+ "Concat" : 28,
103
+ "Ios16.mul" : 172,
104
+ "SliceByIndex" : 208,
105
+ "Transpose" : 4,
106
+ "Ios16.einsum" : 256,
107
+ "Ios16.conv" : 28,
108
+ "Ios16.add" : 144,
109
+ "Ios16.realDiv" : 8,
110
+ "Ios16.softmax" : 128,
111
+ "Ios16.reduceL2Norm" : 8,
112
+ "Ios16.reshape" : 28,
113
+ "Ios16.silu" : 4
114
+ },
115
+ "computePrecision" : "Mixed (Float16, Int32)",
116
+ "isUpdatable" : "0",
117
+ "availability" : {
118
+ "macOS" : "13.0",
119
+ "tvOS" : "16.0",
120
+ "visionOS" : "1.0",
121
+ "watchOS" : "9.0",
122
+ "iOS" : "16.0",
123
+ "macCatalyst" : "16.0"
124
+ },
125
+ "modelType" : {
126
+ "name" : "MLModelType_mlProgram"
127
+ },
128
+ "userDefinedMetadata" : {
129
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
130
+ "com.github.apple.coremltools.source" : "torch==2.1.0",
131
+ "com.github.apple.coremltools.version" : "8.0b1"
132
+ },
133
+ "inputSchema" : [
134
+ {
135
+ "hasShapeFlexibility" : "0",
136
+ "isOptional" : "0",
137
+ "dataType" : "Float16",
138
+ "formattedType" : "MultiArray (Float16 1 × 2048 × 8 × 8)",
139
+ "shortDescription" : "",
140
+ "shape" : "[1, 2048, 8, 8]",
141
+ "name" : "x",
142
+ "type" : "MultiArray"
143
+ },
144
+ {
145
+ "hasShapeFlexibility" : "0",
146
+ "isOptional" : "0",
147
+ "dataType" : "Float16",
148
+ "formattedType" : "MultiArray (Float16 64 × 64)",
149
+ "shortDescription" : "",
150
+ "shape" : "[64, 64]",
151
+ "name" : "cos",
152
+ "type" : "MultiArray"
153
+ },
154
+ {
155
+ "hasShapeFlexibility" : "0",
156
+ "isOptional" : "0",
157
+ "dataType" : "Float16",
158
+ "formattedType" : "MultiArray (Float16 64 × 64)",
159
+ "shortDescription" : "",
160
+ "shape" : "[64, 64]",
161
+ "name" : "sin",
162
+ "type" : "MultiArray"
163
+ },
164
+ {
165
+ "hasShapeFlexibility" : "0",
166
+ "isOptional" : "0",
167
+ "dataType" : "Float16",
168
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 64)",
169
+ "shortDescription" : "",
170
+ "shape" : "[1, 512, 1, 64]",
171
+ "name" : "mask",
172
+ "type" : "MultiArray"
173
+ },
174
+ {
175
+ "hasShapeFlexibility" : "0",
176
+ "isOptional" : "1",
177
+ "dataType" : "Float16",
178
+ "formattedType" : "MultiArray (Float16 1 × 448 × 1 × 512)?",
179
+ "shortDescription" : "",
180
+ "shape" : "[1, 448, 1, 512]",
181
+ "name" : "k_cache_0",
182
+ "type" : "MultiArray"
183
+ },
184
+ {
185
+ "hasShapeFlexibility" : "0",
186
+ "isOptional" : "1",
187
+ "dataType" : "Float16",
188
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 448)?",
189
+ "shortDescription" : "",
190
+ "shape" : "[1, 512, 1, 448]",
191
+ "name" : "v_cache_0",
192
+ "type" : "MultiArray"
193
+ },
194
+ {
195
+ "hasShapeFlexibility" : "0",
196
+ "isOptional" : "1",
197
+ "dataType" : "Float16",
198
+ "formattedType" : "MultiArray (Float16 1 × 448 × 1 × 512)?",
199
+ "shortDescription" : "",
200
+ "shape" : "[1, 448, 1, 512]",
201
+ "name" : "k_cache_1",
202
+ "type" : "MultiArray"
203
+ },
204
+ {
205
+ "hasShapeFlexibility" : "0",
206
+ "isOptional" : "1",
207
+ "dataType" : "Float16",
208
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 448)?",
209
+ "shortDescription" : "",
210
+ "shape" : "[1, 512, 1, 448]",
211
+ "name" : "v_cache_1",
212
+ "type" : "MultiArray"
213
+ },
214
+ {
215
+ "hasShapeFlexibility" : "0",
216
+ "isOptional" : "1",
217
+ "dataType" : "Float16",
218
+ "formattedType" : "MultiArray (Float16 1 × 448 × 1 × 512)?",
219
+ "shortDescription" : "",
220
+ "shape" : "[1, 448, 1, 512]",
221
+ "name" : "k_cache_2",
222
+ "type" : "MultiArray"
223
+ },
224
+ {
225
+ "hasShapeFlexibility" : "0",
226
+ "isOptional" : "1",
227
+ "dataType" : "Float16",
228
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 448)?",
229
+ "shortDescription" : "",
230
+ "shape" : "[1, 512, 1, 448]",
231
+ "name" : "v_cache_2",
232
+ "type" : "MultiArray"
233
+ },
234
+ {
235
+ "hasShapeFlexibility" : "0",
236
+ "isOptional" : "1",
237
+ "dataType" : "Float16",
238
+ "formattedType" : "MultiArray (Float16 1 × 448 × 1 × 512)?",
239
+ "shortDescription" : "",
240
+ "shape" : "[1, 448, 1, 512]",
241
+ "name" : "k_cache_3",
242
+ "type" : "MultiArray"
243
+ },
244
+ {
245
+ "hasShapeFlexibility" : "0",
246
+ "isOptional" : "1",
247
+ "dataType" : "Float16",
248
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 448)?",
249
+ "shortDescription" : "",
250
+ "shape" : "[1, 512, 1, 448]",
251
+ "name" : "v_cache_3",
252
+ "type" : "MultiArray"
253
+ }
254
+ ],
255
+ "generatedClassName" : "Llama_3_2_1B_Instruct_2024_10_10_23_56_41_chunk5",
256
+ "method" : "predict"
257
+ }
258
+ ]
Llama-3.2-1B-Instruct_chunk5.mlmodelc/model.mil ADDED
The diff for this file is too large to render.
Llama-3.2-1B-Instruct_chunk5.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb54b3abf03d6e5002315bdfbf933e239c11369de782783287db9219e238cf8e
+ size 486575936
Llama-3.2-1B-Instruct_chunk6.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23aa1a8a2b6bca88beeecf08d6281cdeb43aad33eb02cbe10ebff7eede7ed329
+ size 243
Llama-3.2-1B-Instruct_chunk6.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2740a88eec733758ae31768866e8550009ce36eb5177a09938d42cb18a095d05
+ size 311
Llama-3.2-1B-Instruct_chunk6.mlmodelc/metadata.json ADDED
@@ -0,0 +1,64 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "storagePrecision" : "Float16",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 64 × 128256)",
+ "shortDescription" : "",
+ "shape" : "[1, 64, 128256]",
+ "name" : "logits",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 7,
+ "mlProgramOperationTypeHistogram" : {
+ "Concat" : 2,
+ "Ios16.mul" : 2,
+ "Squeeze" : 1,
+ "Transpose" : 1,
+ "Ios16.reshape" : 10,
+ "Ios16.matmul" : 8,
+ "Ios16.realDiv" : 1,
+ "Ios16.reduceL2Norm" : 1
+ },
+ "computePrecision" : "Mixed (Float16, Int32)",
+ "isUpdatable" : "0",
+ "availability" : {
+ "macOS" : "13.0",
+ "tvOS" : "16.0",
+ "visionOS" : "1.0",
+ "watchOS" : "9.0",
+ "iOS" : "16.0",
+ "macCatalyst" : "16.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
+ "com.github.apple.coremltools.source" : "torch==2.1.0",
+ "com.github.apple.coremltools.version" : "8.0b1"
+ },
+ "inputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 2048 × 8 × 8)",
+ "shortDescription" : "",
+ "shape" : "[1, 2048, 8, 8]",
+ "name" : "x",
+ "type" : "MultiArray"
+ }
+ ],
+ "generatedClassName" : "Llama_3_2_1B_Instruct_2024_10_10_23_56_41_chunk6",
+ "method" : "predict"
+ }
+ ]
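Chunk 6 is the output head: its model.mil below normalizes the final hidden state, applies the ln_f weight, and projects against the 128256-token vocabulary in 16384-column matmul slices, yielding logits of shape 1 × 64 × 128256. The repository also ships a logit-processor model (listed above but not expanded in this commit) that presumably post-processes these logits on-device; as a stand-in, here is a hedged greedy-selection sketch in numpy, again assuming the right-aligned 64-token window so the last slot holds the newest position.

```python
# Hedged sketch: greedy next-token selection from chunk 6's logits output.
# `x` is the hidden state produced by chunk 5 (new_x in the earlier sketch).
import numpy as np
import coremltools as ct

chunk6 = ct.models.CompiledMLModel("Llama-3.2-1B-Instruct_chunk6.mlmodelc")

def greedy_next_token(x: np.ndarray) -> int:
    logits = chunk6.predict({"x": x})["logits"]      # shape (1, 64, 128256)
    # Under the right-aligned window assumption, index -1 is the newest position.
    return int(np.argmax(logits[0, -1]))
```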
Llama-3.2-1B-Instruct_chunk6.mlmodelc/model.mil ADDED
@@ -0,0 +1,77 @@
1
+ program(1.0)
2
+ [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3304.5.2"}, {"coremlc-version", "3304.6.2"}, {"coremltools-component-torch", "2.1.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0b1"}})]
3
+ {
4
+ func main<ios16>(tensor<fp16, [1, 2048, 8, 8]> x) {
5
+ tensor<bool, []> var_6 = const()[name = tensor<string, []>("op_6"), val = tensor<bool, []>(true)];
6
+ tensor<int32, []> var_9 = const()[name = tensor<string, []>("op_9"), val = tensor<int32, []>(1)];
7
+ tensor<bool, []> x_eps_interleave_0 = const()[name = tensor<string, []>("x_eps_interleave_0"), val = tensor<bool, []>(false)];
8
+ tensor<fp16, [1, 1, 8, 8]> eps_chan_to_fp16 = const()[name = tensor<string, []>("eps_chan_to_fp16"), val = tensor<fp16, [1, 1, 8, 8]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
9
+ tensor<fp16, [1, 2049, 8, 8]> x_eps_cast_fp16 = concat(axis = var_9, interleave = x_eps_interleave_0, values = (x, eps_chan_to_fp16))[name = tensor<string, []>("x_eps_cast_fp16")];
10
+ tensor<int32, [1]> norm_x_axes_0 = const()[name = tensor<string, []>("norm_x_axes_0"), val = tensor<int32, [1]>([1])];
11
+ tensor<fp16, [1, 1, 8, 8]> norm_x_cast_fp16 = reduce_l2_norm(axes = norm_x_axes_0, keep_dims = var_6, x = x_eps_cast_fp16)[name = tensor<string, []>("norm_x_cast_fp16")];
12
+ tensor<fp16, [1, 2048, 8, 8]> x_normed_1_cast_fp16 = real_div(x = x, y = norm_x_cast_fp16)[name = tensor<string, []>("x_normed_1_cast_fp16")];
13
+ tensor<fp16, []> var_34_to_fp16 = const()[name = tensor<string, []>("op_34_to_fp16"), val = tensor<fp16, []>(0x1.6ap+5)];
14
+ tensor<fp16, [1, 2048, 8, 8]> x_normed_3_cast_fp16 = mul(x = x_normed_1_cast_fp16, y = var_34_to_fp16)[name = tensor<string, []>("x_normed_3_cast_fp16")];
15
+ tensor<fp16, [1, 2048, 1, 1]> ln_f_weight_to_fp16 = const()[name = tensor<string, []>("ln_f_weight_to_fp16"), val = tensor<fp16, [1, 2048, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(256)))];
16
+ tensor<fp16, [1, 2048, 8, 8]> x_5_cast_fp16 = mul(x = x_normed_3_cast_fp16, y = ln_f_weight_to_fp16)[name = tensor<string, []>("x_5_cast_fp16")];
17
+ tensor<int32, [4]> var_48 = const()[name = tensor<string, []>("op_48"), val = tensor<int32, [4]>([1, 2048, 1, -1])];
18
+ tensor<fp16, [1, 2048, 1, 64]> x_cast_fp16 = reshape(shape = var_48, x = x_5_cast_fp16)[name = tensor<string, []>("x_cast_fp16")];
19
+ tensor<int32, [1]> var_51_axes_0 = const()[name = tensor<string, []>("op_51_axes_0"), val = tensor<int32, [1]>([2])];
20
+ tensor<fp16, [1, 2048, 64]> var_51_cast_fp16 = squeeze(axes = var_51_axes_0, x = x_cast_fp16)[name = tensor<string, []>("op_51_cast_fp16")];
21
+ tensor<int32, [3]> var_54_perm_0 = const()[name = tensor<string, []>("op_54_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
22
+ tensor<int32, [2]> concat_4 = const()[name = tensor<string, []>("concat_4"), val = tensor<int32, [2]>([64, 2048])];
23
+ tensor<fp16, [1, 64, 2048]> var_54_cast_fp16 = transpose(perm = var_54_perm_0, x = var_51_cast_fp16)[name = tensor<string, []>("transpose_16")];
24
+ tensor<fp16, [64, 2048]> reshape_0_cast_fp16 = reshape(shape = concat_4, x = var_54_cast_fp16)[name = tensor<string, []>("reshape_0_cast_fp16")];
25
+ tensor<bool, []> matmul_0_transpose_x_0 = const()[name = tensor<string, []>("matmul_0_transpose_x_0"), val = tensor<bool, []>(false)];
26
+ tensor<bool, []> matmul_0_transpose_y_0 = const()[name = tensor<string, []>("matmul_0_transpose_y_0"), val = tensor<bool, []>(false)];
27
+ tensor<fp16, [2048, 16384]> transpose_1_to_fp16 = const()[name = tensor<string, []>("transpose_1_to_fp16"), val = tensor<fp16, [2048, 16384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4416)))];
28
+ tensor<fp16, [64, 16384]> matmul_0_cast_fp16 = matmul(transpose_x = matmul_0_transpose_x_0, transpose_y = matmul_0_transpose_y_0, x = reshape_0_cast_fp16, y = transpose_1_to_fp16)[name = tensor<string, []>("matmul_0_cast_fp16")];
29
+ tensor<int32, [3]> concat_8 = const()[name = tensor<string, []>("concat_8"), val = tensor<int32, [3]>([1, 64, 16384])];
30
+ tensor<fp16, [1, 64, 16384]> reshape_2_cast_fp16 = reshape(shape = concat_8, x = matmul_0_cast_fp16)[name = tensor<string, []>("reshape_2_cast_fp16")];
31
+ tensor<bool, []> matmul_1_transpose_x_0 = const()[name = tensor<string, []>("matmul_1_transpose_x_0"), val = tensor<bool, []>(false)];
32
+ tensor<bool, []> matmul_1_transpose_y_0 = const()[name = tensor<string, []>("matmul_1_transpose_y_0"), val = tensor<bool, []>(false)];
33
+ tensor<fp16, [2048, 16384]> transpose_3_to_fp16 = const()[name = tensor<string, []>("transpose_3_to_fp16"), val = tensor<fp16, [2048, 16384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(67113344)))];
34
+ tensor<fp16, [64, 16384]> matmul_1_cast_fp16 = matmul(transpose_x = matmul_1_transpose_x_0, transpose_y = matmul_1_transpose_y_0, x = reshape_0_cast_fp16, y = transpose_3_to_fp16)[name = tensor<string, []>("matmul_1_cast_fp16")];
35
+ tensor<int32, [3]> concat_16 = const()[name = tensor<string, []>("concat_16"), val = tensor<int32, [3]>([1, 64, 16384])];
36
+ tensor<fp16, [1, 64, 16384]> reshape_5_cast_fp16 = reshape(shape = concat_16, x = matmul_1_cast_fp16)[name = tensor<string, []>("reshape_5_cast_fp16")];
37
+ tensor<bool, []> matmul_2_transpose_x_0 = const()[name = tensor<string, []>("matmul_2_transpose_x_0"), val = tensor<bool, []>(false)];
38
+ tensor<bool, []> matmul_2_transpose_y_0 = const()[name = tensor<string, []>("matmul_2_transpose_y_0"), val = tensor<bool, []>(false)];
39
+ tensor<fp16, [2048, 16384]> transpose_5_to_fp16 = const()[name = tensor<string, []>("transpose_5_to_fp16"), val = tensor<fp16, [2048, 16384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(134222272)))];
40
+ tensor<fp16, [64, 16384]> matmul_2_cast_fp16 = matmul(transpose_x = matmul_2_transpose_x_0, transpose_y = matmul_2_transpose_y_0, x = reshape_0_cast_fp16, y = transpose_5_to_fp16)[name = tensor<string, []>("matmul_2_cast_fp16")];
41
+ tensor<int32, [3]> concat_24 = const()[name = tensor<string, []>("concat_24"), val = tensor<int32, [3]>([1, 64, 16384])];
42
+ tensor<fp16, [1, 64, 16384]> reshape_8_cast_fp16 = reshape(shape = concat_24, x = matmul_2_cast_fp16)[name = tensor<string, []>("reshape_8_cast_fp16")];
43
+ tensor<bool, []> matmul_3_transpose_x_0 = const()[name = tensor<string, []>("matmul_3_transpose_x_0"), val = tensor<bool, []>(false)];
44
+ tensor<bool, []> matmul_3_transpose_y_0 = const()[name = tensor<string, []>("matmul_3_transpose_y_0"), val = tensor<bool, []>(false)];
45
+ tensor<fp16, [2048, 16384]> transpose_7_to_fp16 = const()[name = tensor<string, []>("transpose_7_to_fp16"), val = tensor<fp16, [2048, 16384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(201331200)))];
46
+ tensor<fp16, [64, 16384]> matmul_3_cast_fp16 = matmul(transpose_x = matmul_3_transpose_x_0, transpose_y = matmul_3_transpose_y_0, x = reshape_0_cast_fp16, y = transpose_7_to_fp16)[name = tensor<string, []>("matmul_3_cast_fp16")];
47
+ tensor<int32, [3]> concat_32 = const()[name = tensor<string, []>("concat_32"), val = tensor<int32, [3]>([1, 64, 16384])];
48
+ tensor<fp16, [1, 64, 16384]> reshape_11_cast_fp16 = reshape(shape = concat_32, x = matmul_3_cast_fp16)[name = tensor<string, []>("reshape_11_cast_fp16")];
49
+ tensor<bool, []> matmul_4_transpose_x_0 = const()[name = tensor<string, []>("matmul_4_transpose_x_0"), val = tensor<bool, []>(false)];
50
+ tensor<bool, []> matmul_4_transpose_y_0 = const()[name = tensor<string, []>("matmul_4_transpose_y_0"), val = tensor<bool, []>(false)];
51
+ tensor<fp16, [2048, 16384]> transpose_9_to_fp16 = const()[name = tensor<string, []>("transpose_9_to_fp16"), val = tensor<fp16, [2048, 16384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(268440128)))];
52
+ tensor<fp16, [64, 16384]> matmul_4_cast_fp16 = matmul(transpose_x = matmul_4_transpose_x_0, transpose_y = matmul_4_transpose_y_0, x = reshape_0_cast_fp16, y = transpose_9_to_fp16)[name = tensor<string, []>("matmul_4_cast_fp16")];
53
+ tensor<int32, [3]> concat_40 = const()[name = tensor<string, []>("concat_40"), val = tensor<int32, [3]>([1, 64, 16384])];
54
+ tensor<fp16, [1, 64, 16384]> reshape_14_cast_fp16 = reshape(shape = concat_40, x = matmul_4_cast_fp16)[name = tensor<string, []>("reshape_14_cast_fp16")];
55
+ tensor<bool, []> matmul_5_transpose_x_0 = const()[name = tensor<string, []>("matmul_5_transpose_x_0"), val = tensor<bool, []>(false)];
56
+ tensor<bool, []> matmul_5_transpose_y_0 = const()[name = tensor<string, []>("matmul_5_transpose_y_0"), val = tensor<bool, []>(false)];
57
+ tensor<fp16, [2048, 16384]> transpose_11_to_fp16 = const()[name = tensor<string, []>("transpose_11_to_fp16"), val = tensor<fp16, [2048, 16384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(335549056)))];
58
+ tensor<fp16, [64, 16384]> matmul_5_cast_fp16 = matmul(transpose_x = matmul_5_transpose_x_0, transpose_y = matmul_5_transpose_y_0, x = reshape_0_cast_fp16, y = transpose_11_to_fp16)[name = tensor<string, []>("matmul_5_cast_fp16")];
59
+ tensor<int32, [3]> concat_48 = const()[name = tensor<string, []>("concat_48"), val = tensor<int32, [3]>([1, 64, 16384])];
60
+ tensor<fp16, [1, 64, 16384]> reshape_17_cast_fp16 = reshape(shape = concat_48, x = matmul_5_cast_fp16)[name = tensor<string, []>("reshape_17_cast_fp16")];
61
+ tensor<bool, []> matmul_6_transpose_x_0 = const()[name = tensor<string, []>("matmul_6_transpose_x_0"), val = tensor<bool, []>(false)];
62
+ tensor<bool, []> matmul_6_transpose_y_0 = const()[name = tensor<string, []>("matmul_6_transpose_y_0"), val = tensor<bool, []>(false)];
63
+ tensor<fp16, [2048, 16384]> transpose_13_to_fp16 = const()[name = tensor<string, []>("transpose_13_to_fp16"), val = tensor<fp16, [2048, 16384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(402657984)))];
64
+ tensor<fp16, [64, 16384]> matmul_6_cast_fp16 = matmul(transpose_x = matmul_6_transpose_x_0, transpose_y = matmul_6_transpose_y_0, x = reshape_0_cast_fp16, y = transpose_13_to_fp16)[name = tensor<string, []>("matmul_6_cast_fp16")];
65
+ tensor<int32, [3]> concat_56 = const()[name = tensor<string, []>("concat_56"), val = tensor<int32, [3]>([1, 64, 16384])];
66
+ tensor<fp16, [1, 64, 16384]> reshape_20_cast_fp16 = reshape(shape = concat_56, x = matmul_6_cast_fp16)[name = tensor<string, []>("reshape_20_cast_fp16")];
67
+ tensor<bool, []> matmul_7_transpose_x_0 = const()[name = tensor<string, []>("matmul_7_transpose_x_0"), val = tensor<bool, []>(false)];
68
+ tensor<bool, []> matmul_7_transpose_y_0 = const()[name = tensor<string, []>("matmul_7_transpose_y_0"), val = tensor<bool, []>(false)];
69
+ tensor<fp16, [2048, 13568]> transpose_15_to_fp16 = const()[name = tensor<string, []>("transpose_15_to_fp16"), val = tensor<fp16, [2048, 13568]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(469766912)))];
70
+ tensor<fp16, [64, 13568]> matmul_7_cast_fp16 = matmul(transpose_x = matmul_7_transpose_x_0, transpose_y = matmul_7_transpose_y_0, x = reshape_0_cast_fp16, y = transpose_15_to_fp16)[name = tensor<string, []>("matmul_7_cast_fp16")];
71
+ tensor<int32, [3]> concat_64 = const()[name = tensor<string, []>("concat_64"), val = tensor<int32, [3]>([1, 64, 13568])];
72
+ tensor<fp16, [1, 64, 13568]> reshape_23_cast_fp16 = reshape(shape = concat_64, x = matmul_7_cast_fp16)[name = tensor<string, []>("reshape_23_cast_fp16")];
73
+ tensor<int32, []> var_99 = const()[name = tensor<string, []>("op_99"), val = tensor<int32, []>(-1)];
74
+ tensor<bool, []> var_100_interleave_0 = const()[name = tensor<string, []>("op_100_interleave_0"), val = tensor<bool, []>(false)];
75
+ tensor<fp16, [1, 64, 128256]> logits = concat(axis = var_99, interleave = var_100_interleave_0, values = (reshape_2_cast_fp16, reshape_5_cast_fp16, reshape_8_cast_fp16, reshape_11_cast_fp16, reshape_14_cast_fp16, reshape_17_cast_fp16, reshape_20_cast_fp16, reshape_23_cast_fp16))[name = tensor<string, []>("op_100_cast_fp16")];
76
+ } -> (logits);
77
+ }
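In plain terms, this chunk applies the model's final RMSNorm (an L2 norm over the 2048 channels plus one extra eps channel, rescaled by 0x1.6ap+5 = 45.25, roughly sqrt(2048), and by the ln_f weight) followed by the LM head, whose 2048 × 128256 projection is split into eight column blocks (seven of width 16384 plus one of 13568) that are concatenated back into the logits. A rough PyTorch equivalent, with illustrative names and an assumed eps value (the real eps channel and weight blocks live in weight.bin):

    import torch

    def chunk6_equivalent(x, ln_w, w_blocks, eps=1e-6):
        # x: [1, 2048, 8, 8] hidden states, ln_w: [2048] final-norm weight,
        # w_blocks: list of [2048, out_i] LM-head blocks with sum(out_i) == 128256.
        b, d, gh, gw = x.shape
        # RMSNorm via an appended eps channel, as in the MIL above.
        eps_chan = torch.full((b, 1, gh, gw), eps, dtype=x.dtype)
        norm = torch.linalg.vector_norm(torch.cat([x, eps_chan], dim=1), dim=1, keepdim=True)
        x = x / norm * (d ** 0.5) * ln_w.view(1, d, 1, 1)   # d ** 0.5 is ~45.25 for d = 2048
        # Fold the 8x8 grid back into 64 token positions: [1, 64, 2048].
        x = x.reshape(b, d, gh * gw).transpose(1, 2)
        # Chunked LM head: one matmul per weight block, concatenated on the vocab axis.
        return torch.cat([x @ wb for wb in w_blocks], dim=-1)  # [1, 64, 128256]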
Llama-3.2-1B-Instruct_chunk6.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4fb56007d0d4cb4a93aa67d2054ef3b3e2676a0358aff526a4ef5d66201b163
+ size 525341504
cache-processor.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d715da60630e06f07589e8fc3c2ed630f45943f1805cb6c078f284ee2655da88
+ size 243
cache-processor.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0173108f39e072006d20029cbd37f4de85c21f1908ea1f7f433ffd68d8b42f3
+ size 516
cache-processor.mlmodelc/metadata.json ADDED
@@ -0,0 +1,109 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 448 × 1 × 512)",
+ "shortDescription" : "",
+ "shape" : "[1, 448, 1, 512]",
+ "name" : "updated_k_cache",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 448)",
+ "shortDescription" : "",
+ "shape" : "[1, 512, 1, 448]",
+ "name" : "updated_v_cache",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16)",
+ "shortDescription" : "",
+ "shape" : "[]",
+ "name" : "ignore_me_im_only_here_so_this_runs_on_the_ane",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 7,
+ "mlProgramOperationTypeHistogram" : {
+ "SliceByIndex" : 2,
+ "Ios16.mul" : 1,
+ "Concat" : 2,
+ "Ios16.reduceMin" : 1
+ },
+ "computePrecision" : "Mixed (Float16, Int32)",
+ "isUpdatable" : "0",
+ "availability" : {
+ "macOS" : "13.0",
+ "tvOS" : "16.0",
+ "visionOS" : "1.0",
+ "watchOS" : "9.0",
+ "iOS" : "16.0",
+ "macCatalyst" : "16.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
+ "com.github.apple.coremltools.source" : "torch==2.1.0",
+ "com.github.apple.coremltools.version" : "8.0b1"
+ },
+ "inputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 448 × 1 × 512)",
+ "shortDescription" : "",
+ "shape" : "[1, 448, 1, 512]",
+ "name" : "old_k_cache",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 64 × 1 × 512)",
+ "shortDescription" : "",
+ "shape" : "[1, 64, 1, 512]",
+ "name" : "new_k_cache",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 448)",
+ "shortDescription" : "",
+ "shape" : "[1, 512, 1, 448]",
+ "name" : "old_v_cache",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 64)",
+ "shortDescription" : "",
+ "shape" : "[1, 512, 1, 64]",
+ "name" : "new_v_cache",
+ "type" : "MultiArray"
+ }
+ ],
+ "generatedClassName" : "cache_processor",
+ "method" : "predict"
+ }
+ ]
cache-processor.mlmodelc/model.mil ADDED
@@ -0,0 +1,24 @@
+ program(1.0)
+ [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3304.5.2"}, {"coremlc-version", "3304.6.2"}, {"coremltools-component-torch", "2.1.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0b1"}})]
+ {
+ func main<ios16>(tensor<fp16, [1, 64, 1, 512]> new_k_cache, tensor<fp16, [1, 512, 1, 64]> new_v_cache, tensor<fp16, [1, 448, 1, 512]> old_k_cache, tensor<fp16, [1, 512, 1, 448]> old_v_cache) {
+ tensor<int32, []> var_6 = const()[name = tensor<string, []>("op_6"), val = tensor<int32, []>(-3)];
+ tensor<bool, []> cat_k_1_interleave_0 = const()[name = tensor<string, []>("cat_k_1_interleave_0"), val = tensor<bool, []>(false)];
+ tensor<fp16, [1, 512, 1, 512]> cat_k_1_cast_fp16 = concat(axis = var_6, interleave = cat_k_1_interleave_0, values = (old_k_cache, new_k_cache))[name = tensor<string, []>("cat_k_1_cast_fp16")];
+ tensor<int32, []> var_9 = const()[name = tensor<string, []>("op_9"), val = tensor<int32, []>(-1)];
+ tensor<bool, []> cat_v_interleave_0 = const()[name = tensor<string, []>("cat_v_interleave_0"), val = tensor<bool, []>(false)];
+ tensor<fp16, [1, 512, 1, 512]> cat_v_cast_fp16 = concat(axis = var_9, interleave = cat_v_interleave_0, values = (old_v_cache, new_v_cache))[name = tensor<string, []>("cat_v_cast_fp16")];
+ tensor<int32, [4]> var_20_begin_0 = const()[name = tensor<string, []>("op_20_begin_0"), val = tensor<int32, [4]>([0, 64, 0, 0])];
+ tensor<int32, [4]> var_20_end_0 = const()[name = tensor<string, []>("op_20_end_0"), val = tensor<int32, [4]>([1, 512, 1, 512])];
+ tensor<bool, [4]> var_20_end_mask_0 = const()[name = tensor<string, []>("op_20_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
+ tensor<fp16, [1, 448, 1, 512]> updated_k_cache = slice_by_index(begin = var_20_begin_0, end = var_20_end_0, end_mask = var_20_end_mask_0, x = cat_k_1_cast_fp16)[name = tensor<string, []>("op_20_cast_fp16")];
+ tensor<int32, [4]> var_50_begin_0 = const()[name = tensor<string, []>("op_50_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 64])];
+ tensor<int32, [4]> var_50_end_0 = const()[name = tensor<string, []>("op_50_end_0"), val = tensor<int32, [4]>([1, 512, 1, 512])];
+ tensor<bool, [4]> var_50_end_mask_0 = const()[name = tensor<string, []>("op_50_end_mask_0"), val = tensor<bool, [4]>([true, true, true, false])];
+ tensor<fp16, [1, 512, 1, 448]> updated_v_cache = slice_by_index(begin = var_50_begin_0, end = var_50_end_0, end_mask = var_50_end_mask_0, x = cat_v_cast_fp16)[name = tensor<string, []>("op_50_cast_fp16")];
+ tensor<fp16, []> var_51_promoted_to_fp16 = const()[name = tensor<string, []>("op_51_promoted_to_fp16"), val = tensor<fp16, []>(0x1p+1)];
+ tensor<fp16, [1, 448, 1, 512]> prod_cast_fp16 = mul(x = updated_k_cache, y = var_51_promoted_to_fp16)[name = tensor<string, []>("prod_cast_fp16")];
+ tensor<bool, []> var_53_keep_dims_0 = const()[name = tensor<string, []>("op_53_keep_dims_0"), val = tensor<bool, []>(false)];
+ tensor<fp16, []> ignore_me_im_only_here_so_this_runs_on_the_ane = reduce_min(keep_dims = var_53_keep_dims_0, x = prod_cast_fp16)[name = tensor<string, []>("op_53_cast_fp16")];
+ } -> (updated_k_cache, updated_v_cache, ignore_me_im_only_here_so_this_runs_on_the_ane);
+ }
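This model just rolls the sliding-window KV cache: the 64 newest positions are appended to the 448 kept positions and the 64 oldest are dropped, with K stored sequence-major ([1, seq, 1, 512]) and V stored with the sequence on the last axis ([1, 512, 1, seq]). The extra scalar output, a reduce_min over 2 * updated_k_cache, carries no information and appears to exist only to keep the graph eligible for the Neural Engine. A rough PyTorch equivalent (names illustrative):

    import torch

    def update_cache(old_k, new_k, old_v, new_v):
        # old_k: [1, 448, 1, 512], new_k: [1, 64, 1, 512]  (sequence on dim 1)
        # old_v: [1, 512, 1, 448], new_v: [1, 512, 1, 64]  (sequence on dim 3)
        cat_k = torch.cat([old_k, new_k], dim=1)   # [1, 512, 1, 512]
        cat_v = torch.cat([old_v, new_v], dim=3)   # [1, 512, 1, 512]
        updated_k = cat_k[:, 64:]                  # drop the 64 oldest positions
        updated_v = cat_v[..., 64:]
        # Dummy scalar mirroring the reduce_min in the MIL; ANE eligibility only.
        dummy = (updated_k * 2).min()
        return updated_k, updated_v, dummy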
logit-processor.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0ad03dc247f59282bf008d857db8620b0ad600eb939bfa2a4e8a78438e1c2573
+ size 243
logit-processor.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ccca55190c5da56bfc175471f3239eeeb7bffece8d38d565de9443edef9c9148
+ size 378
logit-processor.mlmodelc/metadata.json ADDED
@@ -0,0 +1,58 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Int32",
+ "formattedType" : "MultiArray (Int32)",
+ "shortDescription" : "",
+ "shape" : "[]",
+ "name" : "argmax",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 7,
+ "mlProgramOperationTypeHistogram" : {
+ "Ios16.reduceArgmax" : 1
+ },
+ "computePrecision" : "Mixed (Float16, Int32)",
+ "isUpdatable" : "0",
+ "availability" : {
+ "macOS" : "13.0",
+ "tvOS" : "16.0",
+ "visionOS" : "1.0",
+ "watchOS" : "9.0",
+ "iOS" : "16.0",
+ "macCatalyst" : "16.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
+ "com.github.apple.coremltools.source" : "torch==2.1.0",
+ "com.github.apple.coremltools.version" : "8.0b1"
+ },
+ "inputSchema" : [
+ {
+ "shortDescription" : "",
+ "dataType" : "Float16",
+ "hasShapeFlexibility" : "1",
+ "isOptional" : "0",
+ "shapeFlexibility" : "1 × 511 × 32000 | 1 × 1 × 32000 | 1 × 2 × 32000 | 1 × 4 × 32000 | 1 × 64 × 32000 | 1 × 64 × 128256 | 1 × 512 × 32000",
+ "formattedType" : "MultiArray (Float16 1 × 511 × 32000)",
+ "type" : "MultiArray",
+ "shape" : "[1, 511, 32000]",
+ "name" : "logits",
+ "enumeratedShapes" : "[[1, 511, 32000], [1, 1, 32000], [1, 2, 32000], [1, 4, 32000], [1, 64, 32000], [1, 64, 128256], [1, 512, 32000]]"
+ }
+ ],
+ "generatedClassName" : "logit_processor",
+ "method" : "predict"
+ }
+ ]
logit-processor.mlmodelc/model.mil ADDED
@@ -0,0 +1,9 @@
+ program(1.0)
+ [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3304.5.2"}, {"coremlc-version", "3304.6.2"}, {"coremltools-component-torch", "2.1.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0b1"}})]
+ {
+ func main<ios16>(tensor<fp16, [1, ?, ?]> logits) [FlexibleShapeInformation = tuple<tuple<tensor<string, []>, dict<tensor<string, []>, tensor<int32, [?]>>>, tuple<tensor<string, []>, dict<tensor<string, []>, dict<tensor<string, []>, tensor<int32, [?]>>>>>((("DefaultShapes", {{"logits", [1, 511, 32000]}}), ("EnumeratedShapes", {{"logits_1_1_1_1_32000_", {{"logits", [1, 1, 32000]}}}, {"logits_1_1_1_2_32000_", {{"logits", [1, 2, 32000]}}}, {"logits_1_1_1_4_32000_", {{"logits", [1, 4, 32000]}}}, {"logits_1_1_1_511_32000_", {{"logits", [1, 511, 32000]}}}, {"logits_1_1_1_512_32000_", {{"logits", [1, 512, 32000]}}}, {"logits_1_1_1_64_128256_", {{"logits", [1, 64, 128256]}}}, {"logits_1_1_1_64_32000_", {{"logits", [1, 64, 32000]}}}})))] {
+ tensor<int32, []> var_2 = const()[name = tensor<string, []>("op_2"), val = tensor<int32, []>(-1)];
+ tensor<bool, []> var_3 = const()[name = tensor<string, []>("op_3"), val = tensor<bool, []>(false)];
+ tensor<int32, [1, ?]> argmax = reduce_argmax(axis = var_2, keep_dims = var_3, x = logits)[name = tensor<string, []>("op_4_cast_fp16")];
+ } -> (argmax);
+ }
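The logit processor is a greedy-decoding argmax over the vocabulary axis, compiled with enumerated input shapes (sequence lengths of 1, 2, 4, 64, 511, and 512, with both 32000 and 128256 vocabulary sizes) so one compiled model covers several configurations. An illustrative call through coremltools (the path and random input are placeholders):

    import numpy as np
    import coremltools as ct

    lp = ct.models.CompiledMLModel("logit-processor.mlmodelc")

    # [1, 64, 128256] is one of the enumerated shapes listed in the metadata above.
    logits = np.random.rand(1, 64, 128256).astype(np.float16)
    token_ids = lp.predict({"logits": logits})["argmax"]   # Int32, shape (1, 64)

    # The same greedy pick in plain numpy:
    reference = logits.argmax(axis=-1)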