program(1.3)
[buildInfo = dict<string, string>({{"coremlc-component-MIL", "3400.42.1"}, {"coremlc-version", "3400.51.1"}})]
{
func input_1_context_512<ios18>(tensor<fp16, [128, 4]> cos, tensor<fp16, [1, 32, 128, 508]> k_cache_0, tensor<fp16, [1, 32, 128, 508]> k_cache_1, tensor<fp16, [1, 1, 4, 512]> mask, tensor<fp16, [128, 4]> sin, tensor<fp16, [1, 32, 508, 128]> v_cache_0, tensor<fp16, [1, 32, 508, 128]> v_cache_1, tensor<fp16, [1, 4096, 1, 4]> x) [CoreML_InputDefaultValues = dict<string, fp32>({{"k_cache_0", 0}, {"k_cache_1", 0}, {"v_cache_0", 0}, {"v_cache_1", 0}})] {
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [4096, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(464735296))))[name = string("blocks_0_attn_q_proj_weight_palettized_cast_fp16")];
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [4096, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(8388864))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(464735424))))[name = string("blocks_0_attn_k_proj_weight_palettized_cast_fp16")];
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [4096, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(16777664))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(464735552))))[name = string("blocks_0_attn_v_proj_weight_palettized_cast_fp16")];
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [4096, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(25166464))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(464735680))))[name = string("blocks_0_attn_proj_weight_palettized_cast_fp16")];
tensor<fp16, [11008, 4096, 1, 1]> blocks_0_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [11008, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(33555264))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(464735808))))[name = string("blocks_0_mlp_fc_1_weight_palettized_cast_fp16")];
tensor<fp16, [11008, 4096, 1, 1]> blocks_0_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [11008, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(56099840))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(464735936))))[name = string("blocks_0_mlp_fc_2_weight_palettized_cast_fp16")];
tensor<fp16, [4096, 11008, 1, 1]> blocks_0_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [4096, 11008, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(78644416))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(464736064))))[name = string("blocks_0_mlp_proj_weight_palettized_cast_fp16")];
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [4096, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(101188992))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(464736192))))[name = string("blocks_1_attn_q_proj_weight_palettized_cast_fp16")];
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [4096, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(109577792))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(464736320))))[name = string("blocks_1_attn_k_proj_weight_palettized_cast_fp16")];
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [4096, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(117966592))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(464736448))))[name = string("blocks_1_attn_v_proj_weight_palettized_cast_fp16")];
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [4096, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(126355392))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(464736576))))[name = string("blocks_1_attn_proj_weight_palettized_cast_fp16")];
tensor<fp16, [11008, 4096, 1, 1]> blocks_1_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [11008, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(134744192))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(464736704))))[name = string("blocks_1_mlp_fc_1_weight_palettized_cast_fp16")];
tensor<fp16, [11008, 4096, 1, 1]> blocks_1_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [11008, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(157288768))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(464736832))))[name = string("blocks_1_mlp_fc_2_weight_palettized_cast_fp16")];
tensor<fp16, [4096, 11008, 1, 1]> blocks_1_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [4096, 11008, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(179833344))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(464736960))))[name = string("blocks_1_mlp_proj_weight_palettized_cast_fp16")];
int32 var_19 = const()[name = string("op_19"), val = int32(-1)];
int32 var_27 = const()[name = string("op_27"), val = int32(3)];
int32 var_28 = const()[name = string("op_28"), val = int32(1)];
int32 var_31 = const()[name = string("op_31"), val = int32(-2)];
bool var_32 = const()[name = string("op_32"), val = bool(true)];
tensor<int32, [1]> var_50_axes_0 = const()[name = string("op_50_axes_0"), val = tensor<int32, [1]>([-2])];
tensor<fp16, [1, 4096, 4]> var_50_cast_fp16 = squeeze(axes = var_50_axes_0, x = x)[name = string("op_50_cast_fp16")];
bool var_52_interleave_0 = const()[name = string("op_52_interleave_0"), val = bool(false)];
tensor<fp16, [1, 1, 4]> eps_chan_1_to_fp16 = const()[name = string("eps_chan_1_to_fp16"), val = tensor<fp16, [1, 1, 4]>([[[0x1.9e8p-3, 0x1.9e8p-3, 0x1.9e8p-3, 0x1.9e8p-3]]])];
tensor<fp16, [1, 4097, 4]> var_52_cast_fp16 = concat(axis = var_28, interleave = var_52_interleave_0, values = (var_50_cast_fp16, eps_chan_1_to_fp16))[name = string("op_52_cast_fp16")];
tensor<int32, [1]> x_eps_1_axes_0 = const()[name = string("x_eps_1_axes_0"), val = tensor<int32, [1]>([-2])];
tensor<fp16, [1, 4097, 1, 4]> x_eps_1_cast_fp16 = expand_dims(axes = x_eps_1_axes_0, x = var_52_cast_fp16)[name = string("x_eps_1_cast_fp16")];
tensor<int32, [1]> norm_x_1_axes_0 = const()[name = string("norm_x_1_axes_0"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 1, 4]> norm_x_1_cast_fp16 = reduce_l2_norm(axes = norm_x_1_axes_0, keep_dims = var_32, x = x_eps_1_cast_fp16)[name = string("norm_x_1_cast_fp16")];
tensor<fp16, [1, 4096, 1, 4]> x_normed_1_cast_fp16 = real_div(x = x, y = norm_x_1_cast_fp16)[name = string("x_normed_1_cast_fp16")];
fp16 var_57_to_fp16 = const()[name = string("op_57_to_fp16"), val = fp16(0x1p+6)];
tensor<fp16, [1, 4096, 1, 4]> x_normed_3_cast_fp16 = mul(x = x_normed_1_cast_fp16, y = var_57_to_fp16)[name = string("x_normed_3_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_norm_1_weight_to_fp16 = const()[name = string("blocks_0_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202379008)))];
tensor<fp16, [1, 4096, 1, 4]> x_5_cast_fp16 = mul(x = x_normed_3_cast_fp16, y = blocks_0_norm_1_weight_to_fp16)[name = string("x_5_cast_fp16")];
tensor<int32, [2]> var_70 = const()[name = string("op_70"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_72 = const()[name = string("op_72"), val = tensor<int32, [2]>([1, 1])];
string var_74_pad_type_0 = const()[name = string("op_74_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_74_pad_0 = const()[name = string("op_74_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 4]> var_74_cast_fp16 = conv(dilations = var_72, groups = var_28, pad = var_74_pad_0, pad_type = var_74_pad_type_0, strides = var_70, weight = blocks_0_attn_q_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = string("op_74_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_q_proj_output_scales_to_fp16 = const()[name = string("blocks_0_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202387264)))];
tensor<fp16, [1, 4096, 1, 4]> q_1_cast_fp16 = mul(x = var_74_cast_fp16, y = blocks_0_attn_q_proj_output_scales_to_fp16)[name = string("q_1_cast_fp16")];
tensor<int32, [2]> var_78 = const()[name = string("op_78"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_80 = const()[name = string("op_80"), val = tensor<int32, [2]>([1, 1])];
string var_82_pad_type_0 = const()[name = string("op_82_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_82_pad_0 = const()[name = string("op_82_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 4]> var_82_cast_fp16 = conv(dilations = var_80, groups = var_28, pad = var_82_pad_0, pad_type = var_82_pad_type_0, strides = var_78, weight = blocks_0_attn_k_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = string("op_82_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_k_proj_output_scales_to_fp16 = const()[name = string("blocks_0_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202395520)))];
tensor<fp16, [1, 4096, 1, 4]> k_1_cast_fp16 = mul(x = var_82_cast_fp16, y = blocks_0_attn_k_proj_output_scales_to_fp16)[name = string("k_1_cast_fp16")];
tensor<int32, [2]> var_86 = const()[name = string("op_86"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_88 = const()[name = string("op_88"), val = tensor<int32, [2]>([1, 1])];
string var_90_pad_type_0 = const()[name = string("op_90_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_90_pad_0 = const()[name = string("op_90_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 4]> var_90_cast_fp16 = conv(dilations = var_88, groups = var_28, pad = var_90_pad_0, pad_type = var_90_pad_type_0, strides = var_86, weight = blocks_0_attn_v_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = string("op_90_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_v_proj_output_scales_to_fp16 = const()[name = string("blocks_0_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202403776)))];
tensor<fp16, [1, 4096, 1, 4]> v_1_cast_fp16 = mul(x = var_90_cast_fp16, y = blocks_0_attn_v_proj_output_scales_to_fp16)[name = string("v_1_cast_fp16")];
tensor<int32, [4]> var_92 = const()[name = string("op_92"), val = tensor<int32, [4]>([1, 32, 128, 4])];
tensor<fp16, [1, 32, 128, 4]> q_3_cast_fp16 = reshape(shape = var_92, x = q_1_cast_fp16)[name = string("q_3_cast_fp16")];
tensor<int32, [4]> var_94 = const()[name = string("op_94"), val = tensor<int32, [4]>([1, 32, 128, 4])];
tensor<fp16, [1, 32, 128, 4]> k_3_cast_fp16 = reshape(shape = var_94, x = k_1_cast_fp16)[name = string("k_3_cast_fp16")];
tensor<int32, [4]> var_96 = const()[name = string("op_96"), val = tensor<int32, [4]>([1, 32, 128, 4])];
tensor<fp16, [1, 32, 128, 4]> v_3_cast_fp16 = reshape(shape = var_96, x = v_1_cast_fp16)[name = string("v_3_cast_fp16")];
tensor<int32, [4]> var_108_begin_0 = const()[name = string("op_108_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_108_end_0 = const()[name = string("op_108_end_0"), val = tensor<int32, [4]>([1, 32, 64, 4])];
tensor<bool, [4]> var_108_end_mask_0 = const()[name = string("op_108_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 64, 4]> var_108_cast_fp16 = slice_by_index(begin = var_108_begin_0, end = var_108_end_0, end_mask = var_108_end_mask_0, x = q_3_cast_fp16)[name = string("op_108_cast_fp16")];
tensor<int32, [4]> var_114_begin_0 = const()[name = string("op_114_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
tensor<int32, [4]> var_114_end_0 = const()[name = string("op_114_end_0"), val = tensor<int32, [4]>([1, 32, 128, 4])];
tensor<bool, [4]> var_114_end_mask_0 = const()[name = string("op_114_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
tensor<fp16, [1, 32, 64, 4]> var_114_cast_fp16 = slice_by_index(begin = var_114_begin_0, end = var_114_end_0, end_mask = var_114_end_mask_0, x = q_3_cast_fp16)[name = string("op_114_cast_fp16")];
fp16 const_6_promoted_to_fp16 = const()[name = string("const_6_promoted_to_fp16"), val = fp16(-0x1p+0)];
tensor<fp16, [1, 32, 64, 4]> var_116_cast_fp16 = mul(x = var_114_cast_fp16, y = const_6_promoted_to_fp16)[name = string("op_116_cast_fp16")];
bool rotated_1_interleave_0 = const()[name = string("rotated_1_interleave_0"), val = bool(false)];
tensor<fp16, [1, 32, 128, 4]> rotated_1_cast_fp16 = concat(axis = var_31, interleave = rotated_1_interleave_0, values = (var_116_cast_fp16, var_108_cast_fp16))[name = string("rotated_1_cast_fp16")];
tensor<fp16, [1, 32, 128, 4]> var_119_cast_fp16 = mul(x = q_3_cast_fp16, y = cos)[name = string("op_119_cast_fp16")];
tensor<fp16, [1, 32, 128, 4]> var_120_cast_fp16 = mul(x = rotated_1_cast_fp16, y = sin)[name = string("op_120_cast_fp16")];
tensor<fp16, [1, 32, 128, 4]> roped_1_cast_fp16 = add(x = var_119_cast_fp16, y = var_120_cast_fp16)[name = string("roped_1_cast_fp16")];
tensor<int32, [4]> var_133_begin_0 = const()[name = string("op_133_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_133_end_0 = const()[name = string("op_133_end_0"), val = tensor<int32, [4]>([1, 32, 64, 4])];
tensor<bool, [4]> var_133_end_mask_0 = const()[name = string("op_133_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 64, 4]> var_133_cast_fp16 = slice_by_index(begin = var_133_begin_0, end = var_133_end_0, end_mask = var_133_end_mask_0, x = k_3_cast_fp16)[name = string("op_133_cast_fp16")];
tensor<int32, [4]> var_139_begin_0 = const()[name = string("op_139_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
tensor<int32, [4]> var_139_end_0 = const()[name = string("op_139_end_0"), val = tensor<int32, [4]>([1, 32, 128, 4])];
tensor<bool, [4]> var_139_end_mask_0 = const()[name = string("op_139_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
tensor<fp16, [1, 32, 64, 4]> var_139_cast_fp16 = slice_by_index(begin = var_139_begin_0, end = var_139_end_0, end_mask = var_139_end_mask_0, x = k_3_cast_fp16)[name = string("op_139_cast_fp16")];
fp16 const_8_promoted_to_fp16 = const()[name = string("const_8_promoted_to_fp16"), val = fp16(-0x1p+0)];
tensor<fp16, [1, 32, 64, 4]> var_141_cast_fp16 = mul(x = var_139_cast_fp16, y = const_8_promoted_to_fp16)[name = string("op_141_cast_fp16")];
bool rotated_3_interleave_0 = const()[name = string("rotated_3_interleave_0"), val = bool(false)];
tensor<fp16, [1, 32, 128, 4]> rotated_3_cast_fp16 = concat(axis = var_31, interleave = rotated_3_interleave_0, values = (var_141_cast_fp16, var_133_cast_fp16))[name = string("rotated_3_cast_fp16")];
tensor<fp16, [1, 32, 128, 4]> var_144_cast_fp16 = mul(x = k_3_cast_fp16, y = cos)[name = string("op_144_cast_fp16")];
tensor<fp16, [1, 32, 128, 4]> var_145_cast_fp16 = mul(x = rotated_3_cast_fp16, y = sin)[name = string("op_145_cast_fp16")];
tensor<fp16, [1, 32, 128, 4]> roped_3_cast_fp16 = add(x = var_144_cast_fp16, y = var_145_cast_fp16)[name = string("roped_3_cast_fp16")];
tensor<int32, [4]> v_5_perm_0 = const()[name = string("v_5_perm_0"), val = tensor<int32, [4]>([0, 1, -1, -2])];
bool k_7_interleave_0 = const()[name = string("k_7_interleave_0"), val = bool(false)];
tensor<fp16, [1, 32, 128, 512]> k_7_cast_fp16 = concat(axis = var_19, interleave = k_7_interleave_0, values = (k_cache_0, roped_3_cast_fp16))[name = string("k_7_cast_fp16")];
bool v_7_interleave_0 = const()[name = string("v_7_interleave_0"), val = bool(false)];
tensor<fp16, [1, 32, 4, 128]> v_5_cast_fp16 = transpose(perm = v_5_perm_0, x = v_3_cast_fp16)[name = string("transpose_8")];
tensor<fp16, [1, 32, 512, 128]> v_7_cast_fp16 = concat(axis = var_31, interleave = v_7_interleave_0, values = (v_cache_0, v_5_cast_fp16))[name = string("v_7_cast_fp16")];
tensor<int32, [4]> var_156_begin_0 = const()[name = string("op_156_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 1])];
tensor<int32, [4]> var_156_end_0 = const()[name = string("op_156_end_0"), val = tensor<int32, [4]>([1, 32, 128, 509])];
tensor<bool, [4]> var_156_end_mask_0 = const()[name = string("op_156_end_mask_0"), val = tensor<bool, [4]>([true, true, true, false])];
tensor<fp16, [1, 32, 128, 508]> new_k_cache_0 = slice_by_index(begin = var_156_begin_0, end = var_156_end_0, end_mask = var_156_end_mask_0, x = k_7_cast_fp16)[name = string("op_156_cast_fp16")];
tensor<int32, [4]> var_157_begin_0 = const()[name = string("op_157_begin_0"), val = tensor<int32, [4]>([0, 0, 1, 0])];
tensor<int32, [4]> var_157_end_0 = const()[name = string("op_157_end_0"), val = tensor<int32, [4]>([1, 32, 509, 128])];
tensor<bool, [4]> var_157_end_mask_0 = const()[name = string("op_157_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 508, 128]> new_v_cache_0 = slice_by_index(begin = var_157_begin_0, end = var_157_end_0, end_mask = var_157_end_mask_0, x = v_7_cast_fp16)[name = string("op_157_cast_fp16")];
fp16 var_162_to_fp16 = const()[name = string("op_162_to_fp16"), val = fp16(0x1.6ap-4)];
tensor<fp16, [1, 32, 128, 4]> var_163_cast_fp16 = mul(x = roped_1_cast_fp16, y = var_162_to_fp16)[name = string("op_163_cast_fp16")];
bool attn_weights_1_transpose_x_0 = const()[name = string("attn_weights_1_transpose_x_0"), val = bool(true)];
bool attn_weights_1_transpose_y_0 = const()[name = string("attn_weights_1_transpose_y_0"), val = bool(false)];
tensor<fp16, [1, 32, 4, 512]> attn_weights_1_cast_fp16 = matmul(transpose_x = attn_weights_1_transpose_x_0, transpose_y = attn_weights_1_transpose_y_0, x = var_163_cast_fp16, y = k_7_cast_fp16)[name = string("attn_weights_1_cast_fp16")];
tensor<fp16, [1, 32, 4, 512]> attn_weights_3_cast_fp16 = add(x = attn_weights_1_cast_fp16, y = mask)[name = string("attn_weights_3_cast_fp16")];
tensor<fp16, [1, 32, 4, 512]> attn_weights_5_cast_fp16 = softmax(axis = var_27, x = attn_weights_3_cast_fp16)[name = string("attn_weights_5_cast_fp16")];
bool var_172_transpose_x_0 = const()[name = string("op_172_transpose_x_0"), val = bool(false)];
bool var_172_transpose_y_0 = const()[name = string("op_172_transpose_y_0"), val = bool(false)];
tensor<fp16, [1, 32, 4, 128]> var_172_cast_fp16 = matmul(transpose_x = var_172_transpose_x_0, transpose_y = var_172_transpose_y_0, x = attn_weights_5_cast_fp16, y = v_7_cast_fp16)[name = string("op_172_cast_fp16")];
tensor<int32, [4]> attn_1_perm_0 = const()[name = string("attn_1_perm_0"), val = tensor<int32, [4]>([0, 1, -1, -2])];
tensor<int32, [4]> var_175 = const()[name = string("op_175"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
tensor<fp16, [1, 32, 128, 4]> attn_1_cast_fp16 = transpose(perm = attn_1_perm_0, x = var_172_cast_fp16)[name = string("transpose_7")];
tensor<fp16, [1, 4096, 1, 4]> input_1_cast_fp16 = reshape(shape = var_175, x = attn_1_cast_fp16)[name = string("input_1_cast_fp16")];
tensor<int32, [2]> var_179 = const()[name = string("op_179"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_181 = const()[name = string("op_181"), val = tensor<int32, [2]>([1, 1])];
string var_183_pad_type_0 = const()[name = string("op_183_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_183_pad_0 = const()[name = string("op_183_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 4]> var_183_cast_fp16 = conv(dilations = var_181, groups = var_28, pad = var_183_pad_0, pad_type = var_183_pad_type_0, strides = var_179, weight = blocks_0_attn_proj_weight_palettized_cast_fp16, x = input_1_cast_fp16)[name = string("op_183_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_proj_output_scales_to_fp16 = const()[name = string("blocks_0_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202412032)))];
tensor<fp16, [1, 4096, 1, 4]> attention_output_1_cast_fp16 = mul(x = var_183_cast_fp16, y = blocks_0_attn_proj_output_scales_to_fp16)[name = string("attention_output_1_cast_fp16")];
tensor<fp16, [1, 4096, 1, 4]> x_11_cast_fp16 = add(x = attention_output_1_cast_fp16, y = x)[name = string("x_11_cast_fp16")];
tensor<int32, [1]> var_202_axes_0 = const()[name = string("op_202_axes_0"), val = tensor<int32, [1]>([-2])];
tensor<fp16, [1, 4096, 4]> var_202_cast_fp16 = squeeze(axes = var_202_axes_0, x = x_11_cast_fp16)[name = string("op_202_cast_fp16")];
bool var_204_interleave_0 = const()[name = string("op_204_interleave_0"), val = bool(false)];
tensor<fp16, [1, 1, 4]> eps_chan_3_to_fp16 = const()[name = string("eps_chan_3_to_fp16"), val = tensor<fp16, [1, 1, 4]>([[[0x1.9e8p-3, 0x1.9e8p-3, 0x1.9e8p-3, 0x1.9e8p-3]]])];
tensor<fp16, [1, 4097, 4]> var_204_cast_fp16 = concat(axis = var_28, interleave = var_204_interleave_0, values = (var_202_cast_fp16, eps_chan_3_to_fp16))[name = string("op_204_cast_fp16")];
tensor<int32, [1]> x_eps_3_axes_0 = const()[name = string("x_eps_3_axes_0"), val = tensor<int32, [1]>([-2])];
tensor<fp16, [1, 4097, 1, 4]> x_eps_3_cast_fp16 = expand_dims(axes = x_eps_3_axes_0, x = var_204_cast_fp16)[name = string("x_eps_3_cast_fp16")];
tensor<int32, [1]> norm_x_3_axes_0 = const()[name = string("norm_x_3_axes_0"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 1, 4]> norm_x_3_cast_fp16 = reduce_l2_norm(axes = norm_x_3_axes_0, keep_dims = var_32, x = x_eps_3_cast_fp16)[name = string("norm_x_3_cast_fp16")];
tensor<fp16, [1, 4096, 1, 4]> x_normed_7_cast_fp16 = real_div(x = x_11_cast_fp16, y = norm_x_3_cast_fp16)[name = string("x_normed_7_cast_fp16")];
fp16 var_209_to_fp16 = const()[name = string("op_209_to_fp16"), val = fp16(0x1p+6)];
tensor<fp16, [1, 4096, 1, 4]> x_normed_9_cast_fp16 = mul(x = x_normed_7_cast_fp16, y = var_209_to_fp16)[name = string("x_normed_9_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_norm_2_weight_to_fp16 = const()[name = string("blocks_0_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202420288)))];
tensor<fp16, [1, 4096, 1, 4]> input_3_cast_fp16 = mul(x = x_normed_9_cast_fp16, y = blocks_0_norm_2_weight_to_fp16)[name = string("input_3_cast_fp16")];
tensor<int32, [2]> var_221 = const()[name = string("op_221"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_223 = const()[name = string("op_223"), val = tensor<int32, [2]>([1, 1])];
string var_225_pad_type_0 = const()[name = string("op_225_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_225_pad_0 = const()[name = string("op_225_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 11008, 1, 4]> var_225_cast_fp16 = conv(dilations = var_223, groups = var_28, pad = var_225_pad_0, pad_type = var_225_pad_type_0, strides = var_221, weight = blocks_0_mlp_fc_1_weight_palettized_cast_fp16, x = input_3_cast_fp16)[name = string("op_225_cast_fp16")];
tensor<fp16, [1, 11008, 1, 1]> blocks_0_mlp_fc_1_output_scales_to_fp16 = const()[name = string("blocks_0_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202428544)))];
tensor<fp16, [1, 11008, 1, 4]> input_5_cast_fp16 = mul(x = var_225_cast_fp16, y = blocks_0_mlp_fc_1_output_scales_to_fp16)[name = string("input_5_cast_fp16")];
tensor<int32, [2]> var_229 = const()[name = string("op_229"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_231 = const()[name = string("op_231"), val = tensor<int32, [2]>([1, 1])];
string var_233_pad_type_0 = const()[name = string("op_233_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_233_pad_0 = const()[name = string("op_233_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 11008, 1, 4]> var_233_cast_fp16 = conv(dilations = var_231, groups = var_28, pad = var_233_pad_0, pad_type = var_233_pad_type_0, strides = var_229, weight = blocks_0_mlp_fc_2_weight_palettized_cast_fp16, x = input_3_cast_fp16)[name = string("op_233_cast_fp16")];
tensor<fp16, [1, 11008, 1, 1]> blocks_0_mlp_fc_2_output_scales_to_fp16 = const()[name = string("blocks_0_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202450624)))];
tensor<fp16, [1, 11008, 1, 4]> x_fc_2_1_cast_fp16 = mul(x = var_233_cast_fp16, y = blocks_0_mlp_fc_2_output_scales_to_fp16)[name = string("x_fc_2_1_cast_fp16")];
tensor<fp16, [1, 11008, 1, 4]> var_235_cast_fp16 = silu(x = input_5_cast_fp16)[name = string("op_235_cast_fp16")];
tensor<fp16, [1, 11008, 1, 4]> input_7_cast_fp16 = mul(x = var_235_cast_fp16, y = x_fc_2_1_cast_fp16)[name = string("input_7_cast_fp16")];
tensor<int32, [2]> var_239 = const()[name = string("op_239"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_241 = const()[name = string("op_241"), val = tensor<int32, [2]>([1, 1])];
string var_243_pad_type_0 = const()[name = string("op_243_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_243_pad_0 = const()[name = string("op_243_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 4]> var_243_cast_fp16 = conv(dilations = var_241, groups = var_28, pad = var_243_pad_0, pad_type = var_243_pad_type_0, strides = var_239, weight = blocks_0_mlp_proj_weight_palettized_cast_fp16, x = input_7_cast_fp16)[name = string("op_243_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_mlp_proj_output_scales_to_fp16 = const()[name = string("blocks_0_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202472704)))];
tensor<fp16, [1, 4096, 1, 4]> var_244_cast_fp16 = mul(x = var_243_cast_fp16, y = blocks_0_mlp_proj_output_scales_to_fp16)[name = string("op_244_cast_fp16")];
tensor<fp16, [1, 4096, 1, 4]> x_15_cast_fp16 = add(x = var_244_cast_fp16, y = x_11_cast_fp16)[name = string("x_15_cast_fp16")];
int32 var_255 = const()[name = string("op_255"), val = int32(-1)];
int32 var_263 = const()[name = string("op_263"), val = int32(3)];
int32 var_264 = const()[name = string("op_264"), val = int32(1)];
int32 var_267 = const()[name = string("op_267"), val = int32(-2)];
bool var_268 = const()[name = string("op_268"), val = bool(true)];
tensor<int32, [1]> var_285_axes_0 = const()[name = string("op_285_axes_0"), val = tensor<int32, [1]>([-2])];
tensor<fp16, [1, 4096, 4]> var_285_cast_fp16 = squeeze(axes = var_285_axes_0, x = x_15_cast_fp16)[name = string("op_285_cast_fp16")];
bool var_287_interleave_0 = const()[name = string("op_287_interleave_0"), val = bool(false)];
tensor<fp16, [1, 1, 4]> eps_chan_5_to_fp16 = const()[name = string("eps_chan_5_to_fp16"), val = tensor<fp16, [1, 1, 4]>([[[0x1.9e8p-3, 0x1.9e8p-3, 0x1.9e8p-3, 0x1.9e8p-3]]])];
tensor<fp16, [1, 4097, 4]> var_287_cast_fp16 = concat(axis = var_264, interleave = var_287_interleave_0, values = (var_285_cast_fp16, eps_chan_5_to_fp16))[name = string("op_287_cast_fp16")];
tensor<int32, [1]> x_eps_5_axes_0 = const()[name = string("x_eps_5_axes_0"), val = tensor<int32, [1]>([-2])];
tensor<fp16, [1, 4097, 1, 4]> x_eps_5_cast_fp16 = expand_dims(axes = x_eps_5_axes_0, x = var_287_cast_fp16)[name = string("x_eps_5_cast_fp16")];
tensor<int32, [1]> norm_x_5_axes_0 = const()[name = string("norm_x_5_axes_0"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 1, 4]> norm_x_5_cast_fp16 = reduce_l2_norm(axes = norm_x_5_axes_0, keep_dims = var_268, x = x_eps_5_cast_fp16)[name = string("norm_x_5_cast_fp16")];
tensor<fp16, [1, 4096, 1, 4]> x_normed_13_cast_fp16 = real_div(x = x_15_cast_fp16, y = norm_x_5_cast_fp16)[name = string("x_normed_13_cast_fp16")];
fp16 var_292_to_fp16 = const()[name = string("op_292_to_fp16"), val = fp16(0x1p+6)];
tensor<fp16, [1, 4096, 1, 4]> x_normed_15_cast_fp16 = mul(x = x_normed_13_cast_fp16, y = var_292_to_fp16)[name = string("x_normed_15_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_norm_1_weight_to_fp16 = const()[name = string("blocks_1_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202480960)))];
tensor<fp16, [1, 4096, 1, 4]> x_19_cast_fp16 = mul(x = x_normed_15_cast_fp16, y = blocks_1_norm_1_weight_to_fp16)[name = string("x_19_cast_fp16")];
tensor<int32, [2]> var_308 = const()[name = string("op_308"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_310 = const()[name = string("op_310"), val = tensor<int32, [2]>([1, 1])];
string var_312_pad_type_0 = const()[name = string("op_312_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_312_pad_0 = const()[name = string("op_312_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 4]> var_312_cast_fp16 = conv(dilations = var_310, groups = var_264, pad = var_312_pad_0, pad_type = var_312_pad_type_0, strides = var_308, weight = blocks_1_attn_q_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = string("op_312_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_q_proj_output_scales_to_fp16 = const()[name = string("blocks_1_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202489216)))];
tensor<fp16, [1, 4096, 1, 4]> q_7_cast_fp16 = mul(x = var_312_cast_fp16, y = blocks_1_attn_q_proj_output_scales_to_fp16)[name = string("q_7_cast_fp16")];
tensor<int32, [2]> var_316 = const()[name = string("op_316"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_318 = const()[name = string("op_318"), val = tensor<int32, [2]>([1, 1])];
string var_320_pad_type_0 = const()[name = string("op_320_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_320_pad_0 = const()[name = string("op_320_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 4]> var_320_cast_fp16 = conv(dilations = var_318, groups = var_264, pad = var_320_pad_0, pad_type = var_320_pad_type_0, strides = var_316, weight = blocks_1_attn_k_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = string("op_320_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_k_proj_output_scales_to_fp16 = const()[name = string("blocks_1_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202497472)))];
tensor<fp16, [1, 4096, 1, 4]> k_9_cast_fp16 = mul(x = var_320_cast_fp16, y = blocks_1_attn_k_proj_output_scales_to_fp16)[name = string("k_9_cast_fp16")];
tensor<int32, [2]> var_324 = const()[name = string("op_324"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_326 = const()[name = string("op_326"), val = tensor<int32, [2]>([1, 1])];
string var_328_pad_type_0 = const()[name = string("op_328_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_328_pad_0 = const()[name = string("op_328_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 4]> var_328_cast_fp16 = conv(dilations = var_326, groups = var_264, pad = var_328_pad_0, pad_type = var_328_pad_type_0, strides = var_324, weight = blocks_1_attn_v_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = string("op_328_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_v_proj_output_scales_to_fp16 = const()[name = string("blocks_1_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202505728)))];
tensor<fp16, [1, 4096, 1, 4]> v_9_cast_fp16 = mul(x = var_328_cast_fp16, y = blocks_1_attn_v_proj_output_scales_to_fp16)[name = string("v_9_cast_fp16")];
tensor<int32, [4]> var_330 = const()[name = string("op_330"), val = tensor<int32, [4]>([1, 32, 128, 4])];
tensor<fp16, [1, 32, 128, 4]> q_9_cast_fp16 = reshape(shape = var_330, x = q_7_cast_fp16)[name = string("q_9_cast_fp16")];
tensor<int32, [4]> var_332 = const()[name = string("op_332"), val = tensor<int32, [4]>([1, 32, 128, 4])];
tensor<fp16, [1, 32, 128, 4]> k_11_cast_fp16 = reshape(shape = var_332, x = k_9_cast_fp16)[name = string("k_11_cast_fp16")];
tensor<int32, [4]> var_334 = const()[name = string("op_334"), val = tensor<int32, [4]>([1, 32, 128, 4])];
tensor<fp16, [1, 32, 128, 4]> v_11_cast_fp16 = reshape(shape = var_334, x = v_9_cast_fp16)[name = string("v_11_cast_fp16")];
tensor<int32, [4]> var_346_begin_0 = const()[name = string("op_346_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_346_end_0 = const()[name = string("op_346_end_0"), val = tensor<int32, [4]>([1, 32, 64, 4])];
tensor<bool, [4]> var_346_end_mask_0 = const()[name = string("op_346_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 64, 4]> var_346_cast_fp16 = slice_by_index(begin = var_346_begin_0, end = var_346_end_0, end_mask = var_346_end_mask_0, x = q_9_cast_fp16)[name = string("op_346_cast_fp16")];
tensor<int32, [4]> var_352_begin_0 = const()[name = string("op_352_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
tensor<int32, [4]> var_352_end_0 = const()[name = string("op_352_end_0"), val = tensor<int32, [4]>([1, 32, 128, 4])];
tensor<bool, [4]> var_352_end_mask_0 = const()[name = string("op_352_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
tensor<fp16, [1, 32, 64, 4]> var_352_cast_fp16 = slice_by_index(begin = var_352_begin_0, end = var_352_end_0, end_mask = var_352_end_mask_0, x = q_9_cast_fp16)[name = string("op_352_cast_fp16")];
fp16 const_19_promoted_to_fp16 = const()[name = string("const_19_promoted_to_fp16"), val = fp16(-0x1p+0)];
tensor<fp16, [1, 32, 64, 4]> var_354_cast_fp16 = mul(x = var_352_cast_fp16, y = const_19_promoted_to_fp16)[name = string("op_354_cast_fp16")];
bool rotated_5_interleave_0 = const()[name = string("rotated_5_interleave_0"), val = bool(false)];
tensor<fp16, [1, 32, 128, 4]> rotated_5_cast_fp16 = concat(axis = var_267, interleave = rotated_5_interleave_0, values = (var_354_cast_fp16, var_346_cast_fp16))[name = string("rotated_5_cast_fp16")];
tensor<fp16, [1, 32, 128, 4]> var_357_cast_fp16 = mul(x = q_9_cast_fp16, y = cos)[name = string("op_357_cast_fp16")];
tensor<fp16, [1, 32, 128, 4]> var_358_cast_fp16 = mul(x = rotated_5_cast_fp16, y = sin)[name = string("op_358_cast_fp16")];
tensor<fp16, [1, 32, 128, 4]> roped_5_cast_fp16 = add(x = var_357_cast_fp16, y = var_358_cast_fp16)[name = string("roped_5_cast_fp16")];
tensor<int32, [4]> var_371_begin_0 = const()[name = string("op_371_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_371_end_0 = const()[name = string("op_371_end_0"), val = tensor<int32, [4]>([1, 32, 64, 4])];
tensor<bool, [4]> var_371_end_mask_0 = const()[name = string("op_371_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 64, 4]> var_371_cast_fp16 = slice_by_index(begin = var_371_begin_0, end = var_371_end_0, end_mask = var_371_end_mask_0, x = k_11_cast_fp16)[name = string("op_371_cast_fp16")];
tensor<int32, [4]> var_377_begin_0 = const()[name = string("op_377_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
tensor<int32, [4]> var_377_end_0 = const()[name = string("op_377_end_0"), val = tensor<int32, [4]>([1, 32, 128, 4])];
tensor<bool, [4]> var_377_end_mask_0 = const()[name = string("op_377_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
tensor<fp16, [1, 32, 64, 4]> var_377_cast_fp16 = slice_by_index(begin = var_377_begin_0, end = var_377_end_0, end_mask = var_377_end_mask_0, x = k_11_cast_fp16)[name = string("op_377_cast_fp16")];
fp16 const_21_promoted_to_fp16 = const()[name = string("const_21_promoted_to_fp16"), val = fp16(-0x1p+0)];
tensor<fp16, [1, 32, 64, 4]> var_379_cast_fp16 = mul(x = var_377_cast_fp16, y = const_21_promoted_to_fp16)[name = string("op_379_cast_fp16")];
bool rotated_interleave_0 = const()[name = string("rotated_interleave_0"), val = bool(false)];
tensor<fp16, [1, 32, 128, 4]> rotated_cast_fp16 = concat(axis = var_267, interleave = rotated_interleave_0, values = (var_379_cast_fp16, var_371_cast_fp16))[name = string("rotated_cast_fp16")];
tensor<fp16, [1, 32, 128, 4]> var_382_cast_fp16 = mul(x = k_11_cast_fp16, y = cos)[name = string("op_382_cast_fp16")];
tensor<fp16, [1, 32, 128, 4]> var_383_cast_fp16 = mul(x = rotated_cast_fp16, y = sin)[name = string("op_383_cast_fp16")];
tensor<fp16, [1, 32, 128, 4]> roped_cast_fp16 = add(x = var_382_cast_fp16, y = var_383_cast_fp16)[name = string("roped_cast_fp16")];
tensor<int32, [4]> v_13_perm_0 = const()[name = string("v_13_perm_0"), val = tensor<int32, [4]>([0, 1, -1, -2])];
bool k_interleave_0 = const()[name = string("k_interleave_0"), val = bool(false)];
tensor<fp16, [1, 32, 128, 512]> k_cast_fp16 = concat(axis = var_255, interleave = k_interleave_0, values = (k_cache_1, roped_cast_fp16))[name = string("k_cast_fp16")];
bool v_interleave_0 = const()[name = string("v_interleave_0"), val = bool(false)];
tensor<fp16, [1, 32, 4, 128]> v_13_cast_fp16 = transpose(perm = v_13_perm_0, x = v_11_cast_fp16)[name = string("transpose_6")];
tensor<fp16, [1, 32, 512, 128]> v_cast_fp16 = concat(axis = var_267, interleave = v_interleave_0, values = (v_cache_1, v_13_cast_fp16))[name = string("v_cast_fp16")];
tensor<int32, [4]> var_394_begin_0 = const()[name = string("op_394_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 1])];
tensor<int32, [4]> var_394_end_0 = const()[name = string("op_394_end_0"), val = tensor<int32, [4]>([1, 32, 128, 509])];
tensor<bool, [4]> var_394_end_mask_0 = const()[name = string("op_394_end_mask_0"), val = tensor<bool, [4]>([true, true, true, false])];
tensor<fp16, [1, 32, 128, 508]> new_k_cache_1 = slice_by_index(begin = var_394_begin_0, end = var_394_end_0, end_mask = var_394_end_mask_0, x = k_cast_fp16)[name = string("op_394_cast_fp16")];
tensor<int32, [4]> var_395_begin_0 = const()[name = string("op_395_begin_0"), val = tensor<int32, [4]>([0, 0, 1, 0])];
tensor<int32, [4]> var_395_end_0 = const()[name = string("op_395_end_0"), val = tensor<int32, [4]>([1, 32, 509, 128])];
tensor<bool, [4]> var_395_end_mask_0 = const()[name = string("op_395_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 508, 128]> new_v_cache_1 = slice_by_index(begin = var_395_begin_0, end = var_395_end_0, end_mask = var_395_end_mask_0, x = v_cast_fp16)[name = string("op_395_cast_fp16")];
fp16 var_400_to_fp16 = const()[name = string("op_400_to_fp16"), val = fp16(0x1.6ap-4)];
tensor<fp16, [1, 32, 128, 4]> var_401_cast_fp16 = mul(x = roped_5_cast_fp16, y = var_400_to_fp16)[name = string("op_401_cast_fp16")];
bool attn_weights_7_transpose_x_0 = const()[name = string("attn_weights_7_transpose_x_0"), val = bool(true)];
bool attn_weights_7_transpose_y_0 = const()[name = string("attn_weights_7_transpose_y_0"), val = bool(false)];
tensor<fp16, [1, 32, 4, 512]> attn_weights_7_cast_fp16 = matmul(transpose_x = attn_weights_7_transpose_x_0, transpose_y = attn_weights_7_transpose_y_0, x = var_401_cast_fp16, y = k_cast_fp16)[name = string("attn_weights_7_cast_fp16")];
tensor<fp16, [1, 32, 4, 512]> attn_weights_9_cast_fp16 = add(x = attn_weights_7_cast_fp16, y = mask)[name = string("attn_weights_9_cast_fp16")];
tensor<fp16, [1, 32, 4, 512]> attn_weights_cast_fp16 = softmax(axis = var_263, x = attn_weights_9_cast_fp16)[name = string("attn_weights_cast_fp16")];
bool var_410_transpose_x_0 = const()[name = string("op_410_transpose_x_0"), val = bool(false)];
bool var_410_transpose_y_0 = const()[name = string("op_410_transpose_y_0"), val = bool(false)];
tensor<fp16, [1, 32, 4, 128]> var_410_cast_fp16 = matmul(transpose_x = var_410_transpose_x_0, transpose_y = var_410_transpose_y_0, x = attn_weights_cast_fp16, y = v_cast_fp16)[name = string("op_410_cast_fp16")];
tensor<int32, [4]> attn_3_perm_0 = const()[name = string("attn_3_perm_0"), val = tensor<int32, [4]>([0, 1, -1, -2])];
tensor<int32, [4]> var_413 = const()[name = string("op_413"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
tensor<fp16, [1, 32, 128, 4]> attn_3_cast_fp16 = transpose(perm = attn_3_perm_0, x = var_410_cast_fp16)[name = string("transpose_5")];
tensor<fp16, [1, 4096, 1, 4]> input_9_cast_fp16 = reshape(shape = var_413, x = attn_3_cast_fp16)[name = string("input_9_cast_fp16")];
tensor<int32, [2]> var_417 = const()[name = string("op_417"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_419 = const()[name = string("op_419"), val = tensor<int32, [2]>([1, 1])];
string var_421_pad_type_0 = const()[name = string("op_421_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_421_pad_0 = const()[name = string("op_421_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 4]> var_421_cast_fp16 = conv(dilations = var_419, groups = var_264, pad = var_421_pad_0, pad_type = var_421_pad_type_0, strides = var_417, weight = blocks_1_attn_proj_weight_palettized_cast_fp16, x = input_9_cast_fp16)[name = string("op_421_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_proj_output_scales_to_fp16 = const()[name = string("blocks_1_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202513984)))];
tensor<fp16, [1, 4096, 1, 4]> attention_output_cast_fp16 = mul(x = var_421_cast_fp16, y = blocks_1_attn_proj_output_scales_to_fp16)[name = string("attention_output_cast_fp16")];
tensor<fp16, [1, 4096, 1, 4]> x_25_cast_fp16 = add(x = attention_output_cast_fp16, y = x_15_cast_fp16)[name = string("x_25_cast_fp16")];
tensor<int32, [1]> var_440_axes_0 = const()[name = string("op_440_axes_0"), val = tensor<int32, [1]>([-2])];
tensor<fp16, [1, 4096, 4]> var_440_cast_fp16 = squeeze(axes = var_440_axes_0, x = x_25_cast_fp16)[name = string("op_440_cast_fp16")];
bool var_442_interleave_0 = const()[name = string("op_442_interleave_0"), val = bool(false)];
tensor<fp16, [1, 1, 4]> eps_chan_7_to_fp16 = const()[name = string("eps_chan_7_to_fp16"), val = tensor<fp16, [1, 1, 4]>([[[0x1.9e8p-3, 0x1.9e8p-3, 0x1.9e8p-3, 0x1.9e8p-3]]])];
tensor<fp16, [1, 4097, 4]> var_442_cast_fp16 = concat(axis = var_264, interleave = var_442_interleave_0, values = (var_440_cast_fp16, eps_chan_7_to_fp16))[name = string("op_442_cast_fp16")];
tensor<int32, [1]> x_eps_7_axes_0 = const()[name = string("x_eps_7_axes_0"), val = tensor<int32, [1]>([-2])];
tensor<fp16, [1, 4097, 1, 4]> x_eps_7_cast_fp16 = expand_dims(axes = x_eps_7_axes_0, x = var_442_cast_fp16)[name = string("x_eps_7_cast_fp16")];
tensor<int32, [1]> norm_x_7_axes_0 = const()[name = string("norm_x_7_axes_0"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 1, 4]> norm_x_7_cast_fp16 = reduce_l2_norm(axes = norm_x_7_axes_0, keep_dims = var_268, x = x_eps_7_cast_fp16)[name = string("norm_x_7_cast_fp16")];
tensor<fp16, [1, 4096, 1, 4]> x_normed_19_cast_fp16 = real_div(x = x_25_cast_fp16, y = norm_x_7_cast_fp16)[name = string("x_normed_19_cast_fp16")];
fp16 var_447_to_fp16 = const()[name = string("op_447_to_fp16"), val = fp16(0x1p+6)];
tensor<fp16, [1, 4096, 1, 4]> x_normed_21_cast_fp16 = mul(x = x_normed_19_cast_fp16, y = var_447_to_fp16)[name = string("x_normed_21_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_norm_2_weight_to_fp16 = const()[name = string("blocks_1_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202522240)))];
tensor<fp16, [1, 4096, 1, 4]> input_11_cast_fp16 = mul(x = x_normed_21_cast_fp16, y = blocks_1_norm_2_weight_to_fp16)[name = string("input_11_cast_fp16")];
tensor<int32, [2]> var_459 = const()[name = string("op_459"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_461 = const()[name = string("op_461"), val = tensor<int32, [2]>([1, 1])];
string var_463_pad_type_0 = const()[name = string("op_463_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_463_pad_0 = const()[name = string("op_463_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 11008, 1, 4]> var_463_cast_fp16 = conv(dilations = var_461, groups = var_264, pad = var_463_pad_0, pad_type = var_463_pad_type_0, strides = var_459, weight = blocks_1_mlp_fc_1_weight_palettized_cast_fp16, x = input_11_cast_fp16)[name = string("op_463_cast_fp16")];
tensor<fp16, [1, 11008, 1, 1]> blocks_1_mlp_fc_1_output_scales_to_fp16 = const()[name = string("blocks_1_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202530496)))];
tensor<fp16, [1, 11008, 1, 4]> input_13_cast_fp16 = mul(x = var_463_cast_fp16, y = blocks_1_mlp_fc_1_output_scales_to_fp16)[name = string("input_13_cast_fp16")];
tensor<int32, [2]> var_467 = const()[name = string("op_467"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_469 = const()[name = string("op_469"), val = tensor<int32, [2]>([1, 1])];
string var_471_pad_type_0 = const()[name = string("op_471_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_471_pad_0 = const()[name = string("op_471_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 11008, 1, 4]> var_471_cast_fp16 = conv(dilations = var_469, groups = var_264, pad = var_471_pad_0, pad_type = var_471_pad_type_0, strides = var_467, weight = blocks_1_mlp_fc_2_weight_palettized_cast_fp16, x = input_11_cast_fp16)[name = string("op_471_cast_fp16")];
tensor<fp16, [1, 11008, 1, 1]> blocks_1_mlp_fc_2_output_scales_to_fp16 = const()[name = string("blocks_1_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202552576)))];
tensor<fp16, [1, 11008, 1, 4]> x_fc_2_cast_fp16 = mul(x = var_471_cast_fp16, y = blocks_1_mlp_fc_2_output_scales_to_fp16)[name = string("x_fc_2_cast_fp16")];
tensor<fp16, [1, 11008, 1, 4]> var_473_cast_fp16 = silu(x = input_13_cast_fp16)[name = string("op_473_cast_fp16")];
tensor<fp16, [1, 11008, 1, 4]> input_cast_fp16 = mul(x = var_473_cast_fp16, y = x_fc_2_cast_fp16)[name = string("input_cast_fp16")];
tensor<int32, [2]> var_477 = const()[name = string("op_477"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_479 = const()[name = string("op_479"), val = tensor<int32, [2]>([1, 1])];
string var_481_pad_type_0 = const()[name = string("op_481_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_481_pad_0 = const()[name = string("op_481_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 4]> var_481_cast_fp16 = conv(dilations = var_479, groups = var_264, pad = var_481_pad_0, pad_type = var_481_pad_type_0, strides = var_477, weight = blocks_1_mlp_proj_weight_palettized_cast_fp16, x = input_cast_fp16)[name = string("op_481_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_mlp_proj_output_scales_to_fp16 = const()[name = string("blocks_1_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202574656)))];
tensor<fp16, [1, 4096, 1, 4]> var_482_cast_fp16 = mul(x = var_481_cast_fp16, y = blocks_1_mlp_proj_output_scales_to_fp16)[name = string("op_482_cast_fp16")];
tensor<fp16, [1, 4096, 1, 4]> x_29_cast_fp16 = add(x = var_482_cast_fp16, y = x_25_cast_fp16)[name = string("x_29_cast_fp16")];
int32 var_488 = const()[name = string("op_488"), val = int32(-1)];
int32 var_497 = const()[name = string("op_497"), val = int32(1)];
bool var_501 = const()[name = string("op_501"), val = bool(true)];
tensor<int32, [1]> var_517_axes_0 = const()[name = string("op_517_axes_0"), val = tensor<int32, [1]>([-2])];
tensor<fp16, [1, 4096, 4]> var_517_cast_fp16 = squeeze(axes = var_517_axes_0, x = x_29_cast_fp16)[name = string("op_517_cast_fp16")];
bool var_519_interleave_0 = const()[name = string("op_519_interleave_0"), val = bool(false)];
tensor<fp16, [1, 1, 4]> eps_chan_to_fp16 = const()[name = string("eps_chan_to_fp16"), val = tensor<fp16, [1, 1, 4]>([[[0x1.9e8p-3, 0x1.9e8p-3, 0x1.9e8p-3, 0x1.9e8p-3]]])];
tensor<fp16, [1, 4097, 4]> var_519_cast_fp16 = concat(axis = var_497, interleave = var_519_interleave_0, values = (var_517_cast_fp16, eps_chan_to_fp16))[name = string("op_519_cast_fp16")];
tensor<int32, [1]> x_eps_axes_0 = const()[name = string("x_eps_axes_0"), val = tensor<int32, [1]>([-2])];
tensor<fp16, [1, 4097, 1, 4]> x_eps_cast_fp16 = expand_dims(axes = x_eps_axes_0, x = var_519_cast_fp16)[name = string("x_eps_cast_fp16")];
tensor<int32, [1]> norm_x_axes_0 = const()[name = string("norm_x_axes_0"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 1, 4]> norm_x_cast_fp16 = reduce_l2_norm(axes = norm_x_axes_0, keep_dims = var_501, x = x_eps_cast_fp16)[name = string("norm_x_cast_fp16")];
tensor<fp16, [1, 4096, 1, 4]> x_normed_25_cast_fp16 = real_div(x = x_29_cast_fp16, y = norm_x_cast_fp16)[name = string("x_normed_25_cast_fp16")];
fp16 var_524_to_fp16 = const()[name = string("op_524_to_fp16"), val = fp16(0x1p+6)];
tensor<fp16, [1, 4096, 1, 4]> x_normed_27_cast_fp16 = mul(x = x_normed_25_cast_fp16, y = var_524_to_fp16)[name = string("x_normed_27_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> post_block_ln_f_weight_to_fp16 = const()[name = string("post_block_ln_f_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202582912)))];
tensor<fp16, [1, 4096, 1, 4]> x_cast_fp16 = mul(x = x_normed_27_cast_fp16, y = post_block_ln_f_weight_to_fp16)[name = string("x_cast_fp16")];
tensor<int32, [1]> var_528_axes_0 = const()[name = string("op_528_axes_0"), val = tensor<int32, [1]>([2])];
tensor<fp16, [1, 4096, 4]> var_528_cast_fp16 = squeeze(axes = var_528_axes_0, x = x_cast_fp16)[name = string("op_528_cast_fp16")];
tensor<int32, [3]> var_529_perm_0 = const()[name = string("op_529_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
tensor<int32, [2]> concat_4 = const()[name = string("concat_4"), val = tensor<int32, [2]>([4, 4096])];
tensor<fp16, [1, 4, 4096]> var_529_cast_fp16 = transpose(perm = var_529_perm_0, x = var_528_cast_fp16)[name = string("transpose_4")];
tensor<fp16, [4, 4096]> reshape_0_cast_fp16 = reshape(shape = concat_4, x = var_529_cast_fp16)[name = string("reshape_0_cast_fp16")];
bool matmul_0_transpose_x_0 = const()[name = string("matmul_0_transpose_x_0"), val = bool(false)];
bool matmul_0_transpose_y_0 = const()[name = string("matmul_0_transpose_y_0"), val = bool(false)];
tensor<fp16, [4096, 16384]> transpose_1_to_fp16 = const()[name = string("transpose_1_to_fp16"), val = tensor<fp16, [4096, 16384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202591168)))];
tensor<fp16, [4, 16384]> matmul_0_cast_fp16 = matmul(transpose_x = matmul_0_transpose_x_0, transpose_y = matmul_0_transpose_y_0, x = reshape_0_cast_fp16, y = transpose_1_to_fp16)[name = string("matmul_0_cast_fp16")];
tensor<int32, [3]> concat_8 = const()[name = string("concat_8"), val = tensor<int32, [3]>([1, 4, 16384])];
tensor<fp16, [1, 4, 16384]> reshape_2_cast_fp16 = reshape(shape = concat_8, x = matmul_0_cast_fp16)[name = string("reshape_2_cast_fp16")];
bool matmul_1_transpose_x_0 = const()[name = string("matmul_1_transpose_x_0"), val = bool(false)];
bool matmul_1_transpose_y_0 = const()[name = string("matmul_1_transpose_y_0"), val = bool(false)];
tensor<fp16, [4096, 15616]> transpose_3_to_fp16 = const()[name = string("transpose_3_to_fp16"), val = tensor<fp16, [4096, 15616]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(336808960)))];
tensor<fp16, [4, 15616]> matmul_1_cast_fp16 = matmul(transpose_x = matmul_1_transpose_x_0, transpose_y = matmul_1_transpose_y_0, x = reshape_0_cast_fp16, y = transpose_3_to_fp16)[name = string("matmul_1_cast_fp16")];
tensor<int32, [3]> concat_16 = const()[name = string("concat_16"), val = tensor<int32, [3]>([1, 4, 15616])];
tensor<fp16, [1, 4, 15616]> reshape_5_cast_fp16 = reshape(shape = concat_16, x = matmul_1_cast_fp16)[name = string("reshape_5_cast_fp16")];
bool var_538_interleave_0 = const()[name = string("op_538_interleave_0"), val = bool(false)];
tensor<fp16, [1, 4, 32000]> logits = concat(axis = var_488, interleave = var_538_interleave_0, values = (reshape_2_cast_fp16, reshape_5_cast_fp16))[name = string("op_538_cast_fp16")];
} -> (logits, new_k_cache_0, new_k_cache_1, new_v_cache_0, new_v_cache_1);
func input_512_context_512<ios18>(tensor<fp16, [128, 512]> cos, tensor<fp16, [1, 1, 512, 512]> mask, tensor<fp16, [128, 512]> sin, tensor<fp16, [1, 4096, 1, 512]> x) {
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [4096, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(8388736))))[name = string("blocks_0_attn_q_proj_weight_palettized_cast_fp16")];
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [4096, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(8388864))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(16777536))))[name = string("blocks_0_attn_k_proj_weight_palettized_cast_fp16")];
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [4096, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(16777664))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(25166336))))[name = string("blocks_0_attn_v_proj_weight_palettized_cast_fp16")];
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [4096, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(25166464))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(33555136))))[name = string("blocks_0_attn_proj_weight_palettized_cast_fp16")];
tensor<fp16, [11008, 4096, 1, 1]> blocks_0_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [11008, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(33555264))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(56099712))))[name = string("blocks_0_mlp_fc_1_weight_palettized_cast_fp16")];
tensor<fp16, [11008, 4096, 1, 1]> blocks_0_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [11008, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(56099840))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(78644288))))[name = string("blocks_0_mlp_fc_2_weight_palettized_cast_fp16")];
tensor<fp16, [4096, 11008, 1, 1]> blocks_0_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [4096, 11008, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(78644416))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(101188864))))[name = string("blocks_0_mlp_proj_weight_palettized_cast_fp16")];
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [4096, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(101188992))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(109577664))))[name = string("blocks_1_attn_q_proj_weight_palettized_cast_fp16")];
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [4096, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(109577792))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(117966464))))[name = string("blocks_1_attn_k_proj_weight_palettized_cast_fp16")];
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [4096, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(117966592))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(126355264))))[name = string("blocks_1_attn_v_proj_weight_palettized_cast_fp16")];
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [4096, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(126355392))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(134744064))))[name = string("blocks_1_attn_proj_weight_palettized_cast_fp16")];
tensor<fp16, [11008, 4096, 1, 1]> blocks_1_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [11008, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(134744192))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(157288640))))[name = string("blocks_1_mlp_fc_1_weight_palettized_cast_fp16")];
tensor<fp16, [11008, 4096, 1, 1]> blocks_1_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [11008, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(157288768))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(179833216))))[name = string("blocks_1_mlp_fc_2_weight_palettized_cast_fp16")];
tensor<fp16, [4096, 11008, 1, 1]> blocks_1_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense(indices = tensor<uint4, [4096, 11008, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(179833344))), lut = tensor<fp16, [1, 1, 1, 1, 16, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202377792))))[name = string("blocks_1_mlp_proj_weight_palettized_cast_fp16")];
int32 var_23 = const()[name = string("op_23"), val = int32(3)];
int32 var_24 = const()[name = string("op_24"), val = int32(1)];
int32 var_27 = const()[name = string("op_27"), val = int32(-2)];
bool var_28 = const()[name = string("op_28"), val = bool(true)];
tensor<int32, [1]> var_46_axes_0 = const()[name = string("op_46_axes_0"), val = tensor<int32, [1]>([-2])];
tensor<fp16, [1, 4096, 512]> var_46_cast_fp16 = squeeze(axes = var_46_axes_0, x = x)[name = string("op_46_cast_fp16")];
bool var_48_interleave_0 = const()[name = string("op_48_interleave_0"), val = bool(false)];
tensor<fp16, [1, 1, 512]> eps_chan_1_to_fp16 = const()[name = string("eps_chan_1_to_fp16"), val = tensor<fp16, [1, 1, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202377920)))];
tensor<fp16, [1, 4097, 512]> var_48_cast_fp16 = concat(axis = var_24, interleave = var_48_interleave_0, values = (var_46_cast_fp16, eps_chan_1_to_fp16))[name = string("op_48_cast_fp16")];
tensor<int32, [1]> x_eps_1_axes_0 = const()[name = string("x_eps_1_axes_0"), val = tensor<int32, [1]>([-2])];
tensor<fp16, [1, 4097, 1, 512]> x_eps_1_cast_fp16 = expand_dims(axes = x_eps_1_axes_0, x = var_48_cast_fp16)[name = string("x_eps_1_cast_fp16")];
tensor<int32, [1]> norm_x_1_axes_0 = const()[name = string("norm_x_1_axes_0"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 1, 512]> norm_x_1_cast_fp16 = reduce_l2_norm(axes = norm_x_1_axes_0, keep_dims = var_28, x = x_eps_1_cast_fp16)[name = string("norm_x_1_cast_fp16")];
tensor<fp16, [1, 4096, 1, 512]> x_normed_1_cast_fp16 = real_div(x = x, y = norm_x_1_cast_fp16)[name = string("x_normed_1_cast_fp16")];
fp16 var_53_to_fp16 = const()[name = string("op_53_to_fp16"), val = fp16(0x1p+6)];
tensor<fp16, [1, 4096, 1, 512]> x_normed_3_cast_fp16 = mul(x = x_normed_1_cast_fp16, y = var_53_to_fp16)[name = string("x_normed_3_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_norm_1_weight_to_fp16 = const()[name = string("blocks_0_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202379008)))];
tensor<fp16, [1, 4096, 1, 512]> x_5_cast_fp16 = mul(x = x_normed_3_cast_fp16, y = blocks_0_norm_1_weight_to_fp16)[name = string("x_5_cast_fp16")];
tensor<int32, [2]> var_66 = const()[name = string("op_66"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_68 = const()[name = string("op_68"), val = tensor<int32, [2]>([1, 1])];
string var_70_pad_type_0 = const()[name = string("op_70_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_70_pad_0 = const()[name = string("op_70_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 512]> var_70_cast_fp16 = conv(dilations = var_68, groups = var_24, pad = var_70_pad_0, pad_type = var_70_pad_type_0, strides = var_66, weight = blocks_0_attn_q_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = string("op_70_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_q_proj_output_scales_to_fp16 = const()[name = string("blocks_0_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202387264)))];
tensor<fp16, [1, 4096, 1, 512]> q_1_cast_fp16 = mul(x = var_70_cast_fp16, y = blocks_0_attn_q_proj_output_scales_to_fp16)[name = string("q_1_cast_fp16")];
tensor<int32, [2]> var_74 = const()[name = string("op_74"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_76 = const()[name = string("op_76"), val = tensor<int32, [2]>([1, 1])];
string var_78_pad_type_0 = const()[name = string("op_78_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_78_pad_0 = const()[name = string("op_78_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 512]> var_78_cast_fp16 = conv(dilations = var_76, groups = var_24, pad = var_78_pad_0, pad_type = var_78_pad_type_0, strides = var_74, weight = blocks_0_attn_k_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = string("op_78_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_k_proj_output_scales_to_fp16 = const()[name = string("blocks_0_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202395520)))];
tensor<fp16, [1, 4096, 1, 512]> k_1_cast_fp16 = mul(x = var_78_cast_fp16, y = blocks_0_attn_k_proj_output_scales_to_fp16)[name = string("k_1_cast_fp16")];
tensor<int32, [2]> var_82 = const()[name = string("op_82"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_84 = const()[name = string("op_84"), val = tensor<int32, [2]>([1, 1])];
string var_86_pad_type_0 = const()[name = string("op_86_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_86_pad_0 = const()[name = string("op_86_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 512]> var_86_cast_fp16 = conv(dilations = var_84, groups = var_24, pad = var_86_pad_0, pad_type = var_86_pad_type_0, strides = var_82, weight = blocks_0_attn_v_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = string("op_86_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_v_proj_output_scales_to_fp16 = const()[name = string("blocks_0_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202403776)))];
tensor<fp16, [1, 4096, 1, 512]> v_1_cast_fp16 = mul(x = var_86_cast_fp16, y = blocks_0_attn_v_proj_output_scales_to_fp16)[name = string("v_1_cast_fp16")];
tensor<int32, [4]> var_88 = const()[name = string("op_88"), val = tensor<int32, [4]>([1, 32, 128, 512])];
tensor<fp16, [1, 32, 128, 512]> q_3_cast_fp16 = reshape(shape = var_88, x = q_1_cast_fp16)[name = string("q_3_cast_fp16")];
tensor<int32, [4]> var_90 = const()[name = string("op_90"), val = tensor<int32, [4]>([1, 32, 128, 512])];
tensor<fp16, [1, 32, 128, 512]> k_3_cast_fp16 = reshape(shape = var_90, x = k_1_cast_fp16)[name = string("k_3_cast_fp16")];
tensor<int32, [4]> var_92 = const()[name = string("op_92"), val = tensor<int32, [4]>([1, 32, 128, 512])];
tensor<fp16, [1, 32, 128, 512]> v_3_cast_fp16 = reshape(shape = var_92, x = v_1_cast_fp16)[name = string("v_3_cast_fp16")];
tensor<int32, [4]> var_104_begin_0 = const()[name = string("op_104_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_104_end_0 = const()[name = string("op_104_end_0"), val = tensor<int32, [4]>([1, 32, 64, 512])];
tensor<bool, [4]> var_104_end_mask_0 = const()[name = string("op_104_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 64, 512]> var_104_cast_fp16 = slice_by_index(begin = var_104_begin_0, end = var_104_end_0, end_mask = var_104_end_mask_0, x = q_3_cast_fp16)[name = string("op_104_cast_fp16")];
tensor<int32, [4]> var_110_begin_0 = const()[name = string("op_110_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
tensor<int32, [4]> var_110_end_0 = const()[name = string("op_110_end_0"), val = tensor<int32, [4]>([1, 32, 128, 512])];
tensor<bool, [4]> var_110_end_mask_0 = const()[name = string("op_110_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
tensor<fp16, [1, 32, 64, 512]> var_110_cast_fp16 = slice_by_index(begin = var_110_begin_0, end = var_110_end_0, end_mask = var_110_end_mask_0, x = q_3_cast_fp16)[name = string("op_110_cast_fp16")];
fp16 const_6_promoted_to_fp16 = const()[name = string("const_6_promoted_to_fp16"), val = fp16(-0x1p+0)];
tensor<fp16, [1, 32, 64, 512]> var_112_cast_fp16 = mul(x = var_110_cast_fp16, y = const_6_promoted_to_fp16)[name = string("op_112_cast_fp16")];
bool rotated_1_interleave_0 = const()[name = string("rotated_1_interleave_0"), val = bool(false)];
tensor<fp16, [1, 32, 128, 512]> rotated_1_cast_fp16 = concat(axis = var_27, interleave = rotated_1_interleave_0, values = (var_112_cast_fp16, var_104_cast_fp16))[name = string("rotated_1_cast_fp16")];
tensor<fp16, [1, 32, 128, 512]> var_115_cast_fp16 = mul(x = q_3_cast_fp16, y = cos)[name = string("op_115_cast_fp16")];
tensor<fp16, [1, 32, 128, 512]> var_116_cast_fp16 = mul(x = rotated_1_cast_fp16, y = sin)[name = string("op_116_cast_fp16")];
tensor<fp16, [1, 32, 128, 512]> roped_1_cast_fp16 = add(x = var_115_cast_fp16, y = var_116_cast_fp16)[name = string("roped_1_cast_fp16")];
tensor<int32, [4]> var_129_begin_0 = const()[name = string("op_129_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_129_end_0 = const()[name = string("op_129_end_0"), val = tensor<int32, [4]>([1, 32, 64, 512])];
tensor<bool, [4]> var_129_end_mask_0 = const()[name = string("op_129_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 64, 512]> var_129_cast_fp16 = slice_by_index(begin = var_129_begin_0, end = var_129_end_0, end_mask = var_129_end_mask_0, x = k_3_cast_fp16)[name = string("op_129_cast_fp16")];
tensor<int32, [4]> var_135_begin_0 = const()[name = string("op_135_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
tensor<int32, [4]> var_135_end_0 = const()[name = string("op_135_end_0"), val = tensor<int32, [4]>([1, 32, 128, 512])];
tensor<bool, [4]> var_135_end_mask_0 = const()[name = string("op_135_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
tensor<fp16, [1, 32, 64, 512]> var_135_cast_fp16 = slice_by_index(begin = var_135_begin_0, end = var_135_end_0, end_mask = var_135_end_mask_0, x = k_3_cast_fp16)[name = string("op_135_cast_fp16")];
fp16 const_8_promoted_to_fp16 = const()[name = string("const_8_promoted_to_fp16"), val = fp16(-0x1p+0)];
tensor<fp16, [1, 32, 64, 512]> var_137_cast_fp16 = mul(x = var_135_cast_fp16, y = const_8_promoted_to_fp16)[name = string("op_137_cast_fp16")];
bool rotated_3_interleave_0 = const()[name = string("rotated_3_interleave_0"), val = bool(false)];
tensor<fp16, [1, 32, 128, 512]> rotated_3_cast_fp16 = concat(axis = var_27, interleave = rotated_3_interleave_0, values = (var_137_cast_fp16, var_129_cast_fp16))[name = string("rotated_3_cast_fp16")];
tensor<fp16, [1, 32, 128, 512]> var_140_cast_fp16 = mul(x = k_3_cast_fp16, y = cos)[name = string("op_140_cast_fp16")];
tensor<fp16, [1, 32, 128, 512]> var_141_cast_fp16 = mul(x = rotated_3_cast_fp16, y = sin)[name = string("op_141_cast_fp16")];
tensor<fp16, [1, 32, 128, 512]> roped_3_cast_fp16 = add(x = var_140_cast_fp16, y = var_141_cast_fp16)[name = string("roped_3_cast_fp16")];
tensor<int32, [4]> v_5_perm_0 = const()[name = string("v_5_perm_0"), val = tensor<int32, [4]>([0, 1, -1, -2])];
tensor<int32, [4]> var_145_begin_0 = const()[name = string("op_145_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 1])];
tensor<int32, [4]> var_145_end_0 = const()[name = string("op_145_end_0"), val = tensor<int32, [4]>([1, 32, 128, 509])];
tensor<bool, [4]> var_145_end_mask_0 = const()[name = string("op_145_end_mask_0"), val = tensor<bool, [4]>([true, true, true, false])];
tensor<fp16, [1, 32, 128, 508]> new_k_cache_0 = slice_by_index(begin = var_145_begin_0, end = var_145_end_0, end_mask = var_145_end_mask_0, x = roped_3_cast_fp16)[name = string("op_145_cast_fp16")];
tensor<int32, [4]> var_146_begin_0 = const()[name = string("op_146_begin_0"), val = tensor<int32, [4]>([0, 0, 1, 0])];
tensor<int32, [4]> var_146_end_0 = const()[name = string("op_146_end_0"), val = tensor<int32, [4]>([1, 32, 509, 128])];
tensor<bool, [4]> var_146_end_mask_0 = const()[name = string("op_146_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 512, 128]> v_5_cast_fp16 = transpose(perm = v_5_perm_0, x = v_3_cast_fp16)[name = string("transpose_8")];
tensor<fp16, [1, 32, 508, 128]> new_v_cache_0 = slice_by_index(begin = var_146_begin_0, end = var_146_end_0, end_mask = var_146_end_mask_0, x = v_5_cast_fp16)[name = string("op_146_cast_fp16")];
fp16 var_160_to_fp16 = const()[name = string("op_160_to_fp16"), val = fp16(0x1.6ap-4)];
tensor<fp16, [1, 32, 128, 512]> var_161_cast_fp16 = mul(x = roped_1_cast_fp16, y = var_160_to_fp16)[name = string("op_161_cast_fp16")];
bool attn_weights_1_transpose_x_0 = const()[name = string("attn_weights_1_transpose_x_0"), val = bool(true)];
bool attn_weights_1_transpose_y_0 = const()[name = string("attn_weights_1_transpose_y_0"), val = bool(false)];
tensor<fp16, [1, 32, 512, 512]> attn_weights_1_cast_fp16 = matmul(transpose_x = attn_weights_1_transpose_x_0, transpose_y = attn_weights_1_transpose_y_0, x = var_161_cast_fp16, y = roped_3_cast_fp16)[name = string("attn_weights_1_cast_fp16")];
tensor<fp16, [1, 32, 512, 512]> attn_weights_3_cast_fp16 = add(x = attn_weights_1_cast_fp16, y = mask)[name = string("attn_weights_3_cast_fp16")];
tensor<fp16, [1, 32, 512, 512]> attn_weights_5_cast_fp16 = softmax(axis = var_23, x = attn_weights_3_cast_fp16)[name = string("attn_weights_5_cast_fp16")];
bool var_170_transpose_x_1 = const()[name = string("op_170_transpose_x_1"), val = bool(false)];
bool var_170_transpose_y_1 = const()[name = string("op_170_transpose_y_1"), val = bool(true)];
tensor<fp16, [1, 32, 512, 128]> var_170_cast_fp16 = matmul(transpose_x = var_170_transpose_x_1, transpose_y = var_170_transpose_y_1, x = attn_weights_5_cast_fp16, y = v_3_cast_fp16)[name = string("op_170_cast_fp16")];
tensor<int32, [4]> attn_1_perm_0 = const()[name = string("attn_1_perm_0"), val = tensor<int32, [4]>([0, 1, -1, -2])];
tensor<int32, [4]> var_173 = const()[name = string("op_173"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
tensor<fp16, [1, 32, 128, 512]> attn_1_cast_fp16 = transpose(perm = attn_1_perm_0, x = var_170_cast_fp16)[name = string("transpose_7")];
tensor<fp16, [1, 4096, 1, 512]> input_1_cast_fp16 = reshape(shape = var_173, x = attn_1_cast_fp16)[name = string("input_1_cast_fp16")];
tensor<int32, [2]> var_177 = const()[name = string("op_177"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_179 = const()[name = string("op_179"), val = tensor<int32, [2]>([1, 1])];
string var_181_pad_type_0 = const()[name = string("op_181_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_181_pad_0 = const()[name = string("op_181_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 512]> var_181_cast_fp16 = conv(dilations = var_179, groups = var_24, pad = var_181_pad_0, pad_type = var_181_pad_type_0, strides = var_177, weight = blocks_0_attn_proj_weight_palettized_cast_fp16, x = input_1_cast_fp16)[name = string("op_181_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_proj_output_scales_to_fp16 = const()[name = string("blocks_0_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202412032)))];
tensor<fp16, [1, 4096, 1, 512]> attention_output_1_cast_fp16 = mul(x = var_181_cast_fp16, y = blocks_0_attn_proj_output_scales_to_fp16)[name = string("attention_output_1_cast_fp16")];
tensor<fp16, [1, 4096, 1, 512]> x_11_cast_fp16 = add(x = attention_output_1_cast_fp16, y = x)[name = string("x_11_cast_fp16")];
tensor<int32, [1]> var_200_axes_0 = const()[name = string("op_200_axes_0"), val = tensor<int32, [1]>([-2])];
tensor<fp16, [1, 4096, 512]> var_200_cast_fp16 = squeeze(axes = var_200_axes_0, x = x_11_cast_fp16)[name = string("op_200_cast_fp16")];
bool var_202_interleave_0 = const()[name = string("op_202_interleave_0"), val = bool(false)];
tensor<fp16, [1, 4097, 512]> var_202_cast_fp16 = concat(axis = var_24, interleave = var_202_interleave_0, values = (var_200_cast_fp16, eps_chan_1_to_fp16))[name = string("op_202_cast_fp16")];
tensor<int32, [1]> x_eps_3_axes_0 = const()[name = string("x_eps_3_axes_0"), val = tensor<int32, [1]>([-2])];
tensor<fp16, [1, 4097, 1, 512]> x_eps_3_cast_fp16 = expand_dims(axes = x_eps_3_axes_0, x = var_202_cast_fp16)[name = string("x_eps_3_cast_fp16")];
tensor<int32, [1]> norm_x_3_axes_0 = const()[name = string("norm_x_3_axes_0"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 1, 512]> norm_x_3_cast_fp16 = reduce_l2_norm(axes = norm_x_3_axes_0, keep_dims = var_28, x = x_eps_3_cast_fp16)[name = string("norm_x_3_cast_fp16")];
tensor<fp16, [1, 4096, 1, 512]> x_normed_7_cast_fp16 = real_div(x = x_11_cast_fp16, y = norm_x_3_cast_fp16)[name = string("x_normed_7_cast_fp16")];
fp16 var_207_to_fp16 = const()[name = string("op_207_to_fp16"), val = fp16(0x1p+6)];
tensor<fp16, [1, 4096, 1, 512]> x_normed_9_cast_fp16 = mul(x = x_normed_7_cast_fp16, y = var_207_to_fp16)[name = string("x_normed_9_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_norm_2_weight_to_fp16 = const()[name = string("blocks_0_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202420288)))];
tensor<fp16, [1, 4096, 1, 512]> input_3_cast_fp16 = mul(x = x_normed_9_cast_fp16, y = blocks_0_norm_2_weight_to_fp16)[name = string("input_3_cast_fp16")];
tensor<int32, [2]> var_219 = const()[name = string("op_219"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_221 = const()[name = string("op_221"), val = tensor<int32, [2]>([1, 1])];
string var_223_pad_type_0 = const()[name = string("op_223_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_223_pad_0 = const()[name = string("op_223_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 11008, 1, 512]> var_223_cast_fp16 = conv(dilations = var_221, groups = var_24, pad = var_223_pad_0, pad_type = var_223_pad_type_0, strides = var_219, weight = blocks_0_mlp_fc_1_weight_palettized_cast_fp16, x = input_3_cast_fp16)[name = string("op_223_cast_fp16")];
tensor<fp16, [1, 11008, 1, 1]> blocks_0_mlp_fc_1_output_scales_to_fp16 = const()[name = string("blocks_0_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202428544)))];
tensor<fp16, [1, 11008, 1, 512]> input_5_cast_fp16 = mul(x = var_223_cast_fp16, y = blocks_0_mlp_fc_1_output_scales_to_fp16)[name = string("input_5_cast_fp16")];
tensor<int32, [2]> var_227 = const()[name = string("op_227"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_229 = const()[name = string("op_229"), val = tensor<int32, [2]>([1, 1])];
string var_231_pad_type_0 = const()[name = string("op_231_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_231_pad_0 = const()[name = string("op_231_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 11008, 1, 512]> var_231_cast_fp16 = conv(dilations = var_229, groups = var_24, pad = var_231_pad_0, pad_type = var_231_pad_type_0, strides = var_227, weight = blocks_0_mlp_fc_2_weight_palettized_cast_fp16, x = input_3_cast_fp16)[name = string("op_231_cast_fp16")];
tensor<fp16, [1, 11008, 1, 1]> blocks_0_mlp_fc_2_output_scales_to_fp16 = const()[name = string("blocks_0_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202450624)))];
tensor<fp16, [1, 11008, 1, 512]> x_fc_2_1_cast_fp16 = mul(x = var_231_cast_fp16, y = blocks_0_mlp_fc_2_output_scales_to_fp16)[name = string("x_fc_2_1_cast_fp16")];
tensor<fp16, [1, 11008, 1, 512]> var_233_cast_fp16 = silu(x = input_5_cast_fp16)[name = string("op_233_cast_fp16")];
tensor<fp16, [1, 11008, 1, 512]> input_7_cast_fp16 = mul(x = var_233_cast_fp16, y = x_fc_2_1_cast_fp16)[name = string("input_7_cast_fp16")];
tensor<int32, [2]> var_237 = const()[name = string("op_237"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_239 = const()[name = string("op_239"), val = tensor<int32, [2]>([1, 1])];
string var_241_pad_type_0 = const()[name = string("op_241_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_241_pad_0 = const()[name = string("op_241_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 512]> var_241_cast_fp16 = conv(dilations = var_239, groups = var_24, pad = var_241_pad_0, pad_type = var_241_pad_type_0, strides = var_237, weight = blocks_0_mlp_proj_weight_palettized_cast_fp16, x = input_7_cast_fp16)[name = string("op_241_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_mlp_proj_output_scales_to_fp16 = const()[name = string("blocks_0_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202472704)))];
tensor<fp16, [1, 4096, 1, 512]> var_242_cast_fp16 = mul(x = var_241_cast_fp16, y = blocks_0_mlp_proj_output_scales_to_fp16)[name = string("op_242_cast_fp16")];
tensor<fp16, [1, 4096, 1, 512]> x_15_cast_fp16 = add(x = var_242_cast_fp16, y = x_11_cast_fp16)[name = string("x_15_cast_fp16")];
int32 var_261 = const()[name = string("op_261"), val = int32(3)];
int32 var_262 = const()[name = string("op_262"), val = int32(1)];
int32 var_265 = const()[name = string("op_265"), val = int32(-2)];
bool var_266 = const()[name = string("op_266"), val = bool(true)];
tensor<int32, [1]> var_283_axes_0 = const()[name = string("op_283_axes_0"), val = tensor<int32, [1]>([-2])];
tensor<fp16, [1, 4096, 512]> var_283_cast_fp16 = squeeze(axes = var_283_axes_0, x = x_15_cast_fp16)[name = string("op_283_cast_fp16")];
bool var_285_interleave_0 = const()[name = string("op_285_interleave_0"), val = bool(false)];
tensor<fp16, [1, 4097, 512]> var_285_cast_fp16 = concat(axis = var_262, interleave = var_285_interleave_0, values = (var_283_cast_fp16, eps_chan_1_to_fp16))[name = string("op_285_cast_fp16")];
tensor<int32, [1]> x_eps_5_axes_0 = const()[name = string("x_eps_5_axes_0"), val = tensor<int32, [1]>([-2])];
tensor<fp16, [1, 4097, 1, 512]> x_eps_5_cast_fp16 = expand_dims(axes = x_eps_5_axes_0, x = var_285_cast_fp16)[name = string("x_eps_5_cast_fp16")];
tensor<int32, [1]> norm_x_5_axes_0 = const()[name = string("norm_x_5_axes_0"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 1, 512]> norm_x_5_cast_fp16 = reduce_l2_norm(axes = norm_x_5_axes_0, keep_dims = var_266, x = x_eps_5_cast_fp16)[name = string("norm_x_5_cast_fp16")];
tensor<fp16, [1, 4096, 1, 512]> x_normed_13_cast_fp16 = real_div(x = x_15_cast_fp16, y = norm_x_5_cast_fp16)[name = string("x_normed_13_cast_fp16")];
fp16 var_290_to_fp16 = const()[name = string("op_290_to_fp16"), val = fp16(0x1p+6)];
tensor<fp16, [1, 4096, 1, 512]> x_normed_15_cast_fp16 = mul(x = x_normed_13_cast_fp16, y = var_290_to_fp16)[name = string("x_normed_15_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_norm_1_weight_to_fp16 = const()[name = string("blocks_1_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202480960)))];
tensor<fp16, [1, 4096, 1, 512]> x_19_cast_fp16 = mul(x = x_normed_15_cast_fp16, y = blocks_1_norm_1_weight_to_fp16)[name = string("x_19_cast_fp16")];
tensor<int32, [2]> var_306 = const()[name = string("op_306"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_308 = const()[name = string("op_308"), val = tensor<int32, [2]>([1, 1])];
string var_310_pad_type_0 = const()[name = string("op_310_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_310_pad_0 = const()[name = string("op_310_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 512]> var_310_cast_fp16 = conv(dilations = var_308, groups = var_262, pad = var_310_pad_0, pad_type = var_310_pad_type_0, strides = var_306, weight = blocks_1_attn_q_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = string("op_310_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_q_proj_output_scales_to_fp16 = const()[name = string("blocks_1_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202489216)))];
tensor<fp16, [1, 4096, 1, 512]> q_7_cast_fp16 = mul(x = var_310_cast_fp16, y = blocks_1_attn_q_proj_output_scales_to_fp16)[name = string("q_7_cast_fp16")];
tensor<int32, [2]> var_314 = const()[name = string("op_314"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_316 = const()[name = string("op_316"), val = tensor<int32, [2]>([1, 1])];
string var_318_pad_type_0 = const()[name = string("op_318_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_318_pad_0 = const()[name = string("op_318_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 512]> var_318_cast_fp16 = conv(dilations = var_316, groups = var_262, pad = var_318_pad_0, pad_type = var_318_pad_type_0, strides = var_314, weight = blocks_1_attn_k_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = string("op_318_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_k_proj_output_scales_to_fp16 = const()[name = string("blocks_1_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202497472)))];
tensor<fp16, [1, 4096, 1, 512]> k_7_cast_fp16 = mul(x = var_318_cast_fp16, y = blocks_1_attn_k_proj_output_scales_to_fp16)[name = string("k_7_cast_fp16")];
tensor<int32, [2]> var_322 = const()[name = string("op_322"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_324 = const()[name = string("op_324"), val = tensor<int32, [2]>([1, 1])];
string var_326_pad_type_0 = const()[name = string("op_326_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_326_pad_0 = const()[name = string("op_326_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 512]> var_326_cast_fp16 = conv(dilations = var_324, groups = var_262, pad = var_326_pad_0, pad_type = var_326_pad_type_0, strides = var_322, weight = blocks_1_attn_v_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = string("op_326_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_v_proj_output_scales_to_fp16 = const()[name = string("blocks_1_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202505728)))];
tensor<fp16, [1, 4096, 1, 512]> v_7_cast_fp16 = mul(x = var_326_cast_fp16, y = blocks_1_attn_v_proj_output_scales_to_fp16)[name = string("v_7_cast_fp16")];
tensor<int32, [4]> var_328 = const()[name = string("op_328"), val = tensor<int32, [4]>([1, 32, 128, 512])];
tensor<fp16, [1, 32, 128, 512]> q_9_cast_fp16 = reshape(shape = var_328, x = q_7_cast_fp16)[name = string("q_9_cast_fp16")];
tensor<int32, [4]> var_330 = const()[name = string("op_330"), val = tensor<int32, [4]>([1, 32, 128, 512])];
tensor<fp16, [1, 32, 128, 512]> k_9_cast_fp16 = reshape(shape = var_330, x = k_7_cast_fp16)[name = string("k_9_cast_fp16")];
tensor<int32, [4]> var_332 = const()[name = string("op_332"), val = tensor<int32, [4]>([1, 32, 128, 512])];
tensor<fp16, [1, 32, 128, 512]> v_9_cast_fp16 = reshape(shape = var_332, x = v_7_cast_fp16)[name = string("v_9_cast_fp16")];
tensor<int32, [4]> var_344_begin_0 = const()[name = string("op_344_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_344_end_0 = const()[name = string("op_344_end_0"), val = tensor<int32, [4]>([1, 32, 64, 512])];
tensor<bool, [4]> var_344_end_mask_0 = const()[name = string("op_344_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 64, 512]> var_344_cast_fp16 = slice_by_index(begin = var_344_begin_0, end = var_344_end_0, end_mask = var_344_end_mask_0, x = q_9_cast_fp16)[name = string("op_344_cast_fp16")];
tensor<int32, [4]> var_350_begin_0 = const()[name = string("op_350_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
tensor<int32, [4]> var_350_end_0 = const()[name = string("op_350_end_0"), val = tensor<int32, [4]>([1, 32, 128, 512])];
tensor<bool, [4]> var_350_end_mask_0 = const()[name = string("op_350_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
tensor<fp16, [1, 32, 64, 512]> var_350_cast_fp16 = slice_by_index(begin = var_350_begin_0, end = var_350_end_0, end_mask = var_350_end_mask_0, x = q_9_cast_fp16)[name = string("op_350_cast_fp16")];
fp16 const_19_promoted_to_fp16 = const()[name = string("const_19_promoted_to_fp16"), val = fp16(-0x1p+0)];
tensor<fp16, [1, 32, 64, 512]> var_352_cast_fp16 = mul(x = var_350_cast_fp16, y = const_19_promoted_to_fp16)[name = string("op_352_cast_fp16")];
bool rotated_5_interleave_0 = const()[name = string("rotated_5_interleave_0"), val = bool(false)];
tensor<fp16, [1, 32, 128, 512]> rotated_5_cast_fp16 = concat(axis = var_265, interleave = rotated_5_interleave_0, values = (var_352_cast_fp16, var_344_cast_fp16))[name = string("rotated_5_cast_fp16")];
tensor<fp16, [1, 32, 128, 512]> var_355_cast_fp16 = mul(x = q_9_cast_fp16, y = cos)[name = string("op_355_cast_fp16")];
tensor<fp16, [1, 32, 128, 512]> var_356_cast_fp16 = mul(x = rotated_5_cast_fp16, y = sin)[name = string("op_356_cast_fp16")];
tensor<fp16, [1, 32, 128, 512]> roped_5_cast_fp16 = add(x = var_355_cast_fp16, y = var_356_cast_fp16)[name = string("roped_5_cast_fp16")];
tensor<int32, [4]> var_369_begin_0 = const()[name = string("op_369_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_369_end_0 = const()[name = string("op_369_end_0"), val = tensor<int32, [4]>([1, 32, 64, 512])];
tensor<bool, [4]> var_369_end_mask_0 = const()[name = string("op_369_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 64, 512]> var_369_cast_fp16 = slice_by_index(begin = var_369_begin_0, end = var_369_end_0, end_mask = var_369_end_mask_0, x = k_9_cast_fp16)[name = string("op_369_cast_fp16")];
tensor<int32, [4]> var_375_begin_0 = const()[name = string("op_375_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
tensor<int32, [4]> var_375_end_0 = const()[name = string("op_375_end_0"), val = tensor<int32, [4]>([1, 32, 128, 512])];
tensor<bool, [4]> var_375_end_mask_0 = const()[name = string("op_375_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
tensor<fp16, [1, 32, 64, 512]> var_375_cast_fp16 = slice_by_index(begin = var_375_begin_0, end = var_375_end_0, end_mask = var_375_end_mask_0, x = k_9_cast_fp16)[name = string("op_375_cast_fp16")];
fp16 const_21_promoted_to_fp16 = const()[name = string("const_21_promoted_to_fp16"), val = fp16(-0x1p+0)];
tensor<fp16, [1, 32, 64, 512]> var_377_cast_fp16 = mul(x = var_375_cast_fp16, y = const_21_promoted_to_fp16)[name = string("op_377_cast_fp16")];
bool rotated_interleave_0 = const()[name = string("rotated_interleave_0"), val = bool(false)];
tensor<fp16, [1, 32, 128, 512]> rotated_cast_fp16 = concat(axis = var_265, interleave = rotated_interleave_0, values = (var_377_cast_fp16, var_369_cast_fp16))[name = string("rotated_cast_fp16")];
tensor<fp16, [1, 32, 128, 512]> var_380_cast_fp16 = mul(x = k_9_cast_fp16, y = cos)[name = string("op_380_cast_fp16")];
tensor<fp16, [1, 32, 128, 512]> var_381_cast_fp16 = mul(x = rotated_cast_fp16, y = sin)[name = string("op_381_cast_fp16")];
tensor<fp16, [1, 32, 128, 512]> roped_cast_fp16 = add(x = var_380_cast_fp16, y = var_381_cast_fp16)[name = string("roped_cast_fp16")];
tensor<int32, [4]> v_perm_0 = const()[name = string("v_perm_0"), val = tensor<int32, [4]>([0, 1, -1, -2])];
tensor<int32, [4]> var_385_begin_0 = const()[name = string("op_385_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 1])];
tensor<int32, [4]> var_385_end_0 = const()[name = string("op_385_end_0"), val = tensor<int32, [4]>([1, 32, 128, 509])];
tensor<bool, [4]> var_385_end_mask_0 = const()[name = string("op_385_end_mask_0"), val = tensor<bool, [4]>([true, true, true, false])];
tensor<fp16, [1, 32, 128, 508]> new_k_cache_1 = slice_by_index(begin = var_385_begin_0, end = var_385_end_0, end_mask = var_385_end_mask_0, x = roped_cast_fp16)[name = string("op_385_cast_fp16")];
tensor<int32, [4]> var_386_begin_0 = const()[name = string("op_386_begin_0"), val = tensor<int32, [4]>([0, 0, 1, 0])];
tensor<int32, [4]> var_386_end_0 = const()[name = string("op_386_end_0"), val = tensor<int32, [4]>([1, 32, 509, 128])];
tensor<bool, [4]> var_386_end_mask_0 = const()[name = string("op_386_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 512, 128]> v_cast_fp16 = transpose(perm = v_perm_0, x = v_9_cast_fp16)[name = string("transpose_6")];
tensor<fp16, [1, 32, 508, 128]> new_v_cache_1 = slice_by_index(begin = var_386_begin_0, end = var_386_end_0, end_mask = var_386_end_mask_0, x = v_cast_fp16)[name = string("op_386_cast_fp16")];
fp16 var_400_to_fp16 = const()[name = string("op_400_to_fp16"), val = fp16(0x1.6ap-4)];
tensor<fp16, [1, 32, 128, 512]> var_401_cast_fp16 = mul(x = roped_5_cast_fp16, y = var_400_to_fp16)[name = string("op_401_cast_fp16")];
bool attn_weights_7_transpose_x_0 = const()[name = string("attn_weights_7_transpose_x_0"), val = bool(true)];
bool attn_weights_7_transpose_y_0 = const()[name = string("attn_weights_7_transpose_y_0"), val = bool(false)];
tensor<fp16, [1, 32, 512, 512]> attn_weights_7_cast_fp16 = matmul(transpose_x = attn_weights_7_transpose_x_0, transpose_y = attn_weights_7_transpose_y_0, x = var_401_cast_fp16, y = roped_cast_fp16)[name = string("attn_weights_7_cast_fp16")];
tensor<fp16, [1, 32, 512, 512]> attn_weights_9_cast_fp16 = add(x = attn_weights_7_cast_fp16, y = mask)[name = string("attn_weights_9_cast_fp16")];
tensor<fp16, [1, 32, 512, 512]> attn_weights_cast_fp16 = softmax(axis = var_261, x = attn_weights_9_cast_fp16)[name = string("attn_weights_cast_fp16")];
bool var_410_transpose_x_1 = const()[name = string("op_410_transpose_x_1"), val = bool(false)];
bool var_410_transpose_y_1 = const()[name = string("op_410_transpose_y_1"), val = bool(true)];
tensor<fp16, [1, 32, 512, 128]> var_410_cast_fp16 = matmul(transpose_x = var_410_transpose_x_1, transpose_y = var_410_transpose_y_1, x = attn_weights_cast_fp16, y = v_9_cast_fp16)[name = string("op_410_cast_fp16")];
tensor<int32, [4]> attn_3_perm_0 = const()[name = string("attn_3_perm_0"), val = tensor<int32, [4]>([0, 1, -1, -2])];
tensor<int32, [4]> var_413 = const()[name = string("op_413"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
tensor<fp16, [1, 32, 128, 512]> attn_3_cast_fp16 = transpose(perm = attn_3_perm_0, x = var_410_cast_fp16)[name = string("transpose_5")];
tensor<fp16, [1, 4096, 1, 512]> input_9_cast_fp16 = reshape(shape = var_413, x = attn_3_cast_fp16)[name = string("input_9_cast_fp16")];
tensor<int32, [2]> var_417 = const()[name = string("op_417"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_419 = const()[name = string("op_419"), val = tensor<int32, [2]>([1, 1])];
string var_421_pad_type_0 = const()[name = string("op_421_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_421_pad_0 = const()[name = string("op_421_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 512]> var_421_cast_fp16 = conv(dilations = var_419, groups = var_262, pad = var_421_pad_0, pad_type = var_421_pad_type_0, strides = var_417, weight = blocks_1_attn_proj_weight_palettized_cast_fp16, x = input_9_cast_fp16)[name = string("op_421_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_proj_output_scales_to_fp16 = const()[name = string("blocks_1_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202513984)))];
tensor<fp16, [1, 4096, 1, 512]> attention_output_cast_fp16 = mul(x = var_421_cast_fp16, y = blocks_1_attn_proj_output_scales_to_fp16)[name = string("attention_output_cast_fp16")];
tensor<fp16, [1, 4096, 1, 512]> x_25_cast_fp16 = add(x = attention_output_cast_fp16, y = x_15_cast_fp16)[name = string("x_25_cast_fp16")];
tensor<int32, [1]> var_440_axes_0 = const()[name = string("op_440_axes_0"), val = tensor<int32, [1]>([-2])];
tensor<fp16, [1, 4096, 512]> var_440_cast_fp16 = squeeze(axes = var_440_axes_0, x = x_25_cast_fp16)[name = string("op_440_cast_fp16")];
bool var_442_interleave_0 = const()[name = string("op_442_interleave_0"), val = bool(false)];
tensor<fp16, [1, 4097, 512]> var_442_cast_fp16 = concat(axis = var_262, interleave = var_442_interleave_0, values = (var_440_cast_fp16, eps_chan_1_to_fp16))[name = string("op_442_cast_fp16")];
tensor<int32, [1]> x_eps_7_axes_0 = const()[name = string("x_eps_7_axes_0"), val = tensor<int32, [1]>([-2])];
tensor<fp16, [1, 4097, 1, 512]> x_eps_7_cast_fp16 = expand_dims(axes = x_eps_7_axes_0, x = var_442_cast_fp16)[name = string("x_eps_7_cast_fp16")];
tensor<int32, [1]> norm_x_7_axes_0 = const()[name = string("norm_x_7_axes_0"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 1, 512]> norm_x_7_cast_fp16 = reduce_l2_norm(axes = norm_x_7_axes_0, keep_dims = var_266, x = x_eps_7_cast_fp16)[name = string("norm_x_7_cast_fp16")];
tensor<fp16, [1, 4096, 1, 512]> x_normed_19_cast_fp16 = real_div(x = x_25_cast_fp16, y = norm_x_7_cast_fp16)[name = string("x_normed_19_cast_fp16")];
fp16 var_447_to_fp16 = const()[name = string("op_447_to_fp16"), val = fp16(0x1p+6)];
tensor<fp16, [1, 4096, 1, 512]> x_normed_21_cast_fp16 = mul(x = x_normed_19_cast_fp16, y = var_447_to_fp16)[name = string("x_normed_21_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_norm_2_weight_to_fp16 = const()[name = string("blocks_1_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202522240)))];
tensor<fp16, [1, 4096, 1, 512]> input_11_cast_fp16 = mul(x = x_normed_21_cast_fp16, y = blocks_1_norm_2_weight_to_fp16)[name = string("input_11_cast_fp16")];
tensor<int32, [2]> var_459 = const()[name = string("op_459"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_461 = const()[name = string("op_461"), val = tensor<int32, [2]>([1, 1])];
string var_463_pad_type_0 = const()[name = string("op_463_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_463_pad_0 = const()[name = string("op_463_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 11008, 1, 512]> var_463_cast_fp16 = conv(dilations = var_461, groups = var_262, pad = var_463_pad_0, pad_type = var_463_pad_type_0, strides = var_459, weight = blocks_1_mlp_fc_1_weight_palettized_cast_fp16, x = input_11_cast_fp16)[name = string("op_463_cast_fp16")];
tensor<fp16, [1, 11008, 1, 1]> blocks_1_mlp_fc_1_output_scales_to_fp16 = const()[name = string("blocks_1_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202530496)))];
tensor<fp16, [1, 11008, 1, 512]> input_13_cast_fp16 = mul(x = var_463_cast_fp16, y = blocks_1_mlp_fc_1_output_scales_to_fp16)[name = string("input_13_cast_fp16")];
tensor<int32, [2]> var_467 = const()[name = string("op_467"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_469 = const()[name = string("op_469"), val = tensor<int32, [2]>([1, 1])];
string var_471_pad_type_0 = const()[name = string("op_471_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_471_pad_0 = const()[name = string("op_471_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 11008, 1, 512]> var_471_cast_fp16 = conv(dilations = var_469, groups = var_262, pad = var_471_pad_0, pad_type = var_471_pad_type_0, strides = var_467, weight = blocks_1_mlp_fc_2_weight_palettized_cast_fp16, x = input_11_cast_fp16)[name = string("op_471_cast_fp16")];
tensor<fp16, [1, 11008, 1, 1]> blocks_1_mlp_fc_2_output_scales_to_fp16 = const()[name = string("blocks_1_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202552576)))];
tensor<fp16, [1, 11008, 1, 512]> x_fc_2_cast_fp16 = mul(x = var_471_cast_fp16, y = blocks_1_mlp_fc_2_output_scales_to_fp16)[name = string("x_fc_2_cast_fp16")];
tensor<fp16, [1, 11008, 1, 512]> var_473_cast_fp16 = silu(x = input_13_cast_fp16)[name = string("op_473_cast_fp16")];
tensor<fp16, [1, 11008, 1, 512]> input_cast_fp16 = mul(x = var_473_cast_fp16, y = x_fc_2_cast_fp16)[name = string("input_cast_fp16")];
tensor<int32, [2]> var_477 = const()[name = string("op_477"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_479 = const()[name = string("op_479"), val = tensor<int32, [2]>([1, 1])];
string var_481_pad_type_0 = const()[name = string("op_481_pad_type_0"), val = string("custom")];
tensor<int32, [4]> var_481_pad_0 = const()[name = string("op_481_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 512]> var_481_cast_fp16 = conv(dilations = var_479, groups = var_262, pad = var_481_pad_0, pad_type = var_481_pad_type_0, strides = var_477, weight = blocks_1_mlp_proj_weight_palettized_cast_fp16, x = input_cast_fp16)[name = string("op_481_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_mlp_proj_output_scales_to_fp16 = const()[name = string("blocks_1_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202574656)))];
tensor<fp16, [1, 4096, 1, 512]> var_482_cast_fp16 = mul(x = var_481_cast_fp16, y = blocks_1_mlp_proj_output_scales_to_fp16)[name = string("op_482_cast_fp16")];
tensor<fp16, [1, 4096, 1, 512]> x_29_cast_fp16 = add(x = var_482_cast_fp16, y = x_25_cast_fp16)[name = string("x_29_cast_fp16")];
int32 var_488 = const()[name = string("op_488"), val = int32(-1)];
int32 var_497 = const()[name = string("op_497"), val = int32(1)];
bool var_501 = const()[name = string("op_501"), val = bool(true)];
tensor<int32, [1]> var_517_axes_0 = const()[name = string("op_517_axes_0"), val = tensor<int32, [1]>([-2])];
tensor<fp16, [1, 4096, 512]> var_517_cast_fp16 = squeeze(axes = var_517_axes_0, x = x_29_cast_fp16)[name = string("op_517_cast_fp16")];
bool var_519_interleave_0 = const()[name = string("op_519_interleave_0"), val = bool(false)];
tensor<fp16, [1, 4097, 512]> var_519_cast_fp16 = concat(axis = var_497, interleave = var_519_interleave_0, values = (var_517_cast_fp16, eps_chan_1_to_fp16))[name = string("op_519_cast_fp16")];
tensor<int32, [1]> x_eps_axes_0 = const()[name = string("x_eps_axes_0"), val = tensor<int32, [1]>([-2])];
tensor<fp16, [1, 4097, 1, 512]> x_eps_cast_fp16 = expand_dims(axes = x_eps_axes_0, x = var_519_cast_fp16)[name = string("x_eps_cast_fp16")];
tensor<int32, [1]> norm_x_axes_0 = const()[name = string("norm_x_axes_0"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 1, 512]> norm_x_cast_fp16 = reduce_l2_norm(axes = norm_x_axes_0, keep_dims = var_501, x = x_eps_cast_fp16)[name = string("norm_x_cast_fp16")];
tensor<fp16, [1, 4096, 1, 512]> x_normed_25_cast_fp16 = real_div(x = x_29_cast_fp16, y = norm_x_cast_fp16)[name = string("x_normed_25_cast_fp16")];
fp16 var_524_to_fp16 = const()[name = string("op_524_to_fp16"), val = fp16(0x1p+6)];
tensor<fp16, [1, 4096, 1, 512]> x_normed_27_cast_fp16 = mul(x = x_normed_25_cast_fp16, y = var_524_to_fp16)[name = string("x_normed_27_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> post_block_ln_f_weight_to_fp16 = const()[name = string("post_block_ln_f_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202582912)))];
tensor<fp16, [1, 4096, 1, 512]> x_cast_fp16 = mul(x = x_normed_27_cast_fp16, y = post_block_ln_f_weight_to_fp16)[name = string("x_cast_fp16")];
tensor<int32, [1]> var_528_axes_0 = const()[name = string("op_528_axes_0"), val = tensor<int32, [1]>([2])];
tensor<fp16, [1, 4096, 512]> var_528_cast_fp16 = squeeze(axes = var_528_axes_0, x = x_cast_fp16)[name = string("op_528_cast_fp16")];
tensor<int32, [3]> var_529_perm_0 = const()[name = string("op_529_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
tensor<int32, [2]> concat_8 = const()[name = string("concat_8"), val = tensor<int32, [2]>([512, 4096])];
tensor<fp16, [1, 512, 4096]> var_529_cast_fp16 = transpose(perm = var_529_perm_0, x = var_528_cast_fp16)[name = string("transpose_4")];
tensor<fp16, [512, 4096]> reshape_0_cast_fp16 = reshape(shape = concat_8, x = var_529_cast_fp16)[name = string("reshape_0_cast_fp16")];
bool matmul_0_transpose_x_0 = const()[name = string("matmul_0_transpose_x_0"), val = bool(false)];
bool matmul_0_transpose_y_0 = const()[name = string("matmul_0_transpose_y_0"), val = bool(false)];
tensor<fp16, [4096, 16384]> transpose_1_to_fp16 = const()[name = string("transpose_1_to_fp16"), val = tensor<fp16, [4096, 16384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(202591168)))];
tensor<fp16, [512, 16384]> matmul_0_cast_fp16 = matmul(transpose_x = matmul_0_transpose_x_0, transpose_y = matmul_0_transpose_y_0, x = reshape_0_cast_fp16, y = transpose_1_to_fp16)[name = string("matmul_0_cast_fp16")];
tensor<int32, [3]> concat_12 = const()[name = string("concat_12"), val = tensor<int32, [3]>([1, 512, 16384])];
tensor<fp16, [1, 512, 16384]> reshape_2_cast_fp16 = reshape(shape = concat_12, x = matmul_0_cast_fp16)[name = string("reshape_2_cast_fp16")];
bool matmul_1_transpose_x_0 = const()[name = string("matmul_1_transpose_x_0"), val = bool(false)];
bool matmul_1_transpose_y_0 = const()[name = string("matmul_1_transpose_y_0"), val = bool(false)];
tensor<fp16, [4096, 15616]> transpose_3_to_fp16 = const()[name = string("transpose_3_to_fp16"), val = tensor<fp16, [4096, 15616]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(336808960)))];
tensor<fp16, [512, 15616]> matmul_1_cast_fp16 = matmul(transpose_x = matmul_1_transpose_x_0, transpose_y = matmul_1_transpose_y_0, x = reshape_0_cast_fp16, y = transpose_3_to_fp16)[name = string("matmul_1_cast_fp16")];
tensor<int32, [3]> concat_20 = const()[name = string("concat_20"), val = tensor<int32, [3]>([1, 512, 15616])];
tensor<fp16, [1, 512, 15616]> reshape_5_cast_fp16 = reshape(shape = concat_20, x = matmul_1_cast_fp16)[name = string("reshape_5_cast_fp16")];
bool var_538_interleave_0 = const()[name = string("op_538_interleave_0"), val = bool(false)];
tensor<fp16, [1, 512, 32000]> logits = concat(axis = var_488, interleave = var_538_interleave_0, values = (reshape_2_cast_fp16, reshape_5_cast_fp16))[name = string("op_538_cast_fp16")];
} -> (logits, new_k_cache_0, new_k_cache_1, new_v_cache_0, new_v_cache_1);
}
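After the second block, the listing normalizes the final hidden state (an L2 norm over the 4096-channel axis with an appended epsilon channel, rescaled by 0x1p+6 = 64), reshapes it to [512, 4096], and projects to the vocabulary with two fp16 matmuls of width 16384 and 15616 whose outputs are concatenated into 32000 logits. The per-block new_k_cache_* / new_v_cache_* outputs keep a 508-position window, with values stored transposed ([1, 32, 508, 128]) relative to keys ([1, 32, 128, 508]). A minimal Python sketch of the split vocabulary projection, assuming NumPy; split_lm_head, w_a, and w_b are hypothetical names standing in for the two BLOBFILE weight constants, and the code is illustrative only, not part of the model file:

import numpy as np

def split_lm_head(hidden, w_a, w_b):
    # hidden: [seq, 4096]; w_a: [4096, 16384]; w_b: [4096, 15616]
    # Two partial vocabulary projections, re-joined along the last axis,
    # mirroring matmul_0, matmul_1 and the final concat into logits.
    return np.concatenate([hidden @ w_a, hidden @ w_b], axis=-1)  # [seq, 32000]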