// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$SIMD_TILE = BATCH_TILE // 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>

$WASM_V32X4_LANESELECT = "wasm_i32x4_relaxed_laneselect" if RELAXED else "wasm_v128_bitselect"
$ISA = "wasmrelaxedsimd" if RELAXED else "wasmsimd"
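
// Converts a batch of IEEE fp16 values (passed as uint16_t bit patterns) to fp32,
// using only integer and f32x4 WebAssembly SIMD instructions, i.e. without native
// fp16 support. BATCH_TILE (a multiple of 8) selects the unroll factor of the main
// loop; SIMD_TILE is the number of 8-element vectors it processes per iteration.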
void xnn_f16_f32_vcvt_ukernel__${ISA}_int16_x${BATCH_TILE}(
    size_t batch,
    const void* input,
    float* output,
    const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
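
  // Conversion constants precomputed in the params structure. Their roles follow
  // from how they are used below: vsign_mask isolates the fp16 sign bit, vexp_offset
  // and vexp_scale rebias the exponent of normalized values, vmagic_mask/vmagic_bias
  // reconstruct denormal values via float arithmetic, and vdenorm_cutoff separates
  // the two cases.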
  const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_int16.sign_mask);
  const v128_t vexp_offset = wasm_v128_load64_splat(params->wasmsimd_int16.exp_offset);
  const v128_t vexp_scale = wasm_v128_load64_splat(params->wasmsimd_int16.exp_scale);
  const v128_t vmagic_mask = wasm_v128_load64_splat(params->wasmsimd_int16.magic_mask);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_int16.magic_bias);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_int16.denorm_cutoff);

  const uint16_t* i = (const uint16_t*) input;
  $if BATCH_TILE > 8:
    for (; batch >= ${BATCH_TILE} * sizeof(uint16_t); batch -= ${BATCH_TILE} * sizeof(uint16_t)) {
      const v128_t vh0 = wasm_v128_load(i);
      $for N in range(1, SIMD_TILE):
        const v128_t vh${N} = wasm_v128_load(i + ${N * 8});
      i += ${BATCH_TILE};
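
      // Split off the sign bit of each fp16 value.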
      $for N in range(SIMD_TILE):
        const v128_t vsign${N} = wasm_v128_and(vh${N}, vsign_mask);
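
      // Clear the sign: vnonsign holds only the fp16 exponent and mantissa bits.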
      $for N in range(SIMD_TILE):
        const v128_t vnonsign${N} = wasm_v128_xor(vh${N}, vsign${N});
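
      // Normalized path, step 1: vprenorm (even) positions the low mantissa bits for
      // the low half of the fp32 word; vprenorm (odd) aligns the exponent and high
      // mantissa bits for the high half and rebiases the exponent via vexp_offset.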
      $for N in range(SIMD_TILE):
        const v128_t vprenorm${N*2} = wasm_i16x8_shl(vnonsign${N}, 13);
        const v128_t vprenorm${N*2+1} = wasm_i16x8_add(wasm_u16x8_shr(vnonsign${N}, 3), vexp_offset);
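
      // Normalized path, step 2: interleave the 16-bit halves into 32-bit fp32 bit
      // patterns, then multiply by vexp_scale to finish rebiasing the exponent.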
      $for N in range(SIMD_TILE):
        const v128_t vnorm${N*2} = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm${N*2}, vprenorm${N*2+1}, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
        const v128_t vnorm${N*2+1} = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm${N*2}, vprenorm${N*2+1}, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
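
      // Denormal path: pair the fp16 mantissa bits with vmagic_mask to build a small
      // fp32, then subtract vmagic_bias so only the scaled mantissa value remains.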
      $for N in range(SIMD_TILE):
        const v128_t vdenorm${N*2} = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign${N}, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
        const v128_t vdenorm${N*2+1} = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign${N}, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
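
      // vmask is all-ones in lanes whose magnitude exceeds the denormal cutoff,
      // i.e. lanes that must take the normalized path.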
      $for N in range(SIMD_TILE):
        const v128_t vmask${N} = wasm_i16x8_gt(vnonsign${N}, vdenorm_cutoff);
      const v128_t vzero = wasm_i16x8_const_splat(0);
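
      // Sign-extend the 16-bit lane masks to the 32-bit lanes they control.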
      $for N in range(SIMD_TILE):
        const v128_t vxmask${N*2} = wasm_i32x4_extend_low_i16x8(vmask${N});
        const v128_t vxmask${N*2+1} = wasm_i32x4_extend_high_i16x8(vmask${N});
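
      // Pick the normalized or denormal result per lane, and position the saved sign
      // bit in the top bit of each 32-bit lane.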
      $for N in range(SIMD_TILE):
        const v128_t vabsf${N*2} = ${WASM_V32X4_LANESELECT}(vnorm${N*2}, vdenorm${N*2}, vxmask${N*2});
        const v128_t vsignf${N*2} = wasm_v16x8_shuffle(vzero, vsign${N}, 0, 8, 1, 9, 2, 10, 3, 11);
        const v128_t vabsf${N*2+1} = ${WASM_V32X4_LANESELECT}(vnorm${N*2+1}, vdenorm${N*2+1}, vxmask${N*2+1});
        const v128_t vsignf${N*2+1} = wasm_v16x8_shuffle(vzero, vsign${N}, 4, 12, 5, 13, 6, 14, 7, 15);
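
      // Merge the sign back into the converted magnitudes.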
      $for N in range(SIMD_TILE):
        const v128_t vf${N*2} = wasm_v128_or(vsignf${N*2}, vabsf${N*2});
        const v128_t vf${N*2+1} = wasm_v128_or(vsignf${N*2+1}, vabsf${N*2+1});
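
      // Store ${BATCH_TILE} converted fp32 values.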
      wasm_v128_store(output, vf0);
      $for N in range(1, 2*SIMD_TILE):
        wasm_v128_store(output + ${N*4}, vf${N});
      output += ${BATCH_TILE};
    }
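
  // Vector loop: convert 8 fp16 values (one 128-bit vector) per iteration, using the
  // same normalized/denormal dual-path computation as the main loop above.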
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    const v128_t vh = wasm_v128_load(i);
    i += 8;

    const v128_t vsign = wasm_v128_and(vh, vsign_mask);

    const v128_t vnonsign = wasm_v128_xor(vh, vsign);

    const v128_t vprenorm_lo = wasm_i16x8_shl(vnonsign, 13);
    const v128_t vprenorm_hi = wasm_i16x8_add(wasm_u16x8_shr(vnonsign, 3), vexp_offset);

    const v128_t vnorm_lo = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
    const v128_t vnorm_hi = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);

    const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
    const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);

    const v128_t vmask = wasm_i16x8_gt(vnonsign, vdenorm_cutoff);
    const v128_t vzero = wasm_i16x8_const_splat(0);

    const v128_t vxmask_lo = wasm_i32x4_extend_low_i16x8(vmask);
    const v128_t vxmask_hi = wasm_i32x4_extend_high_i16x8(vmask);

    const v128_t vabsf_lo = ${WASM_V32X4_LANESELECT}(vnorm_lo, vdenorm_lo, vxmask_lo);
    const v128_t vsignf_lo = wasm_v16x8_shuffle(vzero, vsign, 0, 8, 1, 9, 2, 10, 3, 11);
    const v128_t vabsf_hi = ${WASM_V32X4_LANESELECT}(vnorm_hi, vdenorm_hi, vxmask_hi);
    const v128_t vsignf_hi = wasm_v16x8_shuffle(vzero, vsign, 4, 12, 5, 13, 6, 14, 7, 15);

    const v128_t vf_lo = wasm_v128_or(vsignf_lo, vabsf_lo);
    const v128_t vf_hi = wasm_v128_or(vsignf_hi, vabsf_hi);

    wasm_v128_store(output, vf_lo);
    wasm_v128_store(output + 4, vf_hi);
    output += 8;
  }
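
  // Remainder of 1..7 values: load a full vector (the XNN_OOB_READS annotation on the
  // kernel marks reads past the end of the input as permitted), convert all 8 lanes,
  // and store only the valid ones.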
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint16_t));
    assert(batch <= 7 * sizeof(uint16_t));

    const v128_t vh = wasm_v128_load(i);

    const v128_t vsign = wasm_v128_and(vh, vsign_mask);

    const v128_t vnonsign = wasm_v128_xor(vh, vsign);

    const v128_t vprenorm_lo = wasm_i16x8_shl(vnonsign, 13);
    const v128_t vprenorm_hi = wasm_i16x8_add(wasm_u16x8_shr(vnonsign, 3), vexp_offset);

    const v128_t vnorm_lo = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
    const v128_t vnorm_hi = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);

    const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
    const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);

    const v128_t vmask = wasm_i16x8_gt(vnonsign, vdenorm_cutoff);
    const v128_t vzero = wasm_i16x8_const_splat(0);

    const v128_t vxmask_lo = wasm_i32x4_extend_low_i16x8(vmask);
    const v128_t vxmask_hi = wasm_i32x4_extend_high_i16x8(vmask);

    const v128_t vabsf_lo = ${WASM_V32X4_LANESELECT}(vnorm_lo, vdenorm_lo, vxmask_lo);
    const v128_t vsignf_lo = wasm_v16x8_shuffle(vzero, vsign, 0, 8, 1, 9, 2, 10, 3, 11);
    const v128_t vabsf_hi = ${WASM_V32X4_LANESELECT}(vnorm_hi, vdenorm_hi, vxmask_hi);
    const v128_t vsignf_hi = wasm_v16x8_shuffle(vzero, vsign, 4, 12, 5, 13, 6, 14, 7, 15);

    v128_t vf = wasm_v128_or(vsignf_lo, vabsf_lo);
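
    // Store 4, 2, and then 1 fp32 value depending on the bits of the remaining batch size.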
    if (batch & (4 * sizeof(uint16_t))) {
      wasm_v128_store(output, vf);
      output += 4;

      vf = wasm_v128_or(vsignf_hi, vabsf_hi);
    }
    if (batch & (2 * sizeof(uint16_t))) {
      wasm_v128_store64_lane(output, vf, 0);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(uint16_t))) {
      wasm_v128_store32_lane(output, vf, 0);
    }
  }
}