// src/f16-f32-vcvt/wasmsimd-int32.c.in
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$SIMD_TILE = BATCH_TILE // 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>

$WASM_V32X4_LANESELECT = "wasm_i32x4_relaxed_laneselect" if RELAXED else "wasm_v128_bitselect"
$ISA = "wasmrelaxedsimd" if RELAXED else "wasmsimd"
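// This is an xngen template: $-prefixed lines are generator directives, and
// ${...} expressions are substituted when the kernel is generated.
//
// FP16 -> FP32 conversion micro-kernel, "int32" variant: each halfword is
// zero-extended into the upper 16 bits of a 32-bit lane, so all of the
// exponent/mantissa manipulation is done with 32-bit integer operations.
// BATCH_TILE (a multiple of 8) sets the unroll factor; the RELAXED template
// flag selects the Relaxed SIMD lane-select instruction.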
void xnn_f16_f32_vcvt_ukernel__${ISA}_int32_x${BATCH_TILE}(
    size_t batch,
    const void* input,
    float* output,
    const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // sign_mask isolates the FP16 sign bit; exp_offset and exp_scale re-bias
  // the exponent of normalized values; magic_bias reconstructs denormals;
  // denorm_cutoff separates the normalized and denormal paths.
  const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_int32.sign_mask);
  const v128_t vexp_offset = wasm_v128_load64_splat(params->wasmsimd_int32.exp_offset);
  const v128_t vexp_scale = wasm_v128_load64_splat(params->wasmsimd_int32.exp_scale);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_int32.magic_bias);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_int32.denorm_cutoff);

  const uint16_t* i = (const uint16_t*) input;
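  // Each FP16 input is split into a sign bit and a 15-bit magnitude; the
  // magnitude is converted along either the normalized or the zero/denormal
  // path, and the sign is then OR'ed back into the selected result.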
  $if BATCH_TILE > 8:
    for (; batch >= ${BATCH_TILE} * sizeof(uint16_t); batch -= ${BATCH_TILE} * sizeof(uint16_t)) {
      const v128_t vh0 = wasm_v128_load(i);
      $for N in range(1, SIMD_TILE):
        const v128_t vh${N} = wasm_v128_load(i + ${N * 8});
      i += ${BATCH_TILE};

      // Zero-extend each halfword into the upper 16 bits of a 32-bit lane.
      const v128_t vzero = wasm_i16x8_const_splat(0);
      $for N in range(SIMD_TILE):
        const v128_t vw${N*2} = wasm_v16x8_shuffle(vzero, vh${N}, 0, 8, 1, 9, 2, 10, 3, 11);
        const v128_t vw${N*2+1} = wasm_v16x8_shuffle(vzero, vh${N}, 4, 12, 5, 13, 6, 14, 7, 15);

      // Split off the sign bit.
      $for N in range(2*SIMD_TILE):
        const v128_t vsign${N} = wasm_v128_and(vw${N}, vsign_mask);

      $for N in range(2*SIMD_TILE):
        const v128_t vnonsign${N} = wasm_v128_xor(vw${N}, vsign${N});

      // Normalized (and Inf/NaN) path: shift the exponent/mantissa into FP32
      // position, re-bias the exponent, and scale; using a multiply (rather
      // than an exponent subtraction) keeps Inf/NaN intact.
      $for N in range(2*SIMD_TILE):
        const v128_t vnorm${N} = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign${N}, 3), vexp_offset), vexp_scale);

      // Zero/denormal path: treat the mantissa as an integer and use the
      // magic-bias trick to turn it into the exact FP32 value.
      $for N in range(2*SIMD_TILE):
        const v128_t vdenorm${N} = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign${N}, 16), vmagic_bias), vmagic_bias);

      // Lanes above denorm_cutoff take the normalized path.
      $for N in range(2*SIMD_TILE):
        const v128_t vxmask${N} = wasm_i32x4_gt(vnonsign${N}, vdenorm_cutoff);

      $for N in range(2*SIMD_TILE):
        const v128_t vf${N} = wasm_v128_or(vsign${N}, ${WASM_V32X4_LANESELECT}(vnorm${N}, vdenorm${N}, vxmask${N}));

      wasm_v128_store(output, vf0);
      $for N in range(1, 2*SIMD_TILE):
        wasm_v128_store(output + ${N*4}, vf${N});
      output += ${BATCH_TILE};
    }
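  // Convert the remaining full vectors of 8 elements with the same steps as
  // the unrolled loop above.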
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    const v128_t vh = wasm_v128_load(i);
    i += 8;

    const v128_t vzero = wasm_i16x8_const_splat(0);
    const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0, 8, 1, 9, 2, 10, 3, 11);
    const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);

    const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
    const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);

    const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
    const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);

    const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
    const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);

    const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_bias), vmagic_bias);
    const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_bias), vmagic_bias);

    const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
    const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);

    const v128_t vf_lo = wasm_v128_or(vsign_lo, ${WASM_V32X4_LANESELECT}(vnorm_lo, vdenorm_lo, vxmask_lo));
    const v128_t vf_hi = wasm_v128_or(vsign_hi, ${WASM_V32X4_LANESELECT}(vnorm_hi, vdenorm_hi, vxmask_hi));

    wasm_v128_store(output, vf_lo);
    wasm_v128_store(output + 4, vf_hi);
    output += 8;
  }
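  // Tail of 1-7 elements: XNN_OOB_READS permits the full 128-bit load below
  // to read past the end of the input buffer; the whole vector is converted,
  // but only the valid lanes are stored.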
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint16_t));
    assert(batch <= 7 * sizeof(uint16_t));
    const v128_t vh = wasm_v128_load(i);

    const v128_t vzero = wasm_i16x8_const_splat(0);
    const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0, 8, 1, 9, 2, 10, 3, 11);
    const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);

    const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
    const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);

    const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
    const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);

    const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
    const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);

    const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_bias), vmagic_bias);
    const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_bias), vmagic_bias);

    const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
    v128_t vf = wasm_v128_or(vsign_lo, ${WASM_V32X4_LANESELECT}(vnorm_lo, vdenorm_lo, vxmask_lo));

    if (batch & (4 * sizeof(uint16_t))) {
      // Store 4 converted elements, then convert the upper half of the input.
      wasm_v128_store(output, vf);
      output += 4;

      const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);
      vf = wasm_v128_or(vsign_hi, ${WASM_V32X4_LANESELECT}(vnorm_hi, vdenorm_hi, vxmask_hi));
    }
    if (batch & (2 * sizeof(uint16_t))) {
      // Store 2 elements and shift the remaining ones down.
      wasm_v128_store64_lane(output, vf, 0);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(uint16_t))) {
      wasm_v128_store32_lane(output, vf, 0);
    }
  }
}
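
// A minimal usage sketch for a generated kernel, assuming BATCH_TILE=16 and
// RELAXED=0. The name and signature of the params-initialization helper
// below follow XNNPACK's microparams-init naming convention and are an
// assumption, not taken from this file:
//
//   union xnn_f16_f32_cvt_params params;
//   xnn_init_f16_f32_cvt_wasmsimd_int32_params(&params);  // assumed helper
//   // batch is given in bytes of FP16 input, hence the sizeof factor.
//   xnn_f16_f32_vcvt_ukernel__wasmsimd_int32_x16(
//       num_elements * sizeof(uint16_t), fp16_input, fp32_output, &params);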