$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$SIMD_TILE = BATCH_TILE // 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>

void xnn_f16_f32_vcvt_ukernel__neon_int32_x${BATCH_TILE}(
    size_t batch,
    const void* input,
    float* output,
    const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
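
  // Integer-arithmetic fp16 -> fp32 conversion constants. A sketch of their
  // roles (exp_scale is loaded from params and is expected to hold
  // 0x1.0p-112f):
  //   vsign_mask:     isolates the sign bit once the half word has been
  //                   shifted into the upper 16 bits of a 32-bit lane.
  //   vexp_offset:    added into the shifted exponent field; combined with
  //                   the exp_scale multiply, it rebases the fp16 exponent
  //                   (bias 15) to the fp32 exponent (bias 127).
  //   vmagic_bias:    0x3F000000 is the bit pattern of 0.5f; denormal inputs
  //                   are reconstructed by inserting their bits into its
  //                   mantissa and subtracting 0.5f.
  //   vdenorm_cutoff: non-sign bit patterns not above this value are routed
  //                   through the magic-bias (denormal) path.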

  const uint32x4_t vsign_mask = vmovq_n_u32(0x80000000);
  const uint32x4_t vexp_offset = vmovq_n_u32(0x70000000);
  const float32x4_t vexp_scale = vld1q_dup_f32(&params->neon.exp_scale);
  const uint32x4_t vmagic_bias = vmovq_n_u32(0x3F000000);
  const uint32x4_t vdenorm_cutoff = vmovq_n_u32(0x04000000);
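
  // Both loops below apply the same per-lane pipeline: widen, split off the
  // sign, compute normalized and denormalized candidates, and select between
  // them; the first loop simply unrolls it over BATCH_TILE halves.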

  const uint16_t* i = (const uint16_t*) input;
  $if BATCH_TILE > 8:
    for (; batch >= ${BATCH_TILE} * sizeof(uint16_t); batch -= ${BATCH_TILE} * sizeof(uint16_t)) {
      $for N in range(SIMD_TILE):
        const uint16x8_t vh${N} = vld1q_u16(i); i += 8;

      $for N in range(SIMD_TILE):
        const uint32x4_t vw${2*N} = vshll_n_u16(vget_low_u16(vh${N}), 16);
        const uint32x4_t vw${2*N+1} = vshll_n_u16(vget_high_u16(vh${N}), 16);

      $for N in range(2*SIMD_TILE):
        const uint32x4_t vsign${N} = vandq_u32(vw${N}, vsign_mask);

      $for N in range(2*SIMD_TILE):
        const uint32x4_t vnonsign${N} = veorq_u32(vw${N}, vsign${N});

      $for N in range(2*SIMD_TILE):
        const float32x4_t vnorm${N} = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign${N}, 3)), vexp_scale);

      $for N in range(2*SIMD_TILE):
        const float32x4_t vdenorm${N} = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_bias, vnonsign${N}, 16)), vreinterpretq_f32_u32(vmagic_bias));

      $for N in range(2*SIMD_TILE):
        const uint32x4_t vxmask${N} = vcgtq_u32(vnonsign${N}, vdenorm_cutoff);

      $for N in range(2*SIMD_TILE):
        const uint32x4_t vf${N} = vorrq_u32(vsign${N}, vreinterpretq_u32_f32(vbslq_f32(vxmask${N}, vnorm${N}, vdenorm${N})));

      $for N in range(2*SIMD_TILE):
        vst1q_f32(output, vreinterpretq_f32_u32(vf${N})); output += 4;
    }
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    const uint16x8_t vh = vld1q_u16(i); i += 8;
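
    // Extend each fp16 word into the upper half of a 32-bit lane, so sign,
    // exponent, and mantissa sit near their fp32 positions.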
    const uint32x4_t vw_lo = vshll_n_u16(vget_low_u16(vh), 16);
    const uint32x4_t vw_hi = vshll_n_u16(vget_high_u16(vh), 16);
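
    // Split off the sign bit; the remaining bits are handled as a
    // non-negative value and the sign is OR-ed back in at the end.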
    const uint32x4_t vsign_lo = vandq_u32(vw_lo, vsign_mask);
    const uint32x4_t vsign_hi = vandq_u32(vw_hi, vsign_mask);

    const uint32x4_t vnonsign_lo = veorq_u32(vw_lo, vsign_lo);
    const uint32x4_t vnonsign_hi = veorq_u32(vw_hi, vsign_hi);
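
    // Normalized path: shift-right-and-accumulate moves the fp16 exponent
    // and mantissa into the fp32 fields while adding the exponent offset;
    // the exp_scale multiply then corrects the exponent bias.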
    const float32x4_t vnorm_lo = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign_lo, 3)), vexp_scale);
    const float32x4_t vnorm_hi = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign_hi, 3)), vexp_scale);
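
    // Denormal path: shift-right-and-insert places the fp16 bits into the
    // mantissa of 0.5f; subtracting 0.5f recovers the denormal value.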
    const float32x4_t vdenorm_lo = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_bias, vnonsign_lo, 16)), vreinterpretq_f32_u32(vmagic_bias));
    const float32x4_t vdenorm_hi = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_bias, vnonsign_hi, 16)), vreinterpretq_f32_u32(vmagic_bias));
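
    // Per-lane select between the two paths, then restore the sign.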
    const uint32x4_t vxmask_lo = vcgtq_u32(vnonsign_lo, vdenorm_cutoff);
    const uint32x4_t vf_lo = vorrq_u32(vsign_lo, vreinterpretq_u32_f32(vbslq_f32(vxmask_lo, vnorm_lo, vdenorm_lo)));

    const uint32x4_t vxmask_hi = vcgtq_u32(vnonsign_hi, vdenorm_cutoff);
    const uint32x4_t vf_hi = vorrq_u32(vsign_hi, vreinterpretq_u32_f32(vbslq_f32(vxmask_hi, vnorm_hi, vdenorm_hi)));

    vst1q_f32(output, vreinterpretq_f32_u32(vf_lo)); output += 4;
    vst1q_f32(output, vreinterpretq_f32_u32(vf_hi)); output += 4;
  }
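
  // Tail of 1-7 halves: load a full vector (the kernel is declared
  // XNN_OOB_READS, so reading past the end of the input is permitted) and
  // store only the valid lanes.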
  if XNN_UNPREDICTABLE(batch != 0) {
    const uint16x8_t vh = vld1q_u16(i); i += 8;

    const uint32x4_t vw_lo = vshll_n_u16(vget_low_u16(vh), 16);
    const uint32x4_t vw_hi = vshll_n_u16(vget_high_u16(vh), 16);

    const uint32x4_t vsign_lo = vandq_u32(vw_lo, vsign_mask);
    const uint32x4_t vsign_hi = vandq_u32(vw_hi, vsign_mask);

    const uint32x4_t vnonsign_lo = veorq_u32(vw_lo, vsign_lo);
    const uint32x4_t vnonsign_hi = veorq_u32(vw_hi, vsign_hi);

    const float32x4_t vnorm_lo = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign_lo, 3)), vexp_scale);
    const float32x4_t vnorm_hi = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign_hi, 3)), vexp_scale);

    const float32x4_t vdenorm_lo = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_bias, vnonsign_lo, 16)), vreinterpretq_f32_u32(vmagic_bias));
    const float32x4_t vdenorm_hi = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_bias, vnonsign_hi, 16)), vreinterpretq_f32_u32(vmagic_bias));

    const uint32x4_t vxmask_lo = vcgtq_u32(vnonsign_lo, vdenorm_cutoff);
    uint32x4_t vf = vorrq_u32(vsign_lo, vreinterpretq_u32_f32(vbslq_f32(vxmask_lo, vnorm_lo, vdenorm_lo)));

    if (batch & (4 * sizeof(uint16_t))) {
      vst1q_f32(output, vreinterpretq_f32_u32(vf)); output += 4;

      const uint32x4_t vxmask_hi = vcgtq_u32(vnonsign_hi, vdenorm_cutoff);
      vf = vorrq_u32(vsign_hi, vreinterpretq_u32_f32(vbslq_f32(vxmask_hi, vnorm_hi, vdenorm_hi)));
    }
    uint32x2_t vf_lo = vget_low_u32(vf);
    if (batch & (2 * sizeof(uint16_t))) {
      vst1_f32(output, vreinterpret_f32_u32(vf_lo)); output += 2;
      vf_lo = vget_high_u32(vf);
    }
    if (batch & (1 * sizeof(uint16_t))) {
      vst1_lane_f32(output, vreinterpret_f32_u32(vf_lo), 0);
    }
  }
}