|
|
|
|
|
|
|
|
|
|
|
$assert BATCH_TILE % 8 == 0 |
|
$assert BATCH_TILE >= 8 |
|
$SIMD_TILE = BATCH_TILE // 8
|
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" |
|
#include <assert.h> |
|
|
|
#include <arm_neon.h> |
|
|
|
#include <xnnpack/common.h> |
|
#include <xnnpack/raddstoreexpminusmax.h> |
|
|
|
|
|
void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x${BATCH_TILE}${"" if ACCUMULATORS == 1 else "_acc%d" % ACCUMULATORS}( |
|
size_t batch, |
|
const void* input, |
|
const void* max, |
|
void* output, |
|
void* sum, |
|
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS |
|
{ |
|
assert(batch != 0); |
|
assert(batch % sizeof(uint16_t) == 0); |
|
assert(input != NULL); |
|
assert(max != NULL); |
|
assert(output != NULL); |
|
assert(sum != NULL); |
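
// Broadcast the caller-provided maximum and the rr2_p2 approximation constants to all lanes.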
|
|
|
const float16x8_t vi_max = vreinterpretq_f16_u16(vld1q_dup_u16(max)); |
|
const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.log2e));

const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.magic_bias));

const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_hi));

const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_lo));

const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c2));

const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c1));

const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.denorm_cutoff));
|
|
|
const uint16_t* i = (const uint16_t*) input; |
|
uint16_t* o = (uint16_t*) output; |
|
$if BATCH_TILE > 8: |
|
$for K in range(ACCUMULATORS): |
|
float16x8_t vacc${K} = vreinterpretq_f16_u16(vmovq_n_u16(0)); |
|
for (; batch >= ${BATCH_TILE} * sizeof(uint16_t); batch -= ${BATCH_TILE} * sizeof(uint16_t)) { |
|
$for N in range(SIMD_TILE): |
|
const float16x8_t vi${ABC[N]} = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; |
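
// Subtract the pre-computed maximum so the largest argument to exp() is 0; this keeps the
// exponential from overflowing in half precision.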
|
|
|
$for N in range(SIMD_TILE): |
|
const float16x8_t vx${ABC[N]} = vsubq_f16(vi${ABC[N]}, vi_max); |
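
// Compute n := round(x * log2(e)) with the magic-bias trick: adding the large magic bias forces
// the sum to round to an integer, which lands in the low mantissa bits of the result.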
|
|
|
$for N in range(SIMD_TILE): |
|
float16x8_t vn${ABC[N]} = vfmaq_f16(vmagic_bias, vx${ABC[N]}, vlog2e); |
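
// Build the scale s := 2**n by shifting the integer bits of n left by 10 (the number of fp16
// mantissa bits), which places n in the exponent field of a half-precision float.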
|
|
|
$for N in range(SIMD_TILE): |
|
const float16x8_t vs${ABC[N]} = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn${ABC[N]}), 10)); |
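
// Subtract the magic bias to recover n as a half-precision float.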
|
|
|
$for N in range(SIMD_TILE): |
|
vn${ABC[N]} = vsubq_f16(vn${ABC[N]}, vmagic_bias); |
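
// Compute the reduced argument t := x - n * ln(2), with ln(2) split into high and low parts
// (Cody-Waite style, the "rr2" in the kernel name) for extra precision.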
|
|
|
$for N in range(SIMD_TILE): |
|
float16x8_t vt${ABC[N]} = vfmaq_f16(vx${ABC[N]}, vn${ABC[N]}, vminus_ln2_hi); |
|
|
|
$for N in range(SIMD_TILE): |
|
vt${ABC[N]} = vfmaq_f16(vt${ABC[N]}, vn${ABC[N]}, vminus_ln2_lo); |
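
// Evaluate the degree-2 polynomial approximation exp(t) ~= 1 + t * (c1 + c2 * t) on the reduced
// range; this FMA computes the inner factor p := c1 + c2 * t (the "p2" in the kernel name).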
|
|
|
$for N in range(SIMD_TILE): |
|
const float16x8_t vp${ABC[N]} = vfmaq_f16(vc1, vc2, vt${ABC[N]}); |
|
|
|
$for N in range(SIMD_TILE): |
|
vt${ABC[N]} = vmulq_f16(vt${ABC[N]}, vs${ABC[N]}); |
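
// Reconstruct f := s + (t * s) * p = s * (1 + t * (c1 + c2 * t)) ~= exp(x - max), and flag lanes
// whose input lies below the fp16 denormal cutoff.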
|
|
|
$for N in range(SIMD_TILE): |
|
float16x8_t vf${ABC[N]} = vfmaq_f16(vs${ABC[N]}, vp${ABC[N]}, vt${ABC[N]}); |
|
const uint16x8_t vm${ABC[N]} = vcltq_f16(vx${ABC[N]}, vdenorm_cutoff); |
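
// Flush the flagged (underflowing) lanes to zero.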
|
|
|
$for N in range(SIMD_TILE): |
|
vf${ABC[N]} = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf${ABC[N]}), vm${ABC[N]})); |
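
// Store the exponentials and add them to the running accumulator(s).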
|
|
|
$for N in range(SIMD_TILE): |
|
vst1q_u16(o, vreinterpretq_u16_f16(vf${ABC[N]})); o += 8; |
|
|
|
$for N in range(SIMD_TILE): |
|
vacc${N % ACCUMULATORS} = vaddq_f16(vacc${N % ACCUMULATORS}, vf${ABC[N]}); |
|
} |
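
// If multiple accumulators were used, add the partial sums pairwise down into vacc0.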
|
$if ACCUMULATORS > 1: |
|
$ACC_SLICE = 1 |
|
$while ACC_SLICE < ACCUMULATORS: |
|
$for A in range(0, ACCUMULATORS, ACC_SLICE * 2): |
|
$if A + ACC_SLICE < ACCUMULATORS: |
|
vacc${A} = vaddq_f16(vacc${A}, vacc${A + ACC_SLICE}); |
|
$ACC_SLICE *= 2 |
|
|
|
float16x8_t vacc = vacc0; |
|
$else: |
|
float16x8_t vacc = vreinterpretq_f16_u16(vmovq_n_u16(0)); |
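
// Process full groups of 8 elements, accumulating into a single register.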
|
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { |
|
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; |
|
|
|
const float16x8_t vx = vsubq_f16(vi, vi_max); |
|
|
|
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e); |
|
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); |
|
vn = vsubq_f16(vn, vmagic_bias); |
|
|
|
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi); |
|
vt = vfmaq_f16(vt, vn, vminus_ln2_lo); |
|
|
|
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt); |
|
vt = vmulq_f16(vt, vs); |
|
|
|
float16x8_t vf = vfmaq_f16(vs, vp, vt); |
|
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff); |
|
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm)); |
|
|
|
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8; |
|
|
|
vacc = vaddq_f16(vacc, vf); |
|
} |
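
// Fold the upper half of the vector accumulator into the lower half to start the horizontal reduction.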
|
float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc)); |
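
// Handle the final 1..7 elements: a full vector is loaded (XNN_OOB_READS permits reading past the
// end of the buffer), but only `batch` elements are stored and accumulated.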
|
if (batch != 0) { |
|
assert(batch >= 1 * sizeof(uint16_t)); |
|
assert(batch <= 7 * sizeof(uint16_t)); |
|
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); |
|
|
|
const float16x8_t vx = vsubq_f16(vi, vi_max); |
|
|
|
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e); |
|
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); |
|
vn = vsubq_f16(vn, vmagic_bias); |
|
|
|
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi); |
|
vt = vfmaq_f16(vt, vn, vminus_ln2_lo); |
|
|
|
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt); |
|
vt = vmulq_f16(vt, vs); |
|
|
|
float16x8_t vf = vfmaq_f16(vs, vp, vt); |
|
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff); |
|
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm)); |
|
|
|
float16x4_t vf_lo = vget_low_f16(vf); |
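
// Store the tail in 4/2/1-element pieces. The 64-bit left shifts zero out the lanes that were not
// stored, so only the elements actually written contribute to the sum.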
|
if (batch & (4 * sizeof(uint16_t))) { |
|
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4; |
|
vacc_lo = vadd_f16(vacc_lo, vf_lo); |
|
vf_lo = vget_high_f16(vf); |
|
} |
|
if (batch & (2 * sizeof(uint16_t))) { |
|
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2; |
|
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32))); |
|
vf_lo = vext_f16(vf_lo, vf_lo, 2); |
|
} |
|
if (batch & (1 * sizeof(uint16_t))) { |
|
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0); |
|
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48))); |
|
} |
|
} |
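
// Finish the reduction with two pairwise additions and write out the scalar sum of exponentials.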
|
vacc_lo = vpadd_f16(vacc_lo, vacc_lo); |
|
vacc_lo = vpadd_f16(vacc_lo, vacc_lo); |
|
vst1_lane_u16(sum, vreinterpret_u16_f16(vacc_lo), 0); |
|
} |
|
|