$assert SSE in [2, 4]
$assert not AVX or SSE == 4
$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$SIMD_TILE = BATCH_TILE // 8
$SSE_HEADER = {2: "emmintrin.h", 4: "smmintrin.h"}[SSE]
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <${SSE_HEADER}>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>


$ISA = "avx" if AVX else {2: "sse2", 4: "sse41"}[SSE] |
|
void xnn_f16_f32_vcvt_ukernel__${ISA}_int32_x${BATCH_TILE}( |
|
size_t batch, |
|
const void* input, |
|
float* output, |
|
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS |
|
{ |
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

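  // Load the conversion constants (sign mask, exponent offset and scale, magic bias, denormal cutoff) from the parameters structure.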
  const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse_int32.sign_mask);
  const __m128i vexp_offset = _mm_load_si128((const __m128i*) params->sse_int32.exp_offset);
  const __m128 vexp_scale = _mm_load_ps(params->sse_int32.exp_scale);
  const __m128i vmagic_bias = _mm_load_si128((const __m128i*) params->sse_int32.magic_bias);
  const __m128i vdenorm_cutoff = _mm_load_si128((const __m128i*) params->sse_int32.denorm_cutoff);

  const uint16_t* i = (const uint16_t*) input;
  $if BATCH_TILE > 8:
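    // Main loop: convert ${BATCH_TILE} half-precision values per iteration.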
    for (; batch >= ${BATCH_TILE} * sizeof(uint16_t); batch -= ${BATCH_TILE} * sizeof(uint16_t)) {
      const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
      $for N in range(1, SIMD_TILE):
        const __m128i vh${N} = _mm_loadu_si128((const __m128i*) (i + ${N * 8}));
      i += ${BATCH_TILE};

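      // Interleave zeros below each halfword: every 32-bit lane now holds its half-precision bits shifted left by 16.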
      $for N in range(SIMD_TILE):
        const __m128i vw${2*N} = _mm_unpacklo_epi16(_mm_setzero_si128(), vh${N});
        const __m128i vw${2*N+1} = _mm_unpackhi_epi16(_mm_setzero_si128(), vh${N});

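      // Isolate the sign bit of each element.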
      $for N in range(2*SIMD_TILE):
        const __m128i vsign${N} = _mm_and_si128(vw${N}, vsign_mask);

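      // Clear the sign bit, leaving only the exponent and mantissa bits.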
      $for N in range(2*SIMD_TILE):
        const __m128i vnonsign${N} = _mm_xor_si128(vw${N}, vsign${N});

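      // Normalized inputs: shift the exponent and mantissa into binary32 position, add the exponent
      // offset, then multiply by the exponent scale to finish rebiasing the exponent (with the usual
      // constants this also maps half-precision infinities and NaNs to binary32 infinities and NaNs).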
      $for N in range(2*SIMD_TILE):
        const __m128i vnorm${N} = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign${N}, 3), vexp_offset)), vexp_scale));

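      // Denormalized inputs: OR the halfword bits into the mantissa of the magic bias and subtract the
      // magic bias as a float to recover the denormalized value.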
      $for N in range(2*SIMD_TILE):
        const __m128i vdenorm${N} = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign${N}, 16), vmagic_bias)), _mm_castsi128_ps(vmagic_bias)));

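      // Lanes that compare above the denormal cutoff contain normalized numbers.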
      $for N in range(2*SIMD_TILE):
        const __m128i vmask${N} = _mm_cmpgt_epi32(vnonsign${N}, vdenorm_cutoff);

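      // Per lane, select the normalized or denormalized result and re-attach the sign bit.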
      $for N in range(2*SIMD_TILE):
        $if SSE == 4:
          const __m128i vf${N} = _mm_or_si128(vsign${N}, _mm_blendv_epi8(vdenorm${N}, vnorm${N}, vmask${N}));
        $else:
          const __m128i vf${N} = _mm_or_si128(vsign${N},
            _mm_or_si128(_mm_and_si128(vmask${N}, vnorm${N}), _mm_andnot_si128(vmask${N}, vdenorm${N})));

      _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
      $for N in range(1, 2*SIMD_TILE):
        _mm_storeu_ps(output + ${N * 4}, _mm_castsi128_ps(vf${N}));
      output += ${BATCH_TILE};
    }
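  // Convert the remaining full groups of 8 elements (this is the main loop when BATCH_TILE == 8).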
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    const __m128i vh = _mm_loadu_si128((const __m128i*) i);
    i += 8;

    const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
    const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);

    const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
    const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);

    const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
    const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);

    const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
    const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));

    const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_bias)), _mm_castsi128_ps(vmagic_bias)));
    const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_bias)), _mm_castsi128_ps(vmagic_bias)));

    const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
    $if SSE == 4:
      const __m128i vf_lo = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));
    $else:
      const __m128i vf_lo = _mm_or_si128(vsign_lo,
        _mm_or_si128(_mm_and_si128(vmask_lo, vnorm_lo), _mm_andnot_si128(vmask_lo, vdenorm_lo)));

    const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
    $if SSE == 4:
      const __m128i vf_hi = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));
    $else:
      const __m128i vf_hi = _mm_or_si128(vsign_hi,
        _mm_or_si128(_mm_and_si128(vmask_hi, vnorm_hi), _mm_andnot_si128(vmask_hi, vdenorm_hi)));

    _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
    _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
    output += 8;
  }
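  // Tail: convert the last 1-7 elements. The full 16-byte load may read past the end of the input
  // buffer, which the XNN_OOB_READS annotation on this kernel permits.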
  if XNN_UNPREDICTABLE(batch != 0) {
    const __m128i vh = _mm_loadu_si128((const __m128i*) i);

    const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
    const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);

    const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
    const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);

    const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
    const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);

    const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
    const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));

    const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_bias)), _mm_castsi128_ps(vmagic_bias)));
    const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_bias)), _mm_castsi128_ps(vmagic_bias)));

    const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
    $if SSE == 4:
      __m128i vf = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));
    $else:
      __m128i vf = _mm_or_si128(vsign_lo,
        _mm_or_si128(_mm_and_si128(vmask_lo, vnorm_lo), _mm_andnot_si128(vmask_lo, vdenorm_lo)));

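    // Store 4, 2, and then 1 element(s) according to the remaining batch size.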
    if (batch & (4 * sizeof(uint16_t))) {
      _mm_storeu_ps(output, _mm_castsi128_ps(vf));
      output += 4;

      const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
      $if SSE == 4:
        vf = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));
      $else:
        vf = _mm_or_si128(vsign_hi,
          _mm_or_si128(_mm_and_si128(vmask_hi, vnorm_hi), _mm_andnot_si128(vmask_hi, vdenorm_hi)));
    }
    if (batch & (2 * sizeof(uint16_t))) {
      _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
      output += 2;

      vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
    }
    if (batch & (1 * sizeof(uint16_t))) {
      _mm_store_ss(output, _mm_castsi128_ps(vf));
    }
  }
}