$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$SIMD_TILE = BATCH_TILE // 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert OP in ["RNDNE", "RNDZ", "RNDU", "RNDD"]
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>

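// Map the template OP to the corresponding AVX rounding-control flag.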
$_MM_FROUND_TO_FLAG = {
$  "RNDNE": "_MM_FROUND_TO_NEAREST_INT",
$  "RNDZ": "_MM_FROUND_TO_ZERO",
$  "RNDU": "_MM_FROUND_TO_POS_INF",
$  "RNDD": "_MM_FROUND_TO_NEG_INF",
$}[OP]
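// Round a batch of f16 values using F16C: widen each group of 8 halfwords
// to f32, round with _mm256_round_ps, and narrow back to f16.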
void xnn_f16_v${OP.lower()}_ukernel__f16c_x${BATCH_TILE}(
    size_t batch,
    const void* input,
    void* output,
    const union xnn_f16_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
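  // Process full tiles of ${BATCH_TILE} elements, then groups of 8, then the
  // 1- to 7-element tail.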
  $if BATCH_TILE > 8:
    for (; batch >= ${BATCH_TILE} * sizeof(uint16_t); batch -= ${BATCH_TILE} * sizeof(uint16_t)) {
      __m256 vacc${ABC[0]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
      $for N in range(1, SIMD_TILE):
        __m256 vacc${ABC[N]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + ${N * 8})));
      i += ${BATCH_TILE};

      $for N in range(SIMD_TILE):
        vacc${ABC[N]} = _mm256_round_ps(vacc${ABC[N]}, ${_MM_FROUND_TO_FLAG} | _MM_FROUND_NO_EXC);

      _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc${ABC[0]}, _MM_FROUND_NO_EXC));
      $for N in range(1, SIMD_TILE):
        _mm_storeu_si128((__m128i*) (o + ${N * 8}), _mm256_cvtps_ph(vacc${ABC[N]}, _MM_FROUND_NO_EXC));
      o += ${BATCH_TILE};
    }
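  // Process any remaining full groups of 8 elements.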
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    __m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
    i += 8;

    vacc = _mm256_round_ps(vacc, ${_MM_FROUND_TO_FLAG} | _MM_FROUND_NO_EXC);

    _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc, _MM_FROUND_NO_EXC));
    o += 8;
  }
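  // Tail of 1 to 7 elements: load a full group of 8 (the kernel is declared
  // XNN_OOB_READS, so reading past the end of the input is permitted), round
  // it, and store only the valid lanes.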
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint16_t));
    assert(batch <= 7 * sizeof(uint16_t));
    __m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
    vacc = _mm256_round_ps(vacc, ${_MM_FROUND_TO_FLAG} | _MM_FROUND_NO_EXC);
    __m128i vh = _mm256_cvtps_ph(vacc, _MM_FROUND_NO_EXC);
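    // Store 4, 2, then 1 element(s) according to the bits of the remaining
    // byte count, shifting the consumed lanes out of vh after each store.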
    if (batch & (4 * sizeof(uint16_t))) {
      _mm_storel_epi64((__m128i*) o, vh);
      vh = _mm_unpackhi_epi64(vh, vh);
      o += 4;
    }
    if (batch & (2 * sizeof(uint16_t))) {
      _mm_storeu_si32(o, vh);
      vh = _mm_srli_epi64(vh, 32);
      o += 2;
    }
    if (batch & (1 * sizeof(uint16_t))) {
      *o = (uint16_t) _mm_extract_epi16(vh, 0);
    }
  }
}
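// E.g., instantiating this template with OP=RNDNE and BATCH_TILE=16 yields
// xnn_f16_vrndne_ukernel__f16c_x16, which rounds to nearest-even.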