keyword | repo_name | file_path | file_extension | file_size | line_count | content | language
|---|---|---|---|---|---|---|---|
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/cmpgt.h | .h | 6,982 | 213 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Christopher Moore <moore@free.fr>
*/
#if !defined(SIMDE_X86_AVX512_CMPGT_H)
#define SIMDE_X86_AVX512_CMPGT_H
#include "types.h"
#include "../avx2.h"
#include "mov.h"
#include "mov_mask.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask64
simde_mm512_cmpgt_epi8_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_cmpgt_epi8_mask(a, b);
#else
simde__m512i_private
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
simde__mmask64 r;
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256) && !defined(HEDLEY_INTEL_VERSION)
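/* Fallback for targets whose natural vector size is at most 256 bits:
 * compare each 256-bit half with the AVX2 helpers, convert the byte-wise
 * result to a 32-bit movemask, and OR the halves into the 64-bit mask at
 * 32-bit offsets. */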
r = 0;
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.m256i) / sizeof(a_.m256i[0])) ; i++) {
const uint32_t t = HEDLEY_STATIC_CAST(uint32_t, simde_mm256_movemask_epi8(simde_mm256_cmpgt_epi8(a_.m256i[i], b_.m256i[i])));
r |= HEDLEY_STATIC_CAST(uint64_t, t) << HEDLEY_STATIC_CAST(uint64_t, i * 32);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
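/* GCC/Clang vector extensions: a_.i8 > b_.i8 yields all-ones bytes where
 * the comparison holds, which movepi8_mask then compresses to one bit per
 * lane. */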
simde__m512i_private tmp;
tmp.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(tmp.i8), a_.i8 > b_.i8);
r = simde_mm512_movepi8_mask(simde__m512i_from_private(tmp));
#else
r = 0;
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) {
r |= (a_.i8[i] > b_.i8[i]) ? (UINT64_C(1) << i) : 0;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmpgt_epi8_mask
#define _mm512_cmpgt_epi8_mask(a, b) simde_mm512_cmpgt_epi8_mask(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask64
simde_mm512_cmpgt_epu8_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_cmpgt_epu8_mask(a, b);
#else
simde__m512i_private
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
simde__mmask64 r = 0;
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
simde__m512i_private tmp;
tmp.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(tmp.i8), a_.u8 > b_.u8);
r = simde_mm512_movepi8_mask(simde__m512i_from_private(tmp));
#else
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) {
r |= (a_.u8[i] > b_.u8[i]) ? (UINT64_C(1) << i) : 0;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmpgt_epu8_mask
#define _mm512_cmpgt_epu8_mask(a, b) simde_mm512_cmpgt_epu8_mask(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm512_cmpgt_epi16_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_cmpgt_epi16_mask(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_cmpgt_epi16(a_.m256i[i], b_.m256i[i]);
}
return simde_mm512_movepi16_mask(simde__m512i_from_private(r_));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmpgt_epi16_mask
#define _mm512_cmpgt_epi16_mask(a, b) simde_mm512_cmpgt_epi16_mask(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm512_cmpgt_epi32_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_cmpgt_epi32_mask(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_cmpgt_epi32(a_.m256i[i], b_.m256i[i]);
}
return simde_mm512_movepi32_mask(simde__m512i_from_private(r_));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmpgt_epi32_mask
#define _mm512_cmpgt_epi32_mask(a, b) simde_mm512_cmpgt_epi32_mask(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm512_mask_cmpgt_epi32_mask (simde__mmask16 k1, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_cmpgt_epi32_mask(k1, a, b);
#else
return simde_mm512_cmpgt_epi32_mask(a, b) & k1;
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cmpgt_epi32_mask
#define _mm512_mask_cmpgt_epi32_mask(k1, a, b) simde_mm512_mask_cmpgt_epi32_mask(k1, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm512_cmpgt_epi64_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_cmpgt_epi64_mask(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_cmpgt_epi64(a_.m256i[i], b_.m256i[i]);
}
return simde_mm512_movepi64_mask(simde__m512i_from_private(r_));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmpgt_epi64_mask
#define _mm512_cmpgt_epi64_mask(a, b) simde_mm512_cmpgt_epi64_mask(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm512_mask_cmpgt_epi64_mask (simde__mmask8 k1, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_cmpgt_epi64_mask(k1, a, b);
#else
return simde_mm512_cmpgt_epi64_mask(a, b) & k1;
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cmpgt_epi64_mask
#define _mm512_mask_cmpgt_epi64_mask(k1, a, b) simde_mm512_mask_cmpgt_epi64_mask(k1, a, b)
#endif
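/* Usage sketch (hypothetical values; simde_mm512_set1_epi32 lives in
 * set1.h, which this header does not include directly -- it is assumed to
 * be reachable via simde's avx512.h umbrella header):
 *
 *   simde__m512i a = simde_mm512_set1_epi32(2);
 *   simde__m512i b = simde_mm512_set1_epi32(1);
 *   simde__mmask16 m = simde_mm512_cmpgt_epi32_mask(a, b); // m == 0xFFFF
 */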
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_CMPGT_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/srai.h | .h | 3,086 | 97 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Hidayat Khan <huk2209@gmail.com>
*/
#if !defined(SIMDE_X86_AVX512_SRAI_H)
#define SIMDE_X86_AVX512_SRAI_H
#include "types.h"
#include "mov.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_srai_epi16 (simde__m512i a, const int imm8) {
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a);
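/* An arithmetic right shift by more than the element width fills every
 * bit with the sign, which is the same result as shifting by 15; clamping
 * also keeps the C shift below well defined. */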
unsigned int shift = HEDLEY_STATIC_CAST(unsigned int, imm8);
if (shift > 15) shift = 15;
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.i16 = a_.i16 >> HEDLEY_STATIC_CAST(int16_t, shift);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = a_.i16[i] >> shift;
}
#endif
return simde__m512i_from_private(r_);
}
#if defined(SIMDE_X86_AVX512BW_NATIVE)
# define simde_mm512_srai_epi16(a, imm8) _mm512_srai_epi16(a, imm8)
#endif
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_srai_epi16
#define _mm512_srai_epi16(a, imm8) simde_mm512_srai_epi16(a, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_srai_epi32 (simde__m512i a, const unsigned int imm8) {
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a);
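/* Note: unlike the epi16 variant above there is no clamping here, so imm8
 * is assumed to lie in [0, 31]; shifting an int32 by 32 or more is
 * undefined behavior in C. */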
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.i32 = a_.i32 >> HEDLEY_STATIC_CAST(int32_t, imm8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] >> imm8;
}
#endif
return simde__m512i_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_NATIVE)
# define simde_mm512_srai_epi32(a, imm8) _mm512_srai_epi32(a, imm8)
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_srai_epi32
#define _mm512_srai_epi32(a, imm8) simde_mm512_srai_epi32(a, imm8)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_SRAI_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/sll.h | .h | 8,169 | 248 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Hidayat Khan <huk2209@gmail.com>
*/
#if !defined(SIMDE_X86_AVX512_SLL_H)
#define SIMDE_X86_AVX512_SLL_H
#include "types.h"
#include "../avx2.h"
#include "mov.h"
#include "setzero.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_sll_epi16 (simde__m512i a, simde__m128i count) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_sll_epi16(a, count);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_sll_epi16(a_.m256i[i], count);
}
#else
simde__m128i_private
count_ = simde__m128i_to_private(count);
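/* Only the low 64 bits of count are consulted; a logical shift by more
 * than the element width clears every lane, hence the early return. The
 * epi32 and epi64 variants below follow the same pattern. */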
uint64_t shift = HEDLEY_STATIC_CAST(uint64_t, count_.i64[0]);
if (shift > 15)
return simde_mm512_setzero_si512();
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.i16 = a_.i16 << HEDLEY_STATIC_CAST(int16_t, shift);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, a_.i16[i] << (shift));
}
#endif
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_sll_epi16
#define _mm512_sll_epi16(a, count) simde_mm512_sll_epi16(a, count)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_sll_epi16 (simde__m512i src, simde__mmask32 k, simde__m512i a, simde__m128i count) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_sll_epi16(src, k, a, count);
#else
return simde_mm512_mask_mov_epi16(src, k, simde_mm512_sll_epi16(a, count));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_sll_epi16
#define _mm512_mask_sll_epi16(src, k, a, count) simde_mm512_mask_sll_epi16(src, k, a, count)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_sll_epi16 (simde__mmask32 k, simde__m512i a, simde__m128i count) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_sll_epi16(k, a, count);
#else
return simde_mm512_maskz_mov_epi16(k, simde_mm512_sll_epi16(a, count));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_sll_epi16
#define _mm512_maskz_sll_epi16(k, a, count) simde_mm512_maskz_sll_epi16(k, a, count)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_sll_epi32 (simde__m512i a, simde__m128i count) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_sll_epi32(a, count);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_sll_epi32(a_.m256i[i], count);
}
#else
simde__m128i_private
count_ = simde__m128i_to_private(count);
uint64_t shift = HEDLEY_STATIC_CAST(uint64_t, count_.i64[0]);
if (shift > 31)
return simde_mm512_setzero_si512();
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.i32 = a_.i32 << HEDLEY_STATIC_CAST(int32_t, shift);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, a_.i32[i] << (shift));
}
#endif
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_sll_epi32
#define _mm512_sll_epi32(a, count) simde_mm512_sll_epi32(a, count)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_sll_epi32(simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_sll_epi32(src, k, a, b);
#else
return simde_mm512_mask_mov_epi32(src, k, simde_mm512_sll_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_sll_epi32
#define _mm512_mask_sll_epi32(src, k, a, b) simde_mm512_mask_sll_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_sll_epi32(simde__mmask16 k, simde__m512i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_sll_epi32(k, a, b);
#else
return simde_mm512_maskz_mov_epi32(k, simde_mm512_sll_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_sll_epi32
#define _mm512_maskz_sll_epi32(k, a, b) simde_mm512_maskz_sll_epi32(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_sll_epi64 (simde__m512i a, simde__m128i count) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_sll_epi64(a, count);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_sll_epi64(a_.m256i[i], count);
}
#else
simde__m128i_private
count_ = simde__m128i_to_private(count);
uint64_t shift = HEDLEY_STATIC_CAST(uint64_t, count_.i64[0]);
if (shift > 63)
return simde_mm512_setzero_si512();
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.i64 = a_.i64 << HEDLEY_STATIC_CAST(int64_t, shift);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
r_.i64[i] = HEDLEY_STATIC_CAST(int64_t, a_.i64[i] << (shift));
}
#endif
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_sll_epi64
#define _mm512_sll_epi64(a, count) simde_mm512_sll_epi64(a, count)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_sll_epi64(simde__m512i src, simde__mmask8 k, simde__m512i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_sll_epi64(src, k, a, b);
#else
return simde_mm512_mask_mov_epi64(src, k, simde_mm512_sll_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_sll_epi64
#define _mm512_mask_sll_epi64(src, k, a, b) simde_mm512_mask_sll_epi64(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_sll_epi64(simde__mmask8 k, simde__m512i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_sll_epi64(k, a, b);
#else
return simde_mm512_maskz_mov_epi64(k, simde_mm512_sll_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_sll_epi64
#define _mm512_maskz_sll_epi64(k, a, b) simde_mm512_maskz_sll_epi64(k, a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_SLL_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/cmpneq.h | .h | 17,516 | 491 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_X86_AVX512_CMPNEQ_H)
#define SIMDE_X86_AVX512_CMPNEQ_H
#include "types.h"
#include "../avx2.h"
#include "mov.h"
#include "mov_mask.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm_cmpneq_epi8_mask(simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_cmpneq_epi8_mask(a, b);
#else
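/* a != b is the complement of a == b; movepi8_mask produces one bit per
 * byte lane and all 16 bits of the simde__mmask16 are significant, so no
 * masking of the inverted value is needed. */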
return ~simde_mm_movepi8_mask(simde_mm_cmpeq_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_cmpneq_epi8_mask
#define _mm_cmpneq_epi8_mask(a, b) simde_mm_cmpneq_epi8_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm_mask_cmpneq_epi8_mask(simde__mmask16 k1, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_mask_cmpneq_epi8_mask(k1, a, b);
#else
return simde_mm_cmpneq_epi8_mask(a, b) & k1;
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmpneq_epi8_mask
#define _mm_mask_cmpneq_epi8_mask(k1, a, b) simde_mm_mask_cmpneq_epi8_mask((k1), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm_cmpneq_epu8_mask(simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_cmpneq_epu8_mask(a, b);
#else
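/* Inequality does not depend on signedness, so the unsigned variant can
 * simply reuse the signed comparison. */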
return simde_mm_cmpneq_epi8_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_cmpneq_epu8_mask
#define _mm_cmpneq_epu8_mask(a, b) simde_mm_cmpneq_epu8_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm_mask_cmpneq_epu8_mask(simde__mmask16 k1, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_mask_cmpneq_epu8_mask(k1, a, b);
#else
return simde_mm_mask_cmpneq_epi8_mask(k1, a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmpneq_epu8_mask
#define _mm_mask_cmpneq_epu8_mask(k1, a, b) simde_mm_mask_cmpneq_epu8_mask((k1), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_cmpneq_epi16_mask(simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_cmpneq_epi16_mask(a, b);
#else
return ~simde_mm_movepi16_mask(simde_mm_cmpeq_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_cmpneq_epi16_mask
#define _mm_cmpneq_epi16_mask(a, b) simde_mm_cmpneq_epi16_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_mask_cmpneq_epi16_mask(simde__mmask8 k1, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_mask_cmpneq_epi16_mask(k1, a, b);
#else
return simde_mm_cmpneq_epi16_mask(a, b) & k1;
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmpneq_epi16_mask
#define _mm_mask_cmpneq_epi16_mask(k1, a, b) simde_mm_mask_cmpneq_epi16_mask((k1), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_cmpneq_epu16_mask(simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_cmpneq_epu16_mask(a, b);
#else
return simde_mm_cmpneq_epi16_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_cmpneq_epu16_mask
#define _mm_cmpneq_epu16_mask(a, b) simde_mm_cmpneq_epu16_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_mask_cmpneq_epu16_mask(simde__mmask8 k1, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_mask_cmpneq_epu16_mask(k1, a, b);
#else
return simde_mm_mask_cmpneq_epi16_mask(k1, a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmpneq_epu16_mask
#define _mm_mask_cmpneq_epu16_mask(k1, a, b) simde_mm_mask_cmpneq_epu16_mask((k1), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_cmpneq_epi32_mask(simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_cmpneq_epi32_mask(a, b);
#else
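/* Only 4 of the 8 mask bits are meaningful for 32-bit lanes in a 128-bit
 * vector; masking with 15 clears the bits that the ~ sets above lane 3. */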
return (~simde_mm_movepi32_mask(simde_mm_cmpeq_epi32(a, b))) & 15;
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_cmpneq_epi32_mask
#define _mm_cmpneq_epi32_mask(a, b) simde_mm_cmpneq_epi32_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_mask_cmpneq_epi32_mask(simde__mmask8 k1, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_cmpneq_epi32_mask(k1, a, b);
#else
return simde_mm_cmpneq_epi32_mask(a, b) & k1;
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmpneq_epi32_mask
#define _mm_mask_cmpneq_epi32_mask(k1, a, b) simde_mm_mask_cmpneq_epi32_mask((k1), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_cmpneq_epu32_mask(simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_cmpneq_epu32_mask(a, b);
#else
return simde_mm_cmpneq_epi32_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_cmpneq_epu32_mask
#define _mm_cmpneq_epu32_mask(a, b) simde_mm_cmpneq_epu32_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_mask_cmpneq_epu32_mask(simde__mmask8 k1, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_cmpneq_epu32_mask(k1, a, b);
#else
return simde_mm_mask_cmpneq_epi32_mask(k1, a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmpneq_epu32_mask
#define _mm_mask_cmpneq_epu32_mask(k1, a, b) simde_mm_mask_cmpneq_epu32_mask((k1), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_cmpneq_epi64_mask(simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_cmpneq_epi64_mask(a, b);
#else
return (~simde_mm_movepi64_mask(simde_mm_cmpeq_epi64(a, b))) & 3;
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_cmpneq_epi64_mask
#define _mm_cmpneq_epi64_mask(a, b) simde_mm_cmpneq_epi64_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_mask_cmpneq_epi64_mask(simde__mmask8 k1, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_cmpneq_epi64_mask(k1, a, b);
#else
return simde_mm_cmpneq_epi64_mask(a, b) & k1;
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmpneq_epi64_mask
#define _mm_mask_cmpneq_epi64_mask(k1, a, b) simde_mm_mask_cmpneq_epi64_mask((k1), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_cmpneq_epu64_mask(simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_cmpneq_epu64_mask(a, b);
#else
return simde_mm_cmpneq_epi64_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_cmpneq_epu64_mask
#define _mm_cmpneq_epu64_mask(a, b) simde_mm_cmpneq_epu64_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_mask_cmpneq_epu64_mask(simde__mmask8 k1, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_cmpneq_epu64_mask(k1, a, b);
#else
return simde_mm_mask_cmpneq_epi64_mask(k1, a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmpneq_epu64_mask
#define _mm_mask_cmpneq_epu64_mask(k1, a, b) simde_mm_mask_cmpneq_epu64_mask((k1), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm256_cmpneq_epi8_mask(simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_cmpneq_epi8_mask(a, b);
#else
return ~simde_mm256_movepi8_mask(simde_mm256_cmpeq_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmpneq_epi8_mask
#define _mm256_cmpneq_epi8_mask(a, b) simde_mm256_cmpneq_epi8_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm256_mask_cmpneq_epi8_mask(simde__mmask32 k1, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_mask_cmpneq_epi8_mask(k1, a, b);
#else
return simde_mm256_cmpneq_epi8_mask(a, b) & k1;
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmpneq_epi8_mask
#define _mm256_mask_cmpneq_epi8_mask(k1, a, b) simde_mm256_mask_cmpneq_epi8_mask((k1), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm256_cmpneq_epu8_mask(simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_cmpneq_epu8_mask(a, b);
#else
return simde_mm256_cmpneq_epi8_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmpneq_epu8_mask
#define _mm256_cmpneq_epu8_mask(a, b) simde_mm256_cmpneq_epu8_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm256_mask_cmpneq_epu8_mask(simde__mmask32 k1, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_mask_cmpneq_epu8_mask(k1, a, b);
#else
return simde_mm256_mask_cmpneq_epi8_mask(k1, a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmpneq_epu8_mask
#define _mm256_mask_cmpneq_epu8_mask(k1, a, b) simde_mm256_mask_cmpneq_epu8_mask((k1), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm256_cmpneq_epi16_mask(simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_cmpneq_epi16_mask(a, b);
#else
return ~simde_mm256_movepi16_mask(simde_mm256_cmpeq_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmpneq_epi16_mask
#define _mm256_cmpneq_epi16_mask(a, b) simde_mm256_cmpneq_epi16_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm256_mask_cmpneq_epi16_mask(simde__mmask16 k1, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_mask_cmpneq_epi16_mask(k1, a, b);
#else
return simde_mm256_cmpneq_epi16_mask(a, b) & k1;
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmpneq_epi16_mask
#define _mm256_mask_cmpneq_epi16_mask(k1, a, b) simde_mm256_mask_cmpneq_epi16_mask((k1), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm256_cmpneq_epu16_mask(simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_cmpneq_epu16_mask(a, b);
#else
return simde_mm256_cmpneq_epi16_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmpneq_epu16_mask
#define _mm256_cmpneq_epu16_mask(a, b) simde_mm256_cmpneq_epu16_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm256_mask_cmpneq_epu16_mask(simde__mmask16 k1, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_mask_cmpneq_epu16_mask(k1, a, b);
#else
return simde_mm256_mask_cmpneq_epi16_mask(k1, a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmpneq_epu16_mask
#define _mm256_mask_cmpneq_epu16_mask(k1, a, b) simde_mm256_mask_cmpneq_epu16_mask((k1), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_cmpneq_epi32_mask(simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_cmpneq_epi32_mask(a, b);
#else
return (~simde_mm256_movepi32_mask(simde_mm256_cmpeq_epi32(a, b)));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmpneq_epi32_mask
#define _mm256_cmpneq_epi32_mask(a, b) simde_mm256_cmpneq_epi32_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_mask_cmpneq_epi32_mask(simde__mmask8 k1, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_cmpneq_epi32_mask(k1, a, b);
#else
return simde_mm256_cmpneq_epi32_mask(a, b) & k1;
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmpneq_epi32_mask
#define _mm256_mask_cmpneq_epi32_mask(k1, a, b) simde_mm256_mask_cmpneq_epi32_mask((k1), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_cmpneq_epu32_mask(simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_cmpneq_epu32_mask(a, b);
#else
return simde_mm256_cmpneq_epi32_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmpneq_epu32_mask
#define _mm256_cmpneq_epu32_mask(a, b) simde_mm256_cmpneq_epu32_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_mask_cmpneq_epu32_mask(simde__mmask8 k1, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_cmpneq_epu32_mask(k1, a, b);
#else
return simde_mm256_mask_cmpneq_epi32_mask(k1, a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmpneq_epu32_mask
#define _mm256_mask_cmpneq_epu32_mask(k1, a, b) simde_mm256_mask_cmpneq_epu32_mask((k1), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_cmpneq_epi64_mask(simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_cmpneq_epi64_mask(a, b);
#else
return (~simde_mm256_movepi64_mask(simde_mm256_cmpeq_epi64(a, b))) & 15;
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmpneq_epi64_mask
#define _mm256_cmpneq_epi64_mask(a, b) simde_mm256_cmpneq_epi64_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_mask_cmpneq_epi64_mask(simde__mmask8 k1, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_cmpneq_epi64_mask(k1, a, b);
#else
return simde_mm256_cmpneq_epi64_mask(a, b) & k1;
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmpneq_epi64_mask
#define _mm256_mask_cmpneq_epi64_mask(k1, a, b) simde_mm256_mask_cmpneq_epi64_mask((k1), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_cmpneq_epu64_mask(simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_cmpneq_epu64_mask(a, b);
#else
return simde_mm256_cmpneq_epi64_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmpneq_epu64_mask
#define _mm256_cmpneq_epu64_mask(a, b) simde_mm256_cmpneq_epu64_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_mask_cmpneq_epu64_mask(simde__mmask8 k1, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_cmpneq_epu64_mask(k1, a, b);
#else
return simde_mm256_mask_cmpneq_epi64_mask(k1, a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmpneq_epu64_mask
#define _mm256_mask_cmpneq_epu64_mask(k1, a, b) simde_mm256_mask_cmpneq_epu64_mask((k1), (a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_CMPNEQ_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/permutex.h | .h | 4,051 | 102 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2023 Michael R. Crusoe <crusoe@debian.org>
*/
#if !defined(SIMDE_X86_AVX512_PERMUTEX_H)
#define SIMDE_X86_AVX512_PERMUTEX_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_permutex_epi64 (simde__m256i a, const int imm8) {
simde__m256i_private
a_ = simde__m256i_to_private(a),
r_;
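/* Each 2-bit field of imm8 selects which of the four 64-bit source
 * elements supplies the corresponding destination element. */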
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
r_.i64[i] = a_.i64[(imm8 >> (i*2)) & 3];
}
return simde__m256i_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm256_permutex_epi64(a, imm8) _mm256_permutex_epi64((a), (imm8))
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_permutex_epi64
#define _mm256_permutex_epi64(a, imm8) simde_mm256_permutex_epi64((a), (imm8))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_permutex_epi64 (simde__m512i a, const int imm8) {
simde__m512i_private
a_ = simde__m512i_to_private(a),
r_;
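/* The 2-bit selectors in imm8 are applied independently to each 256-bit
 * half of the 512-bit vector, matching the hardware's per-lane behavior. */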
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.m256i_private[0].i64) / sizeof(r_.m256i_private[0].i64[0])) ; i++) {
r_.m256i_private[0].i64[i] = a_.m256i_private[0].i64[(imm8 >> (i*2)) & 3];
r_.m256i_private[1].i64[i] = a_.m256i_private[1].i64[(imm8 >> (i*2)) & 3];
}
return simde__m512i_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_permutex_epi64(a, imm8) _mm512_permutex_epi64((a), (imm8))
#elif defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm512_permutex_epi64(a, imm8) SIMDE_STATEMENT_EXPR_(({\
simde__m512i_private simde_mm512_permutex_epi64_a_ = simde__m512i_to_private((a)), simde_mm512_permutex_epi64_r_; \
simde_mm512_permutex_epi64_r_.m256i[0] = simde_mm256_permutex_epi64(simde_mm512_permutex_epi64_a_.m256i[0], (imm8)); \
simde_mm512_permutex_epi64_r_.m256i[1] = simde_mm256_permutex_epi64(simde_mm512_permutex_epi64_a_.m256i[1], (imm8)); \
simde__m512i_from_private(simde_mm512_permutex_epi64_r_); \
}))
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_permutex_epi64
#define _mm512_permutex_epi64(a, imm8) simde_mm512_permutex_epi64((a), (imm8))
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_mask_permutex_epi64(src, k, a, imm8) _mm512_mask_permutex_epi64((src), (k), (a), (imm8))
#else
#define simde_mm512_mask_permutex_epi64(src, k, a, imm8) simde_mm512_mask_mov_epi64((src), (k), simde_mm512_permutex_epi64((a), (imm8)))
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_permutex_epi64
#define _mm512_mask_permutex_epi64(src, k, a, imm8) simde_mm512_mask_permutex_epi64((src), (k), (a), (imm8))
#endif
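/* Usage sketch (hypothetical values; simde_mm256_set_epi64x comes from
 * simde's AVX support and is assumed to be reachable via the umbrella
 * header):
 *
 *   simde__m256i v = simde_mm256_set_epi64x(3, 2, 1, 0); // lanes 0..3 hold 0,1,2,3
 *   simde__m256i r = simde_mm256_permutex_epi64(v, 0x1B); // selectors 3,2,1,0
 *   // lanes 0..3 of r now hold 3,2,1,0: the element order is reversed
 */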
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_PERMUTEX_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/cmpge.h | .h | 50,531 | 1,435 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020-2021 Evan Nemerson <evan@nemerson.com>
* 2020 Christopher Moore <moore@free.fr>
* 2021 Andrew Rodriguez <anrodriguez@linkedin.com>
*/
#if !defined(SIMDE_X86_AVX512_CMPGE_H)
#define SIMDE_X86_AVX512_CMPGE_H
#include "types.h"
#include "mov.h"
#include "mov_mask.h"
#include "movm.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_cmpge_epi8 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_movm_epi8(_mm_cmpge_epi8_mask(a, b));
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
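/* Each backend below produces a full-width vector whose lanes are
 * all-ones where a >= b and all-zeros elsewhere; the *_mask wrappers then
 * compress that vector to one bit per lane. */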
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vcgeq_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i8x16_ge(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i8 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), vec_cmpge(a_.altivec_i8, b_.altivec_i8));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), a_.i8 >= b_.i8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) {
r_.i8[i] = (a_.i8[i] >= b_.i8[i]) ? ~INT8_C(0) : INT8_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm_cmpge_epi8_mask (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_cmpge_epi8_mask(a, b);
#else
return simde_mm_movepi8_mask(simde_x_mm_cmpge_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_cmpge_epi8_mask
#define _mm_cmpge_epi8_mask(a, b) simde_mm_cmpge_epi8_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm_mask_cmpge_epi8_mask(simde__mmask16 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_mask_cmpge_epi8_mask(k, a, b);
#else
return k & simde_mm_cmpge_epi8_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmpge_epi8_mask
#define _mm_mask_cmpge_epi8_mask(k, a, b) simde_mm_mask_cmpge_epi8_mask((k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_x_mm256_cmpge_epi8 (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return simde_mm256_movm_epi8(_mm256_cmpge_epi8_mask(a, b));
#else
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmpge_epi8(a_.m128i[i], b_.m128i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), a_.i8 >= b_.i8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) {
r_.i8[i] = (a_.i8[i] >= b_.i8[i]) ? ~INT8_C(0) : INT8_C(0);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm256_cmpge_epi8_mask (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_cmpge_epi8_mask(a, b);
#else
return simde_mm256_movepi8_mask(simde_x_mm256_cmpge_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmpge_epi8_mask
#define _mm256_cmpge_epi8_mask(a, b) simde_mm256_cmpge_epi8_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm256_mask_cmpge_epi8_mask(simde__mmask32 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_mask_cmpge_epi8_mask(k, a, b);
#else
return k & simde_mm256_cmpge_epi8_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmpge_epi8_mask
#define _mm256_mask_cmpge_epi8_mask(k, a, b) simde_mm256_mask_cmpge_epi8_mask((k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_x_mm512_cmpge_epi8 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return simde_mm512_movm_epi8(_mm512_cmpge_epi8_mask(a, b));
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmpge_epi8(a_.m128i[i], b_.m128i[i]);
}
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_x_mm256_cmpge_epi8(a_.m256i[i], b_.m256i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), a_.i8 >= b_.i8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) {
r_.i8[i] = (a_.i8[i] >= b_.i8[i]) ? ~INT8_C(0) : INT8_C(0);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask64
simde_mm512_cmpge_epi8_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_cmpge_epi8_mask(a, b);
#else
return simde_mm512_movepi8_mask(simde_x_mm512_cmpge_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmpge_epi8_mask
#define _mm512_cmpge_epi8_mask(a, b) simde_mm512_cmpge_epi8_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask64
simde_mm512_mask_cmpge_epi8_mask(simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_cmpge_epi8_mask(k, a, b);
#else
return k & simde_mm512_cmpge_epi8_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cmpge_epi8_mask
#define _mm512_mask_cmpge_epi8_mask(k, a, b) simde_mm512_mask_cmpge_epi8_mask((k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_cmpge_epu8 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_movm_epi8(_mm_cmpge_epu8_mask(a, b));
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vcgeq_u8(a_.neon_u8, b_.neon_u8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_u8x16_ge(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_u8 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_cmpge(a_.altivec_u8, b_.altivec_u8));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), a_.u8 >= b_.u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] >= b_.u8[i]) ? ~INT8_C(0) : INT8_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm_cmpge_epu8_mask (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_cmpge_epu8_mask(a, b);
#else
return simde_mm_movepi8_mask(simde_x_mm_cmpge_epu8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_cmpge_epu8_mask
#define _mm_cmpge_epu8_mask(a, b) simde_mm_cmpge_epu8_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm_mask_cmpge_epu8_mask(simde__mmask16 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_mask_cmpge_epu8_mask(k, a, b);
#else
return k & simde_mm_cmpge_epu8_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmpge_epu8_mask
#define _mm_mask_cmpge_epu8_mask(k, a, b) simde_mm_mask_cmpge_epu8_mask((k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_x_mm256_cmpge_epu8 (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return simde_mm256_movm_epi8(_mm256_cmpge_epu8_mask(a, b));
#else
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmpge_epu8(a_.m128i[i], b_.m128i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), a_.u8 >= b_.u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] >= b_.u8[i]) ? ~INT8_C(0) : INT8_C(0);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm256_cmpge_epu8_mask (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_cmpge_epu8_mask(a, b);
#else
return simde_mm256_movepi8_mask(simde_x_mm256_cmpge_epu8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmpge_epu8_mask
#define _mm256_cmpge_epu8_mask(a, b) simde_mm256_cmpge_epu8_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm256_mask_cmpge_epu8_mask(simde__mmask32 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_mask_cmpge_epu8_mask(k, a, b);
#else
return k & simde_mm256_cmpge_epu8_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmpge_epu8_mask
#define _mm256_mask_cmpge_epu8_mask(k, a, b) simde_mm256_mask_cmpge_epu8_mask((k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_x_mm512_cmpge_epu8 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return simde_mm512_movm_epi8(_mm512_cmpge_epu8_mask(a, b));
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmpge_epu8(a_.m128i[i], b_.m128i[i]);
}
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_x_mm256_cmpge_epu8(a_.m256i[i], b_.m256i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), a_.u8 >= b_.u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] >= b_.u8[i]) ? ~INT8_C(0) : INT8_C(0);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask64
simde_mm512_cmpge_epu8_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_cmpge_epu8_mask(a, b);
#else
return simde_mm512_movepi8_mask(simde_x_mm512_cmpge_epu8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmpge_epu8_mask
#define _mm512_cmpge_epu8_mask(a, b) simde_mm512_cmpge_epu8_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask64
simde_mm512_mask_cmpge_epu8_mask(simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_cmpge_epu8_mask(k, a, b);
#else
return k & simde_mm512_cmpge_epu8_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cmpge_epu8_mask
#define _mm512_mask_cmpge_epu8_mask(k, a, b) simde_mm512_mask_cmpge_epu8_mask((k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_cmpge_epi16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_movm_epi16(_mm_cmpge_epi16_mask(a, b));
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vcgeq_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i16x8_ge(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i16 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed short), vec_cmpge(a_.altivec_i16, b_.altivec_i16));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), a_.i16 >= b_.i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] >= b_.i16[i]) ? ~INT16_C(0) : INT16_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_cmpge_epi16_mask (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_cmpge_epi16_mask(a, b);
#else
return simde_mm_movepi16_mask(simde_x_mm_cmpge_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_cmpge_epi16_mask
#define _mm_cmpge_epi16_mask(a, b) simde_mm_cmpge_epi16_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_mask_cmpge_epi16_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_mask_cmpge_epi16_mask(k, a, b);
#else
return k & simde_mm_cmpge_epi16_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmpge_epi16_mask
#define _mm_mask_cmpge_epi16_mask(k, a, b) simde_mm_mask_cmpge_epi16_mask((k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_x_mm256_cmpge_epi16 (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return simde_mm256_movm_epi16(_mm256_cmpge_epi16_mask(a, b));
#else
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmpge_epi16(a_.m128i[i], b_.m128i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), a_.i16 >= b_.i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] >= b_.i16[i]) ? ~INT16_C(0) : INT16_C(0);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm256_cmpge_epi16_mask (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_cmpge_epi16_mask(a, b);
#else
return simde_mm256_movepi16_mask(simde_x_mm256_cmpge_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmpge_epi16_mask
#define _mm256_cmpge_epi16_mask(a, b) simde_mm256_cmpge_epi16_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm256_mask_cmpge_epi16_mask(simde__mmask16 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_mask_cmpge_epi16_mask(k, a, b);
#else
return k & simde_mm256_cmpge_epi16_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmpge_epi16_mask
#define _mm256_mask_cmpge_epi16_mask(k, a, b) simde_mm256_mask_cmpge_epi16_mask((k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_x_mm512_cmpge_epi16 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return simde_mm512_movm_epi16(_mm512_cmpge_epi16_mask(a, b));
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmpge_epi16(a_.m128i[i], b_.m128i[i]);
}
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_x_mm256_cmpge_epi16(a_.m256i[i], b_.m256i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), a_.i16 >= b_.i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] >= b_.i16[i]) ? ~INT16_C(0) : INT16_C(0);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm512_cmpge_epi16_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_cmpge_epi16_mask(a, b);
#else
return simde_mm512_movepi16_mask(simde_x_mm512_cmpge_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmpge_epi16_mask
#define _mm512_cmpge_epi16_mask(a, b) simde_mm512_cmpge_epi16_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm512_mask_cmpge_epi16_mask(simde__mmask32 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_cmpge_epi16_mask(k, a, b);
#else
return k & simde_mm512_cmpge_epi16_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cmpge_epi16_mask
#define _mm512_mask_cmpge_epi16_mask(k, a, b) simde_mm512_mask_cmpge_epi16_mask((k), (a), (b))
#endif
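/* The unsigned (epu*) variants below mirror the signed ones but compare the
 * unsigned lane views, mapping to the unsigned forms of the platform
 * intrinsics: vcgeq_u16 on NEON, wasm_u16x8_ge on WASM SIMD128, and
 * vec_cmpge over unsigned vectors on z/Arch. */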
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_cmpge_epu16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_movm_epi16(_mm_cmpge_epu16_mask(a, b));
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vcgeq_u16(a_.neon_u16, b_.neon_u16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_u16x8_ge(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_u16 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), vec_cmpge(a_.altivec_u16, b_.altivec_u16));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), a_.u16 >= b_.u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u16) / sizeof(a_.u16[0])) ; i++) {
r_.u16[i] = (a_.u16[i] >= b_.u16[i]) ? ~INT16_C(0) : INT16_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_cmpge_epu16_mask (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_cmpge_epu16_mask(a, b);
#else
return simde_mm_movepi16_mask(simde_x_mm_cmpge_epu16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_cmpge_epu16_mask
#define _mm_cmpge_epu16_mask(a, b) simde_mm_cmpge_epu16_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_mask_cmpge_epu16_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_mask_cmpge_epu16_mask(k, a, b);
#else
return k & simde_mm_cmpge_epu16_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmpge_epu16_mask
#define _mm_mask_cmpge_epu16_mask(k, a, b) simde_mm_mask_cmpge_epu16_mask((k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_x_mm256_cmpge_epu16 (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return simde_mm256_movm_epi16(_mm256_cmpge_epu16_mask(a, b));
#else
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmpge_epu16(a_.m128i[i], b_.m128i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), a_.u16 >= b_.u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u16) / sizeof(a_.u16[0])) ; i++) {
r_.u16[i] = (a_.u16[i] >= b_.u16[i]) ? ~INT16_C(0) : INT16_C(0);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm256_cmpge_epu16_mask (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_cmpge_epu16_mask(a, b);
#else
return simde_mm256_movepi16_mask(simde_x_mm256_cmpge_epu16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmpge_epu16_mask
#define _mm256_cmpge_epu16_mask(a, b) simde_mm256_cmpge_epu16_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm256_mask_cmpge_epu16_mask(simde__mmask16 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_mask_cmpge_epu16_mask(k, a, b);
#else
return k & simde_mm256_cmpge_epu16_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmpge_epu16_mask
#define _mm256_mask_cmpge_epu16_mask(k, a, b) simde_mm256_mask_cmpge_epu16_mask((k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_x_mm512_cmpge_epu16 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return simde_mm512_movm_epi16(_mm512_cmpge_epu16_mask(a, b));
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmpge_epu16(a_.m128i[i], b_.m128i[i]);
}
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_x_mm256_cmpge_epu16(a_.m256i[i], b_.m256i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), a_.u16 >= b_.u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u16) / sizeof(a_.u16[0])) ; i++) {
r_.u16[i] = (a_.u16[i] >= b_.u16[i]) ? ~INT16_C(0) : INT16_C(0);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm512_cmpge_epu16_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_cmpge_epu16_mask(a, b);
#else
return simde_mm512_movepi16_mask(simde_x_mm512_cmpge_epu16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmpge_epu16_mask
#define _mm512_cmpge_epu16_mask(a, b) simde_mm512_cmpge_epu16_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm512_mask_cmpge_epu16_mask(simde__mmask32 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_cmpge_epu16_mask(k, a, b);
#else
return k & simde_mm512_cmpge_epu16_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cmpge_epu16_mask
#define _mm512_mask_cmpge_epu16_mask(k, a, b) simde_mm512_mask_cmpge_epu16_mask((k), (a), (b))
#endif
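/* The 32- and 64-bit element sizes below need only AVX512F (512-bit) or
 * AVX512VL (128/256-bit) natively; AVX512BW is required solely for the
 * 8- and 16-bit element sizes above, which is why the guards change here. */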
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_cmpge_epi32 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return simde_mm_movm_epi32(_mm_cmpge_epi32_mask(a, b));
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgeq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i32x4_ge(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), vec_cmpge(a_.altivec_i32, b_.altivec_i32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 >= b_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) {
r_.i32[i] = (a_.i32[i] >= b_.i32[i]) ? ~INT32_C(0) : INT32_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_cmpge_epi32_mask (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_cmpge_epi32_mask(a, b);
#else
return simde_mm_movepi32_mask(simde_x_mm_cmpge_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_cmpge_epi32_mask
#define _mm_cmpge_epi32_mask(a, b) simde_mm_cmpge_epi32_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_mask_cmpge_epi32_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_cmpge_epi32_mask(k, a, b);
#else
return k & simde_mm_cmpge_epi32_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmpge_epi32_mask
#define _mm_mask_cmpge_epi32_mask(k, a, b) simde_mm_mask_cmpge_epi32_mask((k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_x_mm256_cmpge_epi32 (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return simde_mm256_movm_epi32(_mm256_cmpge_epi32_mask(a, b));
#else
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmpge_epi32(a_.m128i[i], b_.m128i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 >= b_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) {
r_.i32[i] = (a_.i32[i] >= b_.i32[i]) ? ~INT32_C(0) : INT32_C(0);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_cmpge_epi32_mask (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_cmpge_epi32_mask(a, b);
#else
return simde_mm256_movepi32_mask(simde_x_mm256_cmpge_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmpge_epi32_mask
#define _mm256_cmpge_epi32_mask(a, b) simde_mm256_cmpge_epi32_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_mask_cmpge_epi32_mask(simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_cmpge_epi32_mask(k, a, b);
#else
return k & simde_mm256_cmpge_epi32_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmpge_epi32_mask
#define _mm256_mask_cmpge_epi32_mask(k, a, b) simde_mm256_mask_cmpge_epi32_mask((k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_x_mm512_cmpge_epi32 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return simde_mm512_movm_epi32(_mm512_cmpge_epi32_mask(a, b));
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmpge_epi32(a_.m128i[i], b_.m128i[i]);
}
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_x_mm256_cmpge_epi32(a_.m256i[i], b_.m256i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 >= b_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) {
r_.i32[i] = (a_.i32[i] >= b_.i32[i]) ? ~INT32_C(0) : INT32_C(0);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm512_cmpge_epi32_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_cmpge_epi32_mask(a, b);
#else
return simde_mm512_movepi32_mask(simde_x_mm512_cmpge_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmpge_epi32_mask
#define _mm512_cmpge_epi32_mask(a, b) simde_mm512_cmpge_epi32_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm512_mask_cmpge_epi32_mask(simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_cmpge_epi32_mask(k, a, b);
#else
return k & simde_mm512_cmpge_epi32_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cmpge_epi32_mask
#define _mm512_mask_cmpge_epi32_mask(k, a, b) simde_mm512_mask_cmpge_epi32_mask((k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_cmpge_epu32 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return simde_mm_movm_epi32(_mm_cmpge_epu32_mask(a, b));
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgeq_u32(a_.neon_u32, b_.neon_u32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_u32x4_ge(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_u32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmpge(a_.altivec_u32, b_.altivec_u32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.u32 >= b_.u32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u32) / sizeof(a_.u32[0])) ; i++) {
r_.u32[i] = (a_.u32[i] >= b_.u32[i]) ? ~INT32_C(0) : INT32_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_cmpge_epu32_mask (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_cmpge_epu32_mask(a, b);
#else
return simde_mm_movepi32_mask(simde_x_mm_cmpge_epu32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_cmpge_epu32_mask
#define _mm_cmpge_epu32_mask(a, b) simde_mm_cmpge_epu32_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_mask_cmpge_epu32_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_cmpge_epu32_mask(k, a, b);
#else
return k & simde_mm_cmpge_epu32_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmpge_epu32_mask
#define _mm_mask_cmpge_epu32_mask(k, a, b) simde_mm_mask_cmpge_epu32_mask((k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_x_mm256_cmpge_epu32 (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return simde_mm256_movm_epi32(_mm256_cmpge_epu32_mask(a, b));
#else
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmpge_epu32(a_.m128i[i], b_.m128i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.u32 >= b_.u32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u32) / sizeof(a_.u32[0])) ; i++) {
r_.u32[i] = (a_.u32[i] >= b_.u32[i]) ? ~INT32_C(0) : INT32_C(0);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_cmpge_epu32_mask (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_cmpge_epu32_mask(a, b);
#else
return simde_mm256_movepi32_mask(simde_x_mm256_cmpge_epu32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmpge_epu32_mask
#define _mm256_cmpge_epu32_mask(a, b) simde_mm256_cmpge_epu32_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_mask_cmpge_epu32_mask(simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_cmpge_epu32_mask(k, a, b);
#else
return k & simde_mm256_cmpge_epu32_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmpge_epu32_mask
#define _mm256_mask_cmpge_epu32_mask(k, a, b) simde_mm256_mask_cmpge_epu32_mask((k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_x_mm512_cmpge_epu32 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return simde_mm512_movm_epi32(_mm512_cmpge_epu32_mask(a, b));
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmpge_epu32(a_.m128i[i], b_.m128i[i]);
}
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_x_mm256_cmpge_epu32(a_.m256i[i], b_.m256i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.u32 >= b_.u32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u32) / sizeof(a_.u32[0])) ; i++) {
r_.u32[i] = (a_.u32[i] >= b_.u32[i]) ? ~INT32_C(0) : INT32_C(0);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm512_cmpge_epu32_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_cmpge_epu32_mask(a, b);
#else
return simde_mm512_movepi32_mask(simde_x_mm512_cmpge_epu32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmpge_epu32_mask
#define _mm512_cmpge_epu32_mask(a, b) simde_mm512_cmpge_epu32_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm512_mask_cmpge_epu32_mask(simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_cmpge_epu32_mask(k, a, b);
#else
return k & simde_mm512_cmpge_epu32_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cmpge_epu32_mask
#define _mm512_mask_cmpge_epu32_mask(k, a, b) simde_mm512_mask_cmpge_epu32_mask((k), (a), (b))
#endif
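/* There is no direct x86 64-bit `>=` compare before AVX-512 (SSE4.2's
 * pcmpgtq only gives `>`), so the portable paths below use 64-bit NEON
 * compares (AArch64 only, hence the A64V8 guard), wasm's i64x2.ge_s,
 * vector-extension compares, or a scalar loop. */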
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_cmpge_epi64 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return simde_mm_movm_epi64(_mm_cmpge_epi64_mask(a, b));
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_u64 = vcgeq_s64(a_.neon_i64, b_.neon_i64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i64x2_ge(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), vec_cmpge(a_.altivec_i64, b_.altivec_i64));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 >= b_.i64);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) {
r_.i64[i] = (a_.i64[i] >= b_.i64[i]) ? ~INT64_C(0) : INT64_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_cmpge_epi64_mask (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_cmpge_epi64_mask(a, b);
#else
return simde_mm_movepi64_mask(simde_x_mm_cmpge_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_cmpge_epi64_mask
#define _mm_cmpge_epi64_mask(a, b) simde_mm_cmpge_epi64_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_mask_cmpge_epi64_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_cmpge_epi64_mask(k, a, b);
#else
return k & simde_mm_cmpge_epi64_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmpge_epi64_mask
#define _mm_mask_cmpge_epi64_mask(k, a, b) simde_mm_mask_cmpge_epi64_mask((k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_x_mm256_cmpge_epi64 (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return simde_mm256_movm_epi64(_mm256_cmpge_epi64_mask(a, b));
#else
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmpge_epi64(a_.m128i[i], b_.m128i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 >= b_.i64);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) {
r_.i64[i] = (a_.i64[i] >= b_.i64[i]) ? ~INT64_C(0) : INT64_C(0);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_cmpge_epi64_mask (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_cmpge_epi64_mask(a, b);
#else
return simde_mm256_movepi64_mask(simde_x_mm256_cmpge_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmpge_epi64_mask
#define _mm256_cmpge_epi64_mask(a, b) simde_mm256_cmpge_epi64_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_mask_cmpge_epi64_mask(simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_cmpge_epi64_mask(k, a, b);
#else
return k & simde_mm256_cmpge_epi64_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmpge_epi64_mask
#define _mm256_mask_cmpge_epi64_mask(k, a, b) simde_mm256_mask_cmpge_epi64_mask((k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_x_mm512_cmpge_epi64 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return simde_mm512_movm_epi64(_mm512_cmpge_epi64_mask(a, b));
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmpge_epi64(a_.m128i[i], b_.m128i[i]);
}
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_x_mm256_cmpge_epi64(a_.m256i[i], b_.m256i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 >= b_.i64);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) {
r_.i64[i] = (a_.i64[i] >= b_.i64[i]) ? ~INT64_C(0) : INT64_C(0);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm512_cmpge_epi64_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_cmpge_epi64_mask(a, b);
#else
return simde_mm512_movepi64_mask(simde_x_mm512_cmpge_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmpge_epi64_mask
#define _mm512_cmpge_epi64_mask(a, b) simde_mm512_cmpge_epi64_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm512_mask_cmpge_epi64_mask(simde__mmask8 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_cmpge_epi64_mask(k, a, b);
#else
return k & simde_mm512_cmpge_epi64_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cmpge_epi64_mask
#define _mm512_mask_cmpge_epi64_mask(k, a, b) simde_mm512_mask_cmpge_epi64_mask((k), (a), (b))
#endif
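/* Note: unlike the smaller unsigned widths, the epu64 helpers below have no
 * WASM path; WASM SIMD128 exposes no unsigned 64-bit compare, so those
 * targets fall through to the generic implementations. */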
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_cmpge_epu64 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return simde_mm_movm_epi64(_mm_cmpge_epu64_mask(a, b));
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_u64 = vcgeq_u64(a_.neon_u64, b_.neon_u64);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_u64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmpge(a_.altivec_u64, b_.altivec_u64));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), a_.u64 >= b_.u64);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u64) / sizeof(a_.u64[0])) ; i++) {
r_.u64[i] = (a_.u64[i] >= b_.u64[i]) ? ~INT64_C(0) : INT64_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_cmpge_epu64_mask (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_cmpge_epu64_mask(a, b);
#else
return simde_mm_movepi64_mask(simde_x_mm_cmpge_epu64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_cmpge_epu64_mask
#define _mm_cmpge_epu64_mask(a, b) simde_mm_cmpge_epu64_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_mask_cmpge_epu64_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_cmpge_epu64_mask(k, a, b);
#else
return k & simde_mm_cmpge_epu64_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmpge_epu64_mask
#define _mm_mask_cmpge_epu64_mask(k, a, b) simde_mm_mask_cmpge_epu64_mask((k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_x_mm256_cmpge_epu64 (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return simde_mm256_movm_epi64(_mm256_cmpge_epu64_mask(a, b));
#else
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmpge_epu64(a_.m128i[i], b_.m128i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), a_.u64 >= b_.u64);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u64) / sizeof(a_.u64[0])) ; i++) {
r_.u64[i] = (a_.u64[i] >= b_.u64[i]) ? ~INT64_C(0) : INT64_C(0);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_cmpge_epu64_mask (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_cmpge_epu64_mask(a, b);
#else
return simde_mm256_movepi64_mask(simde_x_mm256_cmpge_epu64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmpge_epu64_mask
#define _mm256_cmpge_epu64_mask(a, b) simde_mm256_cmpge_epu64_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_mask_cmpge_epu64_mask(simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_cmpge_epu64_mask(k, a, b);
#else
return k & simde_mm256_cmpge_epu64_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmpge_epu64_mask
#define _mm256_mask_cmpge_epu64_mask(k, a, b) simde_mm256_mask_cmpge_epu64_mask((k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_x_mm512_cmpge_epu64 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return simde_mm512_movm_epi64(_mm512_cmpge_epu64_mask(a, b));
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmpge_epu64(a_.m128i[i], b_.m128i[i]);
}
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_x_mm256_cmpge_epu64(a_.m256i[i], b_.m256i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), a_.u64 >= b_.u64);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u64) / sizeof(a_.u64[0])) ; i++) {
r_.u64[i] = (a_.u64[i] >= b_.u64[i]) ? ~INT64_C(0) : INT64_C(0);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm512_cmpge_epu64_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_cmpge_epu64_mask(a, b);
#else
return simde_mm512_movepi64_mask(simde_x_mm512_cmpge_epu64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmpge_epu64_mask
#define _mm512_cmpge_epu64_mask(a, b) simde_mm512_cmpge_epu64_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm512_mask_cmpge_epu64_mask(simde__mmask8 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_cmpge_epu64_mask(k, a, b);
#else
return k & simde_mm512_cmpge_epu64_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cmpge_epu64_mask
#define _mm512_mask_cmpge_epu64_mask(k, a, b) simde_mm512_mask_cmpge_epu64_mask((k), (a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_CMPGE_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Kunwar Maheep Singh <kunwar.maheep@students.iiit.ac.in>
* 2021 Christopher Moore <moore@free.fr>
*/
/* The ternarylogic implementation is based on Wojciech Muła's work at
* https://github.com/WojciechMula/ternary-logic */
#if !defined(SIMDE_X86_AVX512_TERNARYLOGIC_H)
#define SIMDE_X86_AVX512_TERNARYLOGIC_H
#include "types.h"
#include "movm.h"
#include "mov.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
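/* Each simde_x_ternarylogic_0xNN_impl_ below evaluates the three-input
 * boolean function whose truth table is the byte 0xNN: for input bits
 * (a, b, c) the output bit is ((0xNN >> ((a << 2) | (b << 1) | c)) & 1).
 * Equivalently, imm8 == f(0xF0, 0xCC, 0xAA) computed bitwise, since those
 * constants enumerate all eight input combinations across their bit
 * positions. For example, 0x01 is set only at index 0, i.e. ~(a | b | c)
 * (a three-input NOR), and 0xCA is the bitwise select (a & b) | (~a & c).
 * These helpers are presumably dispatched on imm8 by the public
 * simde_mm*_ternarylogic_epi32/epi64 wrappers later in this file. */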
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x00_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, a);
HEDLEY_STATIC_CAST(void, b);
HEDLEY_STATIC_CAST(void, c);
const uint_fast32_t c0 = 0;
return c0;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x01_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b | c;
const uint_fast32_t t1 = a | t0;
const uint_fast32_t t2 = ~t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x02_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b | a;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = c & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x03_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, c);
const uint_fast32_t t0 = b | a;
const uint_fast32_t t1 = ~t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x04_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a | c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = b & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x05_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, b);
const uint_fast32_t t0 = c | a;
const uint_fast32_t t1 = ~t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x06_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = b ^ c;
const uint_fast32_t t2 = t0 & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x07_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b & c;
const uint_fast32_t t1 = a | t0;
const uint_fast32_t t2 = ~t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x08_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = t0 & b;
const uint_fast32_t t2 = t1 & c;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x09_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ c;
const uint_fast32_t t1 = a | t0;
const uint_fast32_t t2 = ~t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x0a_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, b);
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = c & t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x0b_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = ~b;
const uint_fast32_t t2 = t1 | c;
const uint_fast32_t t3 = t0 & t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x0c_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, c);
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = b & t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x0d_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = ~c;
const uint_fast32_t t2 = t1 | b;
const uint_fast32_t t3 = t0 & t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x0e_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = b | c;
const uint_fast32_t t2 = t0 & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x0f_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, b);
HEDLEY_STATIC_CAST(void, c);
const uint_fast32_t t0 = ~a;
return t0;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x10_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b | c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = a & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x11_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, a);
const uint_fast32_t t0 = c | b;
const uint_fast32_t t1 = ~t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x12_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = a ^ c;
const uint_fast32_t t2 = t0 & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x13_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & c;
const uint_fast32_t t1 = b | t0;
const uint_fast32_t t2 = ~t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x14_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = a ^ b;
const uint_fast32_t t2 = t0 & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x15_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b & a;
const uint_fast32_t t1 = c | t0;
const uint_fast32_t t2 = ~t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x16_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b | c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = a & t1;
const uint_fast32_t t3 = ~a;
const uint_fast32_t t4 = b ^ c;
const uint_fast32_t t5 = t3 & t4;
const uint_fast32_t t6 = t2 | t5;
return t6;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x17_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b | c;
const uint_fast32_t t1 = b & c;
const uint_fast32_t t2 = (a & t0) | (~a & t1);
const uint_fast32_t t3 = ~t2;
return t3;
}
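/* In classical terms, 0x16 above is the "exactly one of a, b, c is set"
 * function, and 0x17 adds the all-zero case ("at most one set"); 0x17 is
 * also the complement of the majority function, 0xe8. */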
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x18_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ b;
const uint_fast32_t t1 = a ^ c;
const uint_fast32_t t2 = t0 & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x19_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ c;
const uint_fast32_t t1 = b & c;
const uint_fast32_t t2 = a & t1;
const uint_fast32_t t3 = t0 ^ t2;
const uint_fast32_t t4 = ~t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x1a_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & b;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = a ^ c;
const uint_fast32_t t3 = t1 & t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x1b_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & c;
const uint_fast32_t t1 = ~b;
const uint_fast32_t t2 = t1 | c;
const uint_fast32_t t3 = t0 ^ t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x1c_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = a ^ b;
const uint_fast32_t t3 = t1 & t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x1d_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & b;
const uint_fast32_t t1 = ~c;
const uint_fast32_t t2 = t1 | b;
const uint_fast32_t t3 = t0 ^ t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x1e_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b | c;
const uint_fast32_t t1 = a ^ t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x1f_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b | c;
const uint_fast32_t t1 = a & t0;
const uint_fast32_t t2 = ~t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x20_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = t0 & a;
const uint_fast32_t t2 = t1 & c;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x21_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ c;
const uint_fast32_t t1 = b | t0;
const uint_fast32_t t2 = ~t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x22_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, a);
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = c & t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x23_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = ~a;
const uint_fast32_t t2 = t1 | c;
const uint_fast32_t t3 = t0 & t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x24_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ b;
const uint_fast32_t t1 = b ^ c;
const uint_fast32_t t2 = t0 & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x25_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & b;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = ~c;
const uint_fast32_t t3 = a ^ t2;
const uint_fast32_t t4 = t1 & t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x26_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & b;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = b ^ c;
const uint_fast32_t t3 = t1 & t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x27_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b & c;
const uint_fast32_t t1 = ~a;
const uint_fast32_t t2 = t1 | c;
const uint_fast32_t t3 = t0 ^ t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x28_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ a;
const uint_fast32_t t1 = c & t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x29_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = t0 | c;
const uint_fast32_t t2 = ~a;
const uint_fast32_t t3 = b ^ c;
const uint_fast32_t t4 = t2 ^ t3;
const uint_fast32_t t5 = t1 & t4;
return t5;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x2a_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b & a;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = c & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x2b_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b & a;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = c & t1;
const uint_fast32_t t3 = ~c;
const uint_fast32_t t4 = b | a;
const uint_fast32_t t5 = ~t4;
const uint_fast32_t t6 = t3 & t5;
const uint_fast32_t t7 = t2 | t6;
return t7;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x2c_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b | c;
const uint_fast32_t t1 = a ^ b;
const uint_fast32_t t2 = t0 & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x2d_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = b | t0;
const uint_fast32_t t2 = a ^ t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x2e_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b | c;
const uint_fast32_t t1 = a & b;
const uint_fast32_t t2 = t0 ^ t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x2f_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = ~b;
const uint_fast32_t t2 = t1 & c;
const uint_fast32_t t3 = t0 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x30_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, c);
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = a & t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x31_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = ~c;
const uint_fast32_t t2 = t1 | a;
const uint_fast32_t t3 = t0 & t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x32_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = a | c;
const uint_fast32_t t2 = t0 & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x33_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, a);
HEDLEY_STATIC_CAST(void, c);
const uint_fast32_t t0 = ~b;
return t0;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x34_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b & c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = a ^ b;
const uint_fast32_t t3 = t1 & t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x35_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & b;
const uint_fast32_t t1 = ~c;
const uint_fast32_t t2 = t1 | a;
const uint_fast32_t t3 = t0 ^ t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x36_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a | c;
const uint_fast32_t t1 = b ^ t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x37_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a | c;
const uint_fast32_t t1 = b & t0;
const uint_fast32_t t2 = ~t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x38_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a | c;
const uint_fast32_t t1 = a ^ b;
const uint_fast32_t t2 = t0 & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x39_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = t0 | a;
const uint_fast32_t t2 = b ^ t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x3a_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = a & t0;
const uint_fast32_t t2 = ~a;
const uint_fast32_t t3 = t2 & c;
const uint_fast32_t t4 = t1 | t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x3b_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = ~a;
const uint_fast32_t t2 = t1 & c;
const uint_fast32_t t3 = t0 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x3c_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, c);
const uint_fast32_t t0 = b ^ a;
return t0;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x3d_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ b;
const uint_fast32_t t1 = a | c;
const uint_fast32_t t2 = ~t1;
const uint_fast32_t t3 = t0 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x3e_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = t0 & c;
const uint_fast32_t t2 = a ^ b;
const uint_fast32_t t3 = t1 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x3f_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, c);
const uint_fast32_t t0 = b & a;
const uint_fast32_t t1 = ~t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x40_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = t0 & a;
const uint_fast32_t t2 = t1 & b;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x41_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ a;
const uint_fast32_t t1 = c | t0;
const uint_fast32_t t2 = ~t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x42_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ c;
const uint_fast32_t t1 = b ^ c;
const uint_fast32_t t2 = t0 & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x43_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = ~b;
const uint_fast32_t t3 = a ^ t2;
const uint_fast32_t t4 = t1 & t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x44_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, a);
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = b & t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x45_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = ~a;
const uint_fast32_t t2 = t1 | b;
const uint_fast32_t t3 = t0 & t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x46_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = b ^ c;
const uint_fast32_t t3 = t1 & t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x47_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b & c;
const uint_fast32_t t1 = ~a;
const uint_fast32_t t2 = t1 | b;
const uint_fast32_t t3 = t0 ^ t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x48_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ c;
const uint_fast32_t t1 = b & t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x49_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = t0 | b;
const uint_fast32_t t2 = ~a;
const uint_fast32_t t3 = b ^ c;
const uint_fast32_t t4 = t2 ^ t3;
const uint_fast32_t t5 = t1 & t4;
return t5;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x4a_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b | c;
const uint_fast32_t t1 = a ^ c;
const uint_fast32_t t2 = t0 & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x4b_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = t0 | c;
const uint_fast32_t t2 = a ^ t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x4c_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = b & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x4d_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = b & t1;
const uint_fast32_t t3 = ~b;
const uint_fast32_t t4 = a | c;
const uint_fast32_t t5 = ~t4;
const uint_fast32_t t6 = t3 & t5;
const uint_fast32_t t7 = t2 | t6;
return t7;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x4e_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = c & t0;
const uint_fast32_t t2 = ~c;
const uint_fast32_t t3 = t2 & b;
const uint_fast32_t t4 = t1 | t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x4f_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = ~c;
const uint_fast32_t t2 = b & t1;
const uint_fast32_t t3 = t0 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x50_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, b);
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = a & t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x51_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = ~b;
const uint_fast32_t t2 = t1 | a;
const uint_fast32_t t3 = t0 & t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x52_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b & c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = a ^ c;
const uint_fast32_t t3 = t1 & t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x53_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & c;
const uint_fast32_t t1 = ~b;
const uint_fast32_t t2 = t1 | a;
const uint_fast32_t t3 = t0 ^ t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x54_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = a | b;
const uint_fast32_t t2 = t0 & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x55_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, a);
HEDLEY_STATIC_CAST(void, b);
const uint_fast32_t t0 = ~c;
return t0;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x56_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b | a;
const uint_fast32_t t1 = c ^ t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x57_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b | a;
const uint_fast32_t t1 = c & t0;
const uint_fast32_t t2 = ~t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x58_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a | b;
const uint_fast32_t t1 = a ^ c;
const uint_fast32_t t2 = t0 & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x59_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = t0 | a;
const uint_fast32_t t2 = c ^ t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x5a_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, b);
const uint_fast32_t t0 = c ^ a;
return t0;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x5b_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a | b;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = a ^ c;
const uint_fast32_t t3 = t1 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x5c_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = a & t0;
const uint_fast32_t t2 = ~a;
const uint_fast32_t t3 = t2 & b;
const uint_fast32_t t4 = t1 | t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x5d_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = ~a;
const uint_fast32_t t2 = t1 & b;
const uint_fast32_t t3 = t0 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x5e_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = t0 & b;
const uint_fast32_t t2 = a ^ c;
const uint_fast32_t t3 = t1 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x5f_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, b);
const uint_fast32_t t0 = c & a;
const uint_fast32_t t1 = ~t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x60_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ c;
const uint_fast32_t t1 = a & t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x61_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = t0 | a;
const uint_fast32_t t2 = ~b;
const uint_fast32_t t3 = a ^ c;
const uint_fast32_t t4 = t2 ^ t3;
const uint_fast32_t t5 = t1 & t4;
return t5;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x62_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a | c;
const uint_fast32_t t1 = b ^ c;
const uint_fast32_t t2 = t0 & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x63_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = t0 | c;
const uint_fast32_t t2 = b ^ t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x64_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a | b;
const uint_fast32_t t1 = b ^ c;
const uint_fast32_t t2 = t0 & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x65_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = t0 | b;
const uint_fast32_t t2 = c ^ t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x66_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, a);
const uint_fast32_t t0 = c ^ b;
return t0;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x67_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ c;
const uint_fast32_t t1 = a | b;
const uint_fast32_t t2 = ~t1;
const uint_fast32_t t3 = t0 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x68_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ c;
const uint_fast32_t t1 = a & t0;
const uint_fast32_t t2 = ~a;
const uint_fast32_t t3 = b & c;
const uint_fast32_t t4 = t2 & t3;
const uint_fast32_t t5 = t1 | t4;
return t5;
}
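/* 0x69 below is the complement of a three-input XOR, i.e. ~(a ^ b ^ c):
 * it is set exactly when an even number of the inputs are set. */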
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x69_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ c;
const uint_fast32_t t1 = a ^ t0;
const uint_fast32_t t2 = ~t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x6a_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b & a;
const uint_fast32_t t1 = c ^ t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x6b_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = t0 & c;
const uint_fast32_t c1 = ~HEDLEY_STATIC_CAST(uint_fast32_t, 0);
const uint_fast32_t t2 = a ^ c1;
const uint_fast32_t t3 = b ^ c;
const uint_fast32_t t4 = t2 ^ t3;
const uint_fast32_t t5 = t1 | t4;
return t5;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x6c_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & c;
const uint_fast32_t t1 = b ^ t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x6d_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = t0 & b;
const uint_fast32_t c1 = ~HEDLEY_STATIC_CAST(uint_fast32_t, 0);
const uint_fast32_t t2 = a ^ c1;
const uint_fast32_t t3 = b ^ c;
const uint_fast32_t t4 = t2 ^ t3;
const uint_fast32_t t5 = t1 | t4;
return t5;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x6e_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = t0 & b;
const uint_fast32_t t2 = b ^ c;
const uint_fast32_t t3 = t1 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x6f_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = b ^ c;
const uint_fast32_t t2 = t0 | t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x70_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b & c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = a & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x71_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b | c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = b ^ c;
const uint_fast32_t t3 = a & t2;
const uint_fast32_t t4 = t1 | t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x72_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = c & t0;
const uint_fast32_t t2 = ~c;
const uint_fast32_t t3 = t2 & a;
const uint_fast32_t t4 = t1 | t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x73_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = ~c;
const uint_fast32_t t2 = a & t1;
const uint_fast32_t t3 = t0 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x74_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = b & t0;
const uint_fast32_t t2 = ~b;
const uint_fast32_t t3 = t2 & a;
const uint_fast32_t t4 = t1 | t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x75_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = ~b;
const uint_fast32_t t2 = a & t1;
const uint_fast32_t t3 = t0 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x76_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = t0 & a;
const uint_fast32_t t2 = b ^ c;
const uint_fast32_t t3 = t1 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x77_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, a);
const uint_fast32_t t0 = c & b;
const uint_fast32_t t1 = ~t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x78_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b & c;
const uint_fast32_t t1 = a ^ t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x79_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = t0 & a;
const uint_fast32_t c1 = ~HEDLEY_STATIC_CAST(uint_fast32_t, 0);
const uint_fast32_t t2 = b ^ c1;
const uint_fast32_t t3 = a ^ c;
const uint_fast32_t t4 = t2 ^ t3;
const uint_fast32_t t5 = t1 | t4;
return t5;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x7a_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = t0 & a;
const uint_fast32_t t2 = a ^ c;
const uint_fast32_t t3 = t1 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x7b_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = a ^ c;
const uint_fast32_t t2 = t0 | t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x7c_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = t0 & a;
const uint_fast32_t t2 = a ^ b;
const uint_fast32_t t3 = t1 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x7d_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = a ^ b;
const uint_fast32_t t2 = t0 | t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x7e_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ b;
const uint_fast32_t t1 = a ^ c;
const uint_fast32_t t2 = t0 | t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x7f_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & b;
const uint_fast32_t t1 = t0 & c;
const uint_fast32_t c1 = ~HEDLEY_STATIC_CAST(uint_fast32_t, 0);
const uint_fast32_t t2 = t1 ^ c1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x80_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b & c;
const uint_fast32_t t1 = a & t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x81_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = ~b;
const uint_fast32_t t3 = a ^ t2;
const uint_fast32_t t4 = t1 & t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x82_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ a;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = c & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x83_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ b;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = ~a;
const uint_fast32_t t3 = t2 | c;
const uint_fast32_t t4 = t1 & t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x84_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = b & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x85_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = ~c;
const uint_fast32_t t3 = t2 | b;
const uint_fast32_t t4 = t1 & t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x86_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b | c;
const uint_fast32_t t1 = a ^ b;
const uint_fast32_t t2 = c ^ t1;
const uint_fast32_t t3 = t0 & t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x87_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b & c;
const uint_fast32_t t1 = a ^ t0;
const uint_fast32_t t2 = ~t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x88_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, a);
const uint_fast32_t t0 = c & b;
return t0;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x89_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = ~a;
const uint_fast32_t t3 = t2 | b;
const uint_fast32_t t4 = t1 & t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x8a_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = t0 | b;
const uint_fast32_t t2 = c & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x8b_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = t0 | b;
const uint_fast32_t t2 = ~b;
const uint_fast32_t t3 = t2 | c;
const uint_fast32_t t4 = t1 & t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x8c_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = t0 | c;
const uint_fast32_t t2 = b & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x8d_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = t0 | b;
const uint_fast32_t t2 = ~a;
const uint_fast32_t t3 = t2 | c;
const uint_fast32_t t4 = t1 & t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x8e_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b & c;
const uint_fast32_t t1 = ~a;
const uint_fast32_t t2 = b ^ c;
const uint_fast32_t t3 = t1 & t2;
const uint_fast32_t t4 = t0 | t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x8f_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = b & c;
const uint_fast32_t t2 = t0 | t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x90_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = a & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x91_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = ~b;
const uint_fast32_t t3 = t2 | a;
const uint_fast32_t t4 = t1 & t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x92_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a | c;
const uint_fast32_t t1 = a ^ b;
const uint_fast32_t t2 = c ^ t1;
const uint_fast32_t t3 = t0 & t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x93_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & c;
const uint_fast32_t t1 = b ^ t0;
const uint_fast32_t t2 = ~t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x94_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a | b;
const uint_fast32_t t1 = a ^ c;
const uint_fast32_t t2 = b ^ t1;
const uint_fast32_t t3 = t0 & t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x95_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b & a;
const uint_fast32_t t1 = c ^ t0;
const uint_fast32_t t2 = ~t1;
return t2;
}
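/* 0x96 is the three-input XOR (a ^ b ^ c), the classic full-adder sum /
 * odd-parity function. */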
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x96_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ c;
const uint_fast32_t t1 = a ^ t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x97_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = t0 | a;
const uint_fast32_t t2 = t1 ^ a;
const uint_fast32_t t3 = b ^ c;
const uint_fast32_t t4 = a ^ t3;
const uint_fast32_t t5 = t2 | t4;
return t5;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x98_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = a | b;
const uint_fast32_t t3 = t1 & t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x99_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, a);
const uint_fast32_t t0 = c ^ b;
const uint_fast32_t t1 = ~t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x9a_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = t0 & a;
const uint_fast32_t t2 = t1 ^ c;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x9b_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = ~a;
const uint_fast32_t t3 = t2 & c;
const uint_fast32_t t4 = t1 | t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x9c_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = t0 & a;
const uint_fast32_t t2 = t1 ^ b;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x9d_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = ~a;
const uint_fast32_t t3 = t2 & b;
const uint_fast32_t t4 = t1 | t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x9e_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b & c;
const uint_fast32_t t1 = a ^ b;
const uint_fast32_t t2 = c ^ t1;
const uint_fast32_t t3 = t0 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0x9f_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ c;
const uint_fast32_t t1 = a & t0;
const uint_fast32_t t2 = ~t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xa0_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, b);
const uint_fast32_t t0 = c & a;
return t0;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xa1_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = ~b;
const uint_fast32_t t3 = t2 | a;
const uint_fast32_t t4 = t1 & t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xa2_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = a | t0;
const uint_fast32_t t2 = c & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xa3_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = t0 | a;
const uint_fast32_t t2 = ~a;
const uint_fast32_t t3 = t2 | c;
const uint_fast32_t t4 = t1 & t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xa4_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = a | b;
const uint_fast32_t t3 = t1 & t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xa5_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, b);
const uint_fast32_t t0 = c ^ a;
const uint_fast32_t t1 = ~t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xa6_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = t0 & b;
const uint_fast32_t t2 = t1 ^ c;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xa7_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = ~b;
const uint_fast32_t t3 = t2 & c;
const uint_fast32_t t4 = t1 | t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xa8_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a | b;
const uint_fast32_t t1 = c & t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xa9_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b | a;
const uint_fast32_t t1 = c ^ t0;
const uint_fast32_t t2 = ~t1;
return t2;
}
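/* 0xAA, 0xCC and 0xF0 are the truth tables of c, b and a themselves; the
 * compression fast path further down is built on exactly these masks. */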
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xaa_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, a);
HEDLEY_STATIC_CAST(void, b);
return c;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xab_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b | a;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = c | t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xac_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & c;
const uint_fast32_t t1 = ~a;
const uint_fast32_t t2 = t1 & b;
const uint_fast32_t t3 = t0 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xad_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = b & c;
const uint_fast32_t t3 = t1 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xae_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = t0 & b;
const uint_fast32_t t2 = t1 | c;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xaf_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, b);
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = c | t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xb0_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = t0 | c;
const uint_fast32_t t2 = a & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xb1_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = t0 | a;
const uint_fast32_t t2 = ~b;
const uint_fast32_t t3 = t2 | c;
const uint_fast32_t t4 = t1 & t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xb2_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & c;
const uint_fast32_t t1 = b & t0;
const uint_fast32_t t2 = ~b;
const uint_fast32_t t3 = a | c;
const uint_fast32_t t4 = t2 & t3;
const uint_fast32_t t5 = t1 | t4;
return t5;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xb3_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = a & c;
const uint_fast32_t t2 = t0 | t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xb4_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = t0 & b;
const uint_fast32_t t2 = t1 ^ a;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xb5_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = ~b;
const uint_fast32_t t3 = t2 & a;
const uint_fast32_t t4 = t1 | t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xb6_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & c;
const uint_fast32_t t1 = a ^ b;
const uint_fast32_t t2 = c ^ t1;
const uint_fast32_t t3 = t0 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xb7_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ c;
const uint_fast32_t t1 = b & t0;
const uint_fast32_t t2 = ~t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xb8_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b & c;
const uint_fast32_t t1 = ~b;
const uint_fast32_t t2 = t1 & a;
const uint_fast32_t t3 = t0 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xb9_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = a & c;
const uint_fast32_t t3 = t1 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xba_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = t0 & a;
const uint_fast32_t t2 = t1 | c;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xbb_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, a);
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = c | t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xbc_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & c;
const uint_fast32_t t1 = a ^ b;
const uint_fast32_t t2 = t0 | t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xbd_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = a ^ b;
const uint_fast32_t t3 = t1 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xbe_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ a;
const uint_fast32_t t1 = c | t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xbf_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b & a;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = c | t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xc0_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, c);
const uint_fast32_t t0 = b & a;
return t0;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xc1_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ b;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = ~c;
const uint_fast32_t t3 = t2 | a;
const uint_fast32_t t4 = t1 & t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xc2_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ b;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = a | c;
const uint_fast32_t t3 = t1 & t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xc3_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, c);
const uint_fast32_t t0 = b ^ a;
const uint_fast32_t t1 = ~t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xc4_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = t0 | a;
const uint_fast32_t t2 = b & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xc5_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = t0 | a;
const uint_fast32_t t2 = ~a;
const uint_fast32_t t3 = t2 | b;
const uint_fast32_t t4 = t1 & t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xc6_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = t0 & c;
const uint_fast32_t t2 = t1 ^ b;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xc7_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ b;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = ~c;
const uint_fast32_t t3 = t2 & b;
const uint_fast32_t t4 = t1 | t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xc8_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a | c;
const uint_fast32_t t1 = b & t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xc9_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a | c;
const uint_fast32_t t1 = b ^ t0;
const uint_fast32_t t2 = ~t1;
return t2;
}
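/* 0xCA is the bitwise select (a ? b : c), i.e. a blend with a as the
 * mask. */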
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xca_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & b;
const uint_fast32_t t1 = ~a;
const uint_fast32_t t2 = t1 & c;
const uint_fast32_t t3 = t0 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xcb_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ b;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = b & c;
const uint_fast32_t t3 = t1 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xcc_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, a);
HEDLEY_STATIC_CAST(void, c);
return b;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xcd_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a | c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = b | t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xce_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = t0 & c;
const uint_fast32_t t2 = t1 | b;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xcf_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, c);
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = b | t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xd0_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = b | t0;
const uint_fast32_t t2 = a & t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xd1_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b | c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = a & b;
const uint_fast32_t t3 = t1 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xd2_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = t0 & c;
const uint_fast32_t t2 = t1 ^ a;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xd3_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ b;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = ~c;
const uint_fast32_t t3 = t2 & a;
const uint_fast32_t t4 = t1 | t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xd4_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = b & t0;
const uint_fast32_t t2 = b ^ c;
const uint_fast32_t t3 = ~t2;
const uint_fast32_t t4 = a & t3;
const uint_fast32_t t5 = t1 | t4;
return t5;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xd5_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = a & b;
const uint_fast32_t t2 = t0 | t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xd6_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & b;
const uint_fast32_t t1 = a ^ c;
const uint_fast32_t t2 = b ^ t1;
const uint_fast32_t t3 = t0 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xd7_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ a;
const uint_fast32_t t1 = c & t0;
const uint_fast32_t t2 = ~t1;
return t2;
}
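/* 0xD8 is another mux: c selects between b (c set) and a (c clear). */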
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xd8_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = c & b;
const uint_fast32_t t1 = ~c;
const uint_fast32_t t2 = t1 & a;
const uint_fast32_t t3 = t0 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xd9_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = a & b;
const uint_fast32_t t3 = t1 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xda_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & b;
const uint_fast32_t t1 = a ^ c;
const uint_fast32_t t2 = t0 | t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xdb_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ b;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = a ^ c;
const uint_fast32_t t3 = t1 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xdc_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = t0 & a;
const uint_fast32_t t2 = t1 | b;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xdd_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, a);
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = b | t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xde_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ c;
const uint_fast32_t t1 = b | t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xdf_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = b | t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xe0_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b | c;
const uint_fast32_t t1 = a & t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xe1_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b | c;
const uint_fast32_t t1 = a ^ t0;
const uint_fast32_t t2 = ~t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xe2_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b & a;
const uint_fast32_t t1 = ~b;
const uint_fast32_t t2 = t1 & c;
const uint_fast32_t t3 = t0 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xe3_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ b;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = a & c;
const uint_fast32_t t3 = t1 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xe4_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = c & a;
const uint_fast32_t t1 = ~c;
const uint_fast32_t t2 = t1 & b;
const uint_fast32_t t3 = t0 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xe5_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = a & b;
const uint_fast32_t t3 = t1 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xe6_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & b;
const uint_fast32_t t1 = b ^ c;
const uint_fast32_t t2 = t0 | t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xe7_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ c;
const uint_fast32_t t1 = ~a;
const uint_fast32_t t2 = t1 ^ c;
const uint_fast32_t t3 = t0 | t2;
return t3;
}
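/* 0xE8 is the majority function: set when at least two of a, b, c are
 * set. */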
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xe8_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b & c;
const uint_fast32_t t1 = b ^ c;
const uint_fast32_t t2 = a & t1;
const uint_fast32_t t3 = t0 | t2;
return t3;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xe9_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = b ^ c;
const uint_fast32_t t2 = t0 ^ t1;
const uint_fast32_t t3 = a & b;
const uint_fast32_t t4 = t2 | t3;
return t4;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xea_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b & a;
const uint_fast32_t t1 = c | t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xeb_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ a;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = c | t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xec_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a & c;
const uint_fast32_t t1 = b | t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xed_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = a ^ c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = b | t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xee_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, a);
const uint_fast32_t t0 = c | b;
return t0;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xef_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~a;
const uint_fast32_t t1 = b | c;
const uint_fast32_t t2 = t0 | t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xf0_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, b);
HEDLEY_STATIC_CAST(void, c);
return a;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xf1_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b | c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = a | t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xf2_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = t0 & c;
const uint_fast32_t t2 = t1 | a;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xf3_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, c);
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = a | t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xf4_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = t0 & b;
const uint_fast32_t t2 = t1 | a;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xf5_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, b);
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = a | t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xf6_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ c;
const uint_fast32_t t1 = a | t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xf7_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b & c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = a | t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xf8_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b & c;
const uint_fast32_t t1 = a | t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xf9_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b ^ c;
const uint_fast32_t t1 = ~t0;
const uint_fast32_t t2 = a | t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xfa_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, b);
const uint_fast32_t t0 = c | a;
return t0;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xfb_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~b;
const uint_fast32_t t1 = t0 | c;
const uint_fast32_t t2 = a | t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xfc_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, c);
const uint_fast32_t t0 = b | a;
return t0;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xfd_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = ~c;
const uint_fast32_t t1 = a | b;
const uint_fast32_t t2 = t0 | t1;
return t2;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xfe_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
const uint_fast32_t t0 = b | c;
const uint_fast32_t t1 = a | t0;
return t1;
}
SIMDE_FUNCTION_ATTRIBUTES
uint_fast32_t
simde_x_ternarylogic_0xff_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) {
HEDLEY_STATIC_CAST(void, a);
HEDLEY_STATIC_CAST(void, b);
HEDLEY_STATIC_CAST(void, c);
const uint_fast32_t c1 = ~HEDLEY_STATIC_CAST(uint_fast32_t, 0);
return c1;
}
#define SIMDE_X_TERNARYLOGIC_CASE(value) \
case value: \
SIMDE_VECTORIZE \
for (size_t i = 0 ; i < (sizeof(r_.u32f) / sizeof(r_.u32f[0])) ; i++) { \
r_.u32f[i] = HEDLEY_CONCAT3(simde_x_ternarylogic_, value, _impl_)(a_.u32f[i], b_.u32f[i], c_.u32f[i]); \
} \
break;
#define SIMDE_X_TERNARYLOGIC_SWITCH(value) \
switch(value) { \
SIMDE_X_TERNARYLOGIC_CASE(0x00) \
SIMDE_X_TERNARYLOGIC_CASE(0x01) \
SIMDE_X_TERNARYLOGIC_CASE(0x02) \
SIMDE_X_TERNARYLOGIC_CASE(0x03) \
SIMDE_X_TERNARYLOGIC_CASE(0x04) \
SIMDE_X_TERNARYLOGIC_CASE(0x05) \
SIMDE_X_TERNARYLOGIC_CASE(0x06) \
SIMDE_X_TERNARYLOGIC_CASE(0x07) \
SIMDE_X_TERNARYLOGIC_CASE(0x08) \
SIMDE_X_TERNARYLOGIC_CASE(0x09) \
SIMDE_X_TERNARYLOGIC_CASE(0x0a) \
SIMDE_X_TERNARYLOGIC_CASE(0x0b) \
SIMDE_X_TERNARYLOGIC_CASE(0x0c) \
SIMDE_X_TERNARYLOGIC_CASE(0x0d) \
SIMDE_X_TERNARYLOGIC_CASE(0x0e) \
SIMDE_X_TERNARYLOGIC_CASE(0x0f) \
SIMDE_X_TERNARYLOGIC_CASE(0x10) \
SIMDE_X_TERNARYLOGIC_CASE(0x11) \
SIMDE_X_TERNARYLOGIC_CASE(0x12) \
SIMDE_X_TERNARYLOGIC_CASE(0x13) \
SIMDE_X_TERNARYLOGIC_CASE(0x14) \
SIMDE_X_TERNARYLOGIC_CASE(0x15) \
SIMDE_X_TERNARYLOGIC_CASE(0x16) \
SIMDE_X_TERNARYLOGIC_CASE(0x17) \
SIMDE_X_TERNARYLOGIC_CASE(0x18) \
SIMDE_X_TERNARYLOGIC_CASE(0x19) \
SIMDE_X_TERNARYLOGIC_CASE(0x1a) \
SIMDE_X_TERNARYLOGIC_CASE(0x1b) \
SIMDE_X_TERNARYLOGIC_CASE(0x1c) \
SIMDE_X_TERNARYLOGIC_CASE(0x1d) \
SIMDE_X_TERNARYLOGIC_CASE(0x1e) \
SIMDE_X_TERNARYLOGIC_CASE(0x1f) \
SIMDE_X_TERNARYLOGIC_CASE(0x20) \
SIMDE_X_TERNARYLOGIC_CASE(0x21) \
SIMDE_X_TERNARYLOGIC_CASE(0x22) \
SIMDE_X_TERNARYLOGIC_CASE(0x23) \
SIMDE_X_TERNARYLOGIC_CASE(0x24) \
SIMDE_X_TERNARYLOGIC_CASE(0x25) \
SIMDE_X_TERNARYLOGIC_CASE(0x26) \
SIMDE_X_TERNARYLOGIC_CASE(0x27) \
SIMDE_X_TERNARYLOGIC_CASE(0x28) \
SIMDE_X_TERNARYLOGIC_CASE(0x29) \
SIMDE_X_TERNARYLOGIC_CASE(0x2a) \
SIMDE_X_TERNARYLOGIC_CASE(0x2b) \
SIMDE_X_TERNARYLOGIC_CASE(0x2c) \
SIMDE_X_TERNARYLOGIC_CASE(0x2d) \
SIMDE_X_TERNARYLOGIC_CASE(0x2e) \
SIMDE_X_TERNARYLOGIC_CASE(0x2f) \
SIMDE_X_TERNARYLOGIC_CASE(0x30) \
SIMDE_X_TERNARYLOGIC_CASE(0x31) \
SIMDE_X_TERNARYLOGIC_CASE(0x32) \
SIMDE_X_TERNARYLOGIC_CASE(0x33) \
SIMDE_X_TERNARYLOGIC_CASE(0x34) \
SIMDE_X_TERNARYLOGIC_CASE(0x35) \
SIMDE_X_TERNARYLOGIC_CASE(0x36) \
SIMDE_X_TERNARYLOGIC_CASE(0x37) \
SIMDE_X_TERNARYLOGIC_CASE(0x38) \
SIMDE_X_TERNARYLOGIC_CASE(0x39) \
SIMDE_X_TERNARYLOGIC_CASE(0x3a) \
SIMDE_X_TERNARYLOGIC_CASE(0x3b) \
SIMDE_X_TERNARYLOGIC_CASE(0x3c) \
SIMDE_X_TERNARYLOGIC_CASE(0x3d) \
SIMDE_X_TERNARYLOGIC_CASE(0x3e) \
SIMDE_X_TERNARYLOGIC_CASE(0x3f) \
SIMDE_X_TERNARYLOGIC_CASE(0x40) \
SIMDE_X_TERNARYLOGIC_CASE(0x41) \
SIMDE_X_TERNARYLOGIC_CASE(0x42) \
SIMDE_X_TERNARYLOGIC_CASE(0x43) \
SIMDE_X_TERNARYLOGIC_CASE(0x44) \
SIMDE_X_TERNARYLOGIC_CASE(0x45) \
SIMDE_X_TERNARYLOGIC_CASE(0x46) \
SIMDE_X_TERNARYLOGIC_CASE(0x47) \
SIMDE_X_TERNARYLOGIC_CASE(0x48) \
SIMDE_X_TERNARYLOGIC_CASE(0x49) \
SIMDE_X_TERNARYLOGIC_CASE(0x4a) \
SIMDE_X_TERNARYLOGIC_CASE(0x4b) \
SIMDE_X_TERNARYLOGIC_CASE(0x4c) \
SIMDE_X_TERNARYLOGIC_CASE(0x4d) \
SIMDE_X_TERNARYLOGIC_CASE(0x4e) \
SIMDE_X_TERNARYLOGIC_CASE(0x4f) \
SIMDE_X_TERNARYLOGIC_CASE(0x50) \
SIMDE_X_TERNARYLOGIC_CASE(0x51) \
SIMDE_X_TERNARYLOGIC_CASE(0x52) \
SIMDE_X_TERNARYLOGIC_CASE(0x53) \
SIMDE_X_TERNARYLOGIC_CASE(0x54) \
SIMDE_X_TERNARYLOGIC_CASE(0x55) \
SIMDE_X_TERNARYLOGIC_CASE(0x56) \
SIMDE_X_TERNARYLOGIC_CASE(0x57) \
SIMDE_X_TERNARYLOGIC_CASE(0x58) \
SIMDE_X_TERNARYLOGIC_CASE(0x59) \
SIMDE_X_TERNARYLOGIC_CASE(0x5a) \
SIMDE_X_TERNARYLOGIC_CASE(0x5b) \
SIMDE_X_TERNARYLOGIC_CASE(0x5c) \
SIMDE_X_TERNARYLOGIC_CASE(0x5d) \
SIMDE_X_TERNARYLOGIC_CASE(0x5e) \
SIMDE_X_TERNARYLOGIC_CASE(0x5f) \
SIMDE_X_TERNARYLOGIC_CASE(0x60) \
SIMDE_X_TERNARYLOGIC_CASE(0x61) \
SIMDE_X_TERNARYLOGIC_CASE(0x62) \
SIMDE_X_TERNARYLOGIC_CASE(0x63) \
SIMDE_X_TERNARYLOGIC_CASE(0x64) \
SIMDE_X_TERNARYLOGIC_CASE(0x65) \
SIMDE_X_TERNARYLOGIC_CASE(0x66) \
SIMDE_X_TERNARYLOGIC_CASE(0x67) \
SIMDE_X_TERNARYLOGIC_CASE(0x68) \
SIMDE_X_TERNARYLOGIC_CASE(0x69) \
SIMDE_X_TERNARYLOGIC_CASE(0x6a) \
SIMDE_X_TERNARYLOGIC_CASE(0x6b) \
SIMDE_X_TERNARYLOGIC_CASE(0x6c) \
SIMDE_X_TERNARYLOGIC_CASE(0x6d) \
SIMDE_X_TERNARYLOGIC_CASE(0x6e) \
SIMDE_X_TERNARYLOGIC_CASE(0x6f) \
SIMDE_X_TERNARYLOGIC_CASE(0x70) \
SIMDE_X_TERNARYLOGIC_CASE(0x71) \
SIMDE_X_TERNARYLOGIC_CASE(0x72) \
SIMDE_X_TERNARYLOGIC_CASE(0x73) \
SIMDE_X_TERNARYLOGIC_CASE(0x74) \
SIMDE_X_TERNARYLOGIC_CASE(0x75) \
SIMDE_X_TERNARYLOGIC_CASE(0x76) \
SIMDE_X_TERNARYLOGIC_CASE(0x77) \
SIMDE_X_TERNARYLOGIC_CASE(0x78) \
SIMDE_X_TERNARYLOGIC_CASE(0x79) \
SIMDE_X_TERNARYLOGIC_CASE(0x7a) \
SIMDE_X_TERNARYLOGIC_CASE(0x7b) \
SIMDE_X_TERNARYLOGIC_CASE(0x7c) \
SIMDE_X_TERNARYLOGIC_CASE(0x7d) \
SIMDE_X_TERNARYLOGIC_CASE(0x7e) \
SIMDE_X_TERNARYLOGIC_CASE(0x7f) \
SIMDE_X_TERNARYLOGIC_CASE(0x80) \
SIMDE_X_TERNARYLOGIC_CASE(0x81) \
SIMDE_X_TERNARYLOGIC_CASE(0x82) \
SIMDE_X_TERNARYLOGIC_CASE(0x83) \
SIMDE_X_TERNARYLOGIC_CASE(0x84) \
SIMDE_X_TERNARYLOGIC_CASE(0x85) \
SIMDE_X_TERNARYLOGIC_CASE(0x86) \
SIMDE_X_TERNARYLOGIC_CASE(0x87) \
SIMDE_X_TERNARYLOGIC_CASE(0x88) \
SIMDE_X_TERNARYLOGIC_CASE(0x89) \
SIMDE_X_TERNARYLOGIC_CASE(0x8a) \
SIMDE_X_TERNARYLOGIC_CASE(0x8b) \
SIMDE_X_TERNARYLOGIC_CASE(0x8c) \
SIMDE_X_TERNARYLOGIC_CASE(0x8d) \
SIMDE_X_TERNARYLOGIC_CASE(0x8e) \
SIMDE_X_TERNARYLOGIC_CASE(0x8f) \
SIMDE_X_TERNARYLOGIC_CASE(0x90) \
SIMDE_X_TERNARYLOGIC_CASE(0x91) \
SIMDE_X_TERNARYLOGIC_CASE(0x92) \
SIMDE_X_TERNARYLOGIC_CASE(0x93) \
SIMDE_X_TERNARYLOGIC_CASE(0x94) \
SIMDE_X_TERNARYLOGIC_CASE(0x95) \
SIMDE_X_TERNARYLOGIC_CASE(0x96) \
SIMDE_X_TERNARYLOGIC_CASE(0x97) \
SIMDE_X_TERNARYLOGIC_CASE(0x98) \
SIMDE_X_TERNARYLOGIC_CASE(0x99) \
SIMDE_X_TERNARYLOGIC_CASE(0x9a) \
SIMDE_X_TERNARYLOGIC_CASE(0x9b) \
SIMDE_X_TERNARYLOGIC_CASE(0x9c) \
SIMDE_X_TERNARYLOGIC_CASE(0x9d) \
SIMDE_X_TERNARYLOGIC_CASE(0x9e) \
SIMDE_X_TERNARYLOGIC_CASE(0x9f) \
SIMDE_X_TERNARYLOGIC_CASE(0xa0) \
SIMDE_X_TERNARYLOGIC_CASE(0xa1) \
SIMDE_X_TERNARYLOGIC_CASE(0xa2) \
SIMDE_X_TERNARYLOGIC_CASE(0xa3) \
SIMDE_X_TERNARYLOGIC_CASE(0xa4) \
SIMDE_X_TERNARYLOGIC_CASE(0xa5) \
SIMDE_X_TERNARYLOGIC_CASE(0xa6) \
SIMDE_X_TERNARYLOGIC_CASE(0xa7) \
SIMDE_X_TERNARYLOGIC_CASE(0xa8) \
SIMDE_X_TERNARYLOGIC_CASE(0xa9) \
SIMDE_X_TERNARYLOGIC_CASE(0xaa) \
SIMDE_X_TERNARYLOGIC_CASE(0xab) \
SIMDE_X_TERNARYLOGIC_CASE(0xac) \
SIMDE_X_TERNARYLOGIC_CASE(0xad) \
SIMDE_X_TERNARYLOGIC_CASE(0xae) \
SIMDE_X_TERNARYLOGIC_CASE(0xaf) \
SIMDE_X_TERNARYLOGIC_CASE(0xb0) \
SIMDE_X_TERNARYLOGIC_CASE(0xb1) \
SIMDE_X_TERNARYLOGIC_CASE(0xb2) \
SIMDE_X_TERNARYLOGIC_CASE(0xb3) \
SIMDE_X_TERNARYLOGIC_CASE(0xb4) \
SIMDE_X_TERNARYLOGIC_CASE(0xb5) \
SIMDE_X_TERNARYLOGIC_CASE(0xb6) \
SIMDE_X_TERNARYLOGIC_CASE(0xb7) \
SIMDE_X_TERNARYLOGIC_CASE(0xb8) \
SIMDE_X_TERNARYLOGIC_CASE(0xb9) \
SIMDE_X_TERNARYLOGIC_CASE(0xba) \
SIMDE_X_TERNARYLOGIC_CASE(0xbb) \
SIMDE_X_TERNARYLOGIC_CASE(0xbc) \
SIMDE_X_TERNARYLOGIC_CASE(0xbd) \
SIMDE_X_TERNARYLOGIC_CASE(0xbe) \
SIMDE_X_TERNARYLOGIC_CASE(0xbf) \
SIMDE_X_TERNARYLOGIC_CASE(0xc0) \
SIMDE_X_TERNARYLOGIC_CASE(0xc1) \
SIMDE_X_TERNARYLOGIC_CASE(0xc2) \
SIMDE_X_TERNARYLOGIC_CASE(0xc3) \
SIMDE_X_TERNARYLOGIC_CASE(0xc4) \
SIMDE_X_TERNARYLOGIC_CASE(0xc5) \
SIMDE_X_TERNARYLOGIC_CASE(0xc6) \
SIMDE_X_TERNARYLOGIC_CASE(0xc7) \
SIMDE_X_TERNARYLOGIC_CASE(0xc8) \
SIMDE_X_TERNARYLOGIC_CASE(0xc9) \
SIMDE_X_TERNARYLOGIC_CASE(0xca) \
SIMDE_X_TERNARYLOGIC_CASE(0xcb) \
SIMDE_X_TERNARYLOGIC_CASE(0xcc) \
SIMDE_X_TERNARYLOGIC_CASE(0xcd) \
SIMDE_X_TERNARYLOGIC_CASE(0xce) \
SIMDE_X_TERNARYLOGIC_CASE(0xcf) \
SIMDE_X_TERNARYLOGIC_CASE(0xd0) \
SIMDE_X_TERNARYLOGIC_CASE(0xd1) \
SIMDE_X_TERNARYLOGIC_CASE(0xd2) \
SIMDE_X_TERNARYLOGIC_CASE(0xd3) \
SIMDE_X_TERNARYLOGIC_CASE(0xd4) \
SIMDE_X_TERNARYLOGIC_CASE(0xd5) \
SIMDE_X_TERNARYLOGIC_CASE(0xd6) \
SIMDE_X_TERNARYLOGIC_CASE(0xd7) \
SIMDE_X_TERNARYLOGIC_CASE(0xd8) \
SIMDE_X_TERNARYLOGIC_CASE(0xd9) \
SIMDE_X_TERNARYLOGIC_CASE(0xda) \
SIMDE_X_TERNARYLOGIC_CASE(0xdb) \
SIMDE_X_TERNARYLOGIC_CASE(0xdc) \
SIMDE_X_TERNARYLOGIC_CASE(0xdd) \
SIMDE_X_TERNARYLOGIC_CASE(0xde) \
SIMDE_X_TERNARYLOGIC_CASE(0xdf) \
SIMDE_X_TERNARYLOGIC_CASE(0xe0) \
SIMDE_X_TERNARYLOGIC_CASE(0xe1) \
SIMDE_X_TERNARYLOGIC_CASE(0xe2) \
SIMDE_X_TERNARYLOGIC_CASE(0xe3) \
SIMDE_X_TERNARYLOGIC_CASE(0xe4) \
SIMDE_X_TERNARYLOGIC_CASE(0xe5) \
SIMDE_X_TERNARYLOGIC_CASE(0xe6) \
SIMDE_X_TERNARYLOGIC_CASE(0xe7) \
SIMDE_X_TERNARYLOGIC_CASE(0xe8) \
SIMDE_X_TERNARYLOGIC_CASE(0xe9) \
SIMDE_X_TERNARYLOGIC_CASE(0xea) \
SIMDE_X_TERNARYLOGIC_CASE(0xeb) \
SIMDE_X_TERNARYLOGIC_CASE(0xec) \
SIMDE_X_TERNARYLOGIC_CASE(0xed) \
SIMDE_X_TERNARYLOGIC_CASE(0xee) \
SIMDE_X_TERNARYLOGIC_CASE(0xef) \
SIMDE_X_TERNARYLOGIC_CASE(0xf0) \
SIMDE_X_TERNARYLOGIC_CASE(0xf1) \
SIMDE_X_TERNARYLOGIC_CASE(0xf2) \
SIMDE_X_TERNARYLOGIC_CASE(0xf3) \
SIMDE_X_TERNARYLOGIC_CASE(0xf4) \
SIMDE_X_TERNARYLOGIC_CASE(0xf5) \
SIMDE_X_TERNARYLOGIC_CASE(0xf6) \
SIMDE_X_TERNARYLOGIC_CASE(0xf7) \
SIMDE_X_TERNARYLOGIC_CASE(0xf8) \
SIMDE_X_TERNARYLOGIC_CASE(0xf9) \
SIMDE_X_TERNARYLOGIC_CASE(0xfa) \
SIMDE_X_TERNARYLOGIC_CASE(0xfb) \
SIMDE_X_TERNARYLOGIC_CASE(0xfc) \
SIMDE_X_TERNARYLOGIC_CASE(0xfd) \
SIMDE_X_TERNARYLOGIC_CASE(0xfe) \
SIMDE_X_TERNARYLOGIC_CASE(0xff) \
}
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm_ternarylogic_epi32(a, b, c, imm8) _mm_ternarylogic_epi32(a, b, c, imm8)
#else
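/* Emulated version.  As with the native intrinsic, imm8 selects one of the
 * 256 three-operand boolean functions and must be a compile-time constant
 * in [0, 255] (enforced by SIMDE_REQUIRE_CONSTANT_RANGE below). */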
SIMDE_HUGE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_ternarylogic_epi32(simde__m128i a, simde__m128i b, simde__m128i c, int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b),
c_ = simde__m128i_to_private(c);
#if defined(SIMDE_TERNARYLOGIC_COMPRESSION)
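/* Instead of the 256-way switch, decompose the truth table: to_do tracks
 * the imm8 bits not yet produced.  Each candidate term (a, b, c, their
 * complements, their pairwise XORs and ANDs) has a fixed truth-table mask;
 * whenever that whole mask is contained in imm8 and some of its bits are
 * still outstanding, the term is ORed into the result and its bits are
 * cleared from to_do.  The four minterm groups at the end (a&b, a&~b,
 * ~a&b, ~(a|b), each optionally gated by c) mop up whatever remains. */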
int to_do, mask;
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
simde__m128i_private t_;
to_do = imm8;
r_.u64 = a_.u64 ^ a_.u64;
mask = 0xFF;
if ((to_do & mask) == mask) {
r_.u64 = ~r_.u64;
to_do &= ~mask;
}
mask = 0xF0;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 = a_.u64;
to_do &= ~mask;
}
mask = 0xCC;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= b_.u64;
to_do &= ~mask;
}
mask = 0xAA;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= c_.u64;
to_do &= ~mask;
}
mask = 0x0F;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= ~a_.u64;
to_do &= ~mask;
}
mask = 0x33;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= ~b_.u64;
to_do &= ~mask;
}
mask = 0x55;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= ~c_.u64;
to_do &= ~mask;
}
mask = 0x3C;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= a_.u64 ^ b_.u64;
to_do &= ~mask;
}
mask = 0x5A;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= a_.u64 ^ c_.u64;
to_do &= ~mask;
}
mask = 0x66;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= b_.u64 ^ c_.u64;
to_do &= ~mask;
}
mask = 0xA0;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= a_.u64 & c_.u64;
to_do &= ~mask;
}
mask = 0x50;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= ~c_.u64 & a_.u64;
to_do &= ~mask;
}
mask = 0x0A;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= ~a_.u64 & c_.u64;
to_do &= ~mask;
}
mask = 0x88;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= b_.u64 & c_.u64;
to_do &= ~mask;
}
mask = 0x44;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= ~c_.u64 & b_.u64;
to_do &= ~mask;
}
mask = 0x22;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= ~b_.u64 & c_.u64;
to_do &= ~mask;
}
if (to_do & 0xc0) {
t_.u64 = a_.u64 & b_.u64;
if ((to_do & 0xc0) == 0xc0) r_.u64 |= t_.u64;
else if (to_do & 0x80) r_.u64 |= c_.u64 & t_.u64;
else r_.u64 |= ~c_.u64 & t_.u64;
}
if (to_do & 0x30) {
t_.u64 = ~b_.u64 & a_.u64;
if ((to_do & 0x30) == 0x30) r_.u64 |= t_.u64;
else if (to_do & 0x20) r_.u64 |= c_.u64 & t_.u64;
else r_.u64 |= ~c_.u64 & t_.u64;
}
if (to_do & 0x0c) {
t_.u64 = ~a_.u64 & b_.u64;
if ((to_do & 0x0c) == 0x0c) r_.u64 |= t_.u64;
else if (to_do & 0x08) r_.u64 |= c_.u64 & t_.u64;
else r_.u64 |= ~c_.u64 & t_.u64;
}
if (to_do & 0x03) {
t_.u64 = ~(a_.u64 | b_.u64);
if ((to_do & 0x03) == 0x03) r_.u64 |= t_.u64;
else if (to_do & 0x02) r_.u64 |= c_.u64 & t_.u64;
else r_.u64 |= ~c_.u64 & t_.u64;
}
#else
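/* Same decomposition, applied lane by lane when vector subscripting is
 * unavailable. */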
uint64_t t;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
to_do = imm8;
mask = 0xFF;
if ((to_do & mask) == mask) {
r_.u64[i] = UINT64_MAX;
to_do &= ~mask;
}
else r_.u64[i] = 0;
mask = 0xF0;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] = a_.u64[i];
to_do &= ~mask;
}
mask = 0xCC;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= b_.u64[i];
to_do &= ~mask;
}
mask = 0xAA;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= c_.u64[i];
to_do &= ~mask;
}
mask = 0x0F;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= ~a_.u64[i];
to_do &= ~mask;
}
mask = 0x33;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= ~b_.u64[i];
to_do &= ~mask;
}
mask = 0x55;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= ~c_.u64[i];
to_do &= ~mask;
}
mask = 0x3C;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= a_.u64[i] ^ b_.u64[i];
to_do &= ~mask;
}
mask = 0x5A;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= a_.u64[i] ^ c_.u64[i];
to_do &= ~mask;
}
mask = 0x66;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= b_.u64[i] ^ c_.u64[i];
to_do &= ~mask;
}
mask = 0xA0;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= a_.u64[i] & c_.u64[i];
to_do &= ~mask;
}
mask = 0x50;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= ~c_.u64[i] & a_.u64[i];
to_do &= ~mask;
}
mask = 0x0A;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= ~a_.u64[i] & c_.u64[i];
to_do &= ~mask;
}
mask = 0x88;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= b_.u64[i] & c_.u64[i];
to_do &= ~mask;
}
mask = 0x44;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= ~c_.u64[i] & b_.u64[i];
to_do &= ~mask;
}
mask = 0x22;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= ~b_.u64[i] & c_.u64[i];
to_do &= ~mask;
}
if (to_do & 0xc0) {
t = a_.u64[i] & b_.u64[i];
if ((to_do & 0xc0) == 0xc0) r_.u64[i] |= t;
else if (to_do & 0x80) r_.u64[i] |= c_.u64[i] & t;
else r_.u64[i] |= ~c_.u64[i] & t;
}
if (to_do & 0x30) {
t = ~b_.u64[i] & a_.u64[i];
if ((to_do & 0x30) == 0x30) r_.u64[i] |= t;
else if (to_do & 0x20) r_.u64[i] |= c_.u64[i] & t;
else r_.u64[i] |= ~c_.u64[i] & t;
}
if (to_do & 0x0c) {
t = ~a_.u64[i] & b_.u64[i];
if ((to_do & 0x0c) == 0x0c) r_.u64[i] |= t;
else if (to_do & 0x08) r_.u64[i] |= c_.u64[i] & t;
else r_.u64[i] |= ~c_.u64[i] & t;
}
if (to_do & 0x03) {
t = ~(a_.u64[i] | b_.u64[i]);
if ((to_do & 0x03) == 0x03) r_.u64[i] |= t;
else if (to_do & 0x02) r_.u64[i] |= c_.u64[i] & t;
else r_.u64[i] |= ~c_.u64[i] & t;
}
}
#endif
#else
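/* Without SIMDE_TERNARYLOGIC_COMPRESSION, fall back to the 256-way switch
 * over the scalar helpers. */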
SIMDE_X_TERNARYLOGIC_SWITCH(imm8 & 255)
#endif
return simde__m128i_from_private(r_);
}
#endif
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_ternarylogic_epi32
#define _mm_ternarylogic_epi32(a, b, c, imm8) simde_mm_ternarylogic_epi32(a, b, c, imm8)
#endif
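/* In the masked forms, src doubles as the first logic operand (the native
 * intrinsic reuses the destination register as operand a); lanes whose
 * mask bit is clear keep src unchanged. */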
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm_mask_ternarylogic_epi32(src, k, a, b, imm8) _mm_mask_ternarylogic_epi32(src, k, a, b, imm8)
#else
#define simde_mm_mask_ternarylogic_epi32(src, k, a, b, imm8) simde_mm_mask_mov_epi32(src, k, simde_mm_ternarylogic_epi32(src, a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_ternarylogic_epi32
#define _mm_mask_ternarylogic_epi32(src, k, a, b, imm8) simde_mm_mask_ternarylogic_epi32(src, k, a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm_maskz_ternarylogic_epi32(k, a, b, c, imm8) _mm_maskz_ternarylogic_epi32(k, a, b, c, imm8)
#else
#define simde_mm_maskz_ternarylogic_epi32(k, a, b, c, imm8) simde_mm_maskz_mov_epi32(k, simde_mm_ternarylogic_epi32(a, b, c, imm8))
#endif
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_ternarylogic_epi32
#define _mm_maskz_ternarylogic_epi32(k, a, b, c, imm8) simde_mm_maskz_ternarylogic_epi32(k, a, b, c, imm8)
#endif
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm256_ternarylogic_epi32(a, b, c, imm8) _mm256_ternarylogic_epi32(a, b, c, imm8)
#else
SIMDE_HUGE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_ternarylogic_epi32(simde__m256i a, simde__m256i b, simde__m256i c, int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b),
c_ = simde__m256i_to_private(c);
#if defined(SIMDE_TERNARYLOGIC_COMPRESSION)
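/* Same compression scheme as simde_mm_ternarylogic_epi32 above, applied to
 * the 256-bit private type. */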
int to_do, mask;
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
simde__m256i_private t_;
to_do = imm8;
r_.u64 = a_.u64 ^ a_.u64;
mask = 0xFF;
if ((to_do & mask) == mask) {
r_.u64 = ~r_.u64;
to_do &= ~mask;
}
mask = 0xF0;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 = a_.u64;
to_do &= ~mask;
}
mask = 0xCC;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= b_.u64;
to_do &= ~mask;
}
mask = 0xAA;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= c_.u64;
to_do &= ~mask;
}
mask = 0x0F;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= ~a_.u64;
to_do &= ~mask;
}
mask = 0x33;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= ~b_.u64;
to_do &= ~mask;
}
mask = 0x55;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= ~c_.u64;
to_do &= ~mask;
}
mask = 0x3C;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= a_.u64 ^ b_.u64;
to_do &= ~mask;
}
mask = 0x5A;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= a_.u64 ^ c_.u64;
to_do &= ~mask;
}
mask = 0x66;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= b_.u64 ^ c_.u64;
to_do &= ~mask;
}
mask = 0xA0;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= a_.u64 & c_.u64;
to_do &= ~mask;
}
mask = 0x50;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= ~c_.u64 & a_.u64;
to_do &= ~mask;
}
mask = 0x0A;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= ~a_.u64 & c_.u64;
to_do &= ~mask;
}
mask = 0x88;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= b_.u64 & c_.u64;
to_do &= ~mask;
}
mask = 0x44;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= ~c_.u64 & b_.u64;
to_do &= ~mask;
}
mask = 0x22;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= ~b_.u64 & c_.u64;
to_do &= ~mask;
}
if (to_do & 0xc0) {
t_.u64 = a_.u64 & b_.u64;
if ((to_do & 0xc0) == 0xc0) r_.u64 |= t_.u64;
else if (to_do & 0x80) r_.u64 |= c_.u64 & t_.u64;
else r_.u64 |= ~c_.u64 & t_.u64;
}
if (to_do & 0x30) {
t_.u64 = ~b_.u64 & a_.u64;
if ((to_do & 0x30) == 0x30) r_.u64 |= t_.u64;
else if (to_do & 0x20) r_.u64 |= c_.u64 & t_.u64;
else r_.u64 |= ~c_.u64 & t_.u64;
}
if (to_do & 0x0c) {
t_.u64 = ~a_.u64 & b_.u64;
if ((to_do & 0x0c) == 0x0c) r_.u64 |= t_.u64;
else if (to_do & 0x08) r_.u64 |= c_.u64 & t_.u64;
else r_.u64 |= ~c_.u64 & t_.u64;
}
if (to_do & 0x03) {
t_.u64 = ~(a_.u64 | b_.u64);
if ((to_do & 0x03) == 0x03) r_.u64 |= t_.u64;
else if (to_do & 0x02) r_.u64 |= c_.u64 & t_.u64;
else r_.u64 |= ~c_.u64 & t_.u64;
}
#else
uint64_t t;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
to_do = imm8;
mask = 0xFF;
if ((to_do & mask) == mask) {
r_.u64[i] = UINT64_MAX;
to_do &= ~mask;
}
else r_.u64[i] = 0;
mask = 0xF0;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] = a_.u64[i];
to_do &= ~mask;
}
mask = 0xCC;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= b_.u64[i];
to_do &= ~mask;
}
mask = 0xAA;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= c_.u64[i];
to_do &= ~mask;
}
mask = 0x0F;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= ~a_.u64[i];
to_do &= ~mask;
}
mask = 0x33;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= ~b_.u64[i];
to_do &= ~mask;
}
mask = 0x55;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= ~c_.u64[i];
to_do &= ~mask;
}
mask = 0x3C;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= a_.u64[i] ^ b_.u64[i];
to_do &= ~mask;
}
mask = 0x5A;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= a_.u64[i] ^ c_.u64[i];
to_do &= ~mask;
}
mask = 0x66;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= b_.u64[i] ^ c_.u64[i];
to_do &= ~mask;
}
mask = 0xA0;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= a_.u64[i] & c_.u64[i];
to_do &= ~mask;
}
mask = 0x50;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= ~c_.u64[i] & a_.u64[i];
to_do &= ~mask;
}
mask = 0x0A;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= ~a_.u64[i] & c_.u64[i];
to_do &= ~mask;
}
mask = 0x88;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= b_.u64[i] & c_.u64[i];
to_do &= ~mask;
}
mask = 0x44;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= ~c_.u64[i] & b_.u64[i];
to_do &= ~mask;
}
mask = 0x22;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= ~b_.u64[i] & c_.u64[i];
to_do &= ~mask;
}
if (to_do & 0xc0) {
t = a_.u64[i] & b_.u64[i];
if ((to_do & 0xc0) == 0xc0) r_.u64[i] |= t;
else if (to_do & 0x80) r_.u64[i] |= c_.u64[i] & t;
else r_.u64[i] |= ~c_.u64[i] & t;
}
if (to_do & 0x30) {
t = ~b_.u64[i] & a_.u64[i];
if ((to_do & 0x30) == 0x30) r_.u64[i] |= t;
else if (to_do & 0x20) r_.u64[i] |= c_.u64[i] & t;
else r_.u64[i] |= ~c_.u64[i] & t;
}
if (to_do & 0x0c) {
t = ~a_.u64[i] & b_.u64[i];
if ((to_do & 0x0c) == 0x0c) r_.u64[i] |= t;
else if (to_do & 0x08) r_.u64[i] |= c_.u64[i] & t;
else r_.u64[i] |= ~c_.u64[i] & t;
}
if (to_do & 0x03) {
t = ~(a_.u64[i] | b_.u64[i]);
if ((to_do & 0x03) == 0x03) r_.u64[i] |= t;
else if (to_do & 0x02) r_.u64[i] |= c_.u64[i] & t;
else r_.u64[i] |= ~c_.u64[i] & t;
}
}
#endif
#else
SIMDE_X_TERNARYLOGIC_SWITCH(imm8 & 255)
#endif
return simde__m256i_from_private(r_);
}
#endif
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm256_ternarylogic_epi32
#define _mm256_ternarylogic_epi32(a, b, c, imm8) simde_mm256_ternarylogic_epi32(a, b, c, imm8)
#endif
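/* A minimal usage sketch: imm8 = 0xCA is the classic bitwise select,
* (a & b) | (~a & c), since bit (a << 2 | b << 1 | c) of 0xCA takes b's bit
* where a has a 1 and c's bit elsewhere. The helper name below is
* illustrative only, not part of the API. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_x_example_bitwise_select_epi32 (simde__m256i a, simde__m256i b, simde__m256i c) {
/* truth table 0b11001010 == 0xCA: picks b where a has a 1 bit, c elsewhere */
return simde_mm256_ternarylogic_epi32(a, b, c, 0xCA);
}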
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm256_mask_ternarylogic_epi32(src, k, a, b, imm8) _mm256_mask_ternarylogic_epi32(src, k, a, b, imm8)
#else
#define simde_mm256_mask_ternarylogic_epi32(src, k, a, b, imm8) simde_mm256_mask_mov_epi32(src, k, simde_mm256_ternarylogic_epi32(src, a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_ternarylogic_epi32
#define _mm256_mask_ternarylogic_epi32(src, k, a, b, imm8) simde_mm256_mask_ternarylogic_epi32(src, k, a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm256_maskz_ternarylogic_epi32(k, a, b, c, imm8) _mm256_maskz_ternarylogic_epi32(k, a, b, c, imm8)
#else
#define simde_mm256_maskz_ternarylogic_epi32(k, a, b, c, imm8) simde_mm256_maskz_mov_epi32(k, simde_mm256_ternarylogic_epi32(a, b, c, imm8))
#endif
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_ternarylogic_epi32
#define _mm256_maskz_ternarylogic_epi32(k, a, b, c, imm8) simde_mm256_maskz_ternarylogic_epi32(k, a, b, c, imm8)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_ternarylogic_epi32(a, b, c, imm8) _mm512_ternarylogic_epi32(a, b, c, imm8)
#else
SIMDE_HUGE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_ternarylogic_epi32(simde__m512i a, simde__m512i b, simde__m512i c, int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b),
c_ = simde__m512i_to_private(c);
#if defined(SIMDE_TERNARYLOGIC_COMPRESSION)
int to_do, mask;
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
simde__m512i_private t_;
to_do = imm8;
r_.u64 = a_.u64 ^ a_.u64;
mask = 0xFF;
if ((to_do & mask) == mask) {
r_.u64 = ~r_.u64;
to_do &= ~mask;
}
mask = 0xF0;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 = a_.u64;
to_do &= ~mask;
}
mask = 0xCC;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= b_.u64;
to_do &= ~mask;
}
mask = 0xAA;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= c_.u64;
to_do &= ~mask;
}
mask = 0x0F;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= ~a_.u64;
to_do &= ~mask;
}
mask = 0x33;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= ~b_.u64;
to_do &= ~mask;
}
mask = 0x55;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= ~c_.u64;
to_do &= ~mask;
}
mask = 0x3C;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= a_.u64 ^ b_.u64;
to_do &= ~mask;
}
mask = 0x5A;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= a_.u64 ^ c_.u64;
to_do &= ~mask;
}
mask = 0x66;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= b_.u64 ^ c_.u64;
to_do &= ~mask;
}
mask = 0xA0;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= a_.u64 & c_.u64;
to_do &= ~mask;
}
mask = 0x50;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= ~c_.u64 & a_.u64;
to_do &= ~mask;
}
mask = 0x0A;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= ~a_.u64 & c_.u64;
to_do &= ~mask;
}
mask = 0x88;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= b_.u64 & c_.u64;
to_do &= ~mask;
}
mask = 0x44;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= ~c_.u64 & b_.u64;
to_do &= ~mask;
}
mask = 0x22;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64 |= ~b_.u64 & c_.u64;
to_do &= ~mask;
}
if (to_do & 0xc0) {
t_.u64 = a_.u64 & b_.u64;
if ((to_do & 0xc0) == 0xc0) r_.u64 |= t_.u64;
else if (to_do & 0x80) r_.u64 |= c_.u64 & t_.u64;
else r_.u64 |= ~c_.u64 & t_.u64;
}
if (to_do & 0x30) {
t_.u64 = ~b_.u64 & a_.u64;
if ((to_do & 0x30) == 0x30) r_.u64 |= t_.u64;
else if (to_do & 0x20) r_.u64 |= c_.u64 & t_.u64;
else r_.u64 |= ~c_.u64 & t_.u64;
}
if (to_do & 0x0c) {
t_.u64 = ~a_.u64 & b_.u64;
if ((to_do & 0x0c) == 0x0c) r_.u64 |= t_.u64;
else if (to_do & 0x08) r_.u64 |= c_.u64 & t_.u64;
else r_.u64 |= ~c_.u64 & t_.u64;
}
if (to_do & 0x03) {
t_.u64 = ~(a_.u64 | b_.u64);
if ((to_do & 0x03) == 0x03) r_.u64 |= t_.u64;
else if (to_do & 0x02) r_.u64 |= c_.u64 & t_.u64;
else r_.u64 |= ~c_.u64 & t_.u64;
}
#else
uint64_t t;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
to_do = imm8;
mask = 0xFF;
if ((to_do & mask) == mask) {
r_.u64[i] = UINT64_MAX;
to_do &= ~mask;
}
else r_.u64[i] = 0;
mask = 0xF0;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] = a_.u64[i];
to_do &= ~mask;
}
mask = 0xCC;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= b_.u64[i];
to_do &= ~mask;
}
mask = 0xAA;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= c_.u64[i];
to_do &= ~mask;
}
mask = 0x0F;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= ~a_.u64[i];
to_do &= ~mask;
}
mask = 0x33;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= ~b_.u64[i];
to_do &= ~mask;
}
mask = 0x55;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= ~c_.u64[i];
to_do &= ~mask;
}
mask = 0x3C;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= a_.u64[i] ^ b_.u64[i];
to_do &= ~mask;
}
mask = 0x5A;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= a_.u64[i] ^ c_.u64[i];
to_do &= ~mask;
}
mask = 0x66;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= b_.u64[i] ^ c_.u64[i];
to_do &= ~mask;
}
mask = 0xA0;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= a_.u64[i] & c_.u64[i];
to_do &= ~mask;
}
mask = 0x50;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= ~c_.u64[i] & a_.u64[i];
to_do &= ~mask;
}
mask = 0x0A;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= ~a_.u64[i] & c_.u64[i];
to_do &= ~mask;
}
mask = 0x88;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= b_.u64[i] & c_.u64[i];
to_do &= ~mask;
}
mask = 0x44;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= ~c_.u64[i] & b_.u64[i];
to_do &= ~mask;
}
mask = 0x22;
if ((to_do & mask) && ((imm8 & mask) == mask)) {
r_.u64[i] |= ~b_.u64[i] & c_.u64[i];
to_do &= ~mask;
}
if (to_do & 0xc0) {
t = a_.u64[i] & b_.u64[i];
if ((to_do & 0xc0) == 0xc0) r_.u64[i] |= t;
else if (to_do & 0x80) r_.u64[i] |= c_.u64[i] & t;
else r_.u64[i] |= ~c_.u64[i] & t;
}
if (to_do & 0x30) {
t = ~b_.u64[i] & a_.u64[i];
if ((to_do & 0x30) == 0x30) r_.u64[i] |= t;
else if (to_do & 0x20) r_.u64[i] |= c_.u64[i] & t;
else r_.u64[i] |= ~c_.u64[i] & t;
}
if (to_do & 0x0c) {
t = ~a_.u64[i] & b_.u64[i];
if ((to_do & 0x0c) == 0x0c) r_.u64[i] |= t;
else if (to_do & 0x08) r_.u64[i] |= c_.u64[i] & t;
else r_.u64[i] |= ~c_.u64[i] & t;
}
if (to_do & 0x03) {
t = ~(a_.u64[i] | b_.u64[i]);
if ((to_do & 0x03) == 0x03) r_.u64[i] |= t;
else if (to_do & 0x02) r_.u64[i] |= c_.u64[i] & t;
else r_.u64[i] |= ~c_.u64[i] & t;
}
}
#endif
#else
SIMDE_X_TERNARYLOGIC_SWITCH(imm8 & 255)
#endif
return simde__m512i_from_private(r_);
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_ternarylogic_epi32
#define _mm512_ternarylogic_epi32(a, b, c, imm8) simde_mm512_ternarylogic_epi32(a, b, c, imm8)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_mask_ternarylogic_epi32(src, k, a, b, imm8) _mm512_mask_ternarylogic_epi32(src, k, a, b, imm8)
#else
#define simde_mm512_mask_ternarylogic_epi32(src, k, a, b, imm8) simde_mm512_mask_mov_epi32(src, k, simde_mm512_ternarylogic_epi32(src, a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_ternarylogic_epi32
#define _mm512_mask_ternarylogic_epi32(src, k, a, b, imm8) simde_mm512_mask_ternarylogic_epi32(src, k, a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_maskz_ternarylogic_epi32(k, a, b, c, imm8) _mm512_maskz_ternarylogic_epi32(k, a, b, c, imm8)
#else
#define simde_mm512_maskz_ternarylogic_epi32(k, a, b, c, imm8) simde_mm512_maskz_mov_epi32(k, simde_mm512_ternarylogic_epi32(a, b, c, imm8))
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_ternarylogic_epi32
#define _mm512_maskz_ternarylogic_epi32(k, a, b, c, imm8) simde_mm512_maskz_ternarylogic_epi32(k, a, b, c, imm8)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm_ternarylogic_epi64(a, b, c, imm8) _mm_ternarylogic_epi64(a, b, c, imm8)
#else
#define simde_mm_ternarylogic_epi64(a, b, c, imm8) simde_mm_ternarylogic_epi32(a, b, c, imm8)
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_ternarylogic_epi64
#define _mm_ternarylogic_epi64(a, b, c, imm8) simde_mm_ternarylogic_epi64(a, b, c, imm8)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm_mask_ternarylogic_epi64(src, k, a, b, imm8) _mm_mask_ternarylogic_epi64(src, k, a, b, imm8)
#else
#define simde_mm_mask_ternarylogic_epi64(src, k, a, b, imm8) simde_mm_mask_mov_epi64(src, k, simde_mm_ternarylogic_epi64(src, a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_ternarylogic_epi64
#define _mm_mask_ternarylogic_epi64(src, k, a, b, imm8) simde_mm_mask_ternarylogic_epi64(src, k, a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm_maskz_ternarylogic_epi64(k, a, b, c, imm8) _mm_maskz_ternarylogic_epi64(k, a, b, c, imm8)
#else
#define simde_mm_maskz_ternarylogic_epi64(k, a, b, c, imm8) simde_mm_maskz_mov_epi64(k, simde_mm_ternarylogic_epi64(a, b, c, imm8))
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_ternarylogic_epi64
#define _mm_maskz_ternarylogic_epi64(k, a, b, c, imm8) simde_mm_maskz_ternarylogic_epi64(k, a, b, c, imm8)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm256_ternarylogic_epi64(a, b, c, imm8) _mm256_ternarylogic_epi64(a, b, c, imm8)
#else
#define simde_mm256_ternarylogic_epi64(a, b, c, imm8) simde_mm256_ternarylogic_epi32(a, b, c, imm8)
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_ternarylogic_epi64
#define _mm256_ternarylogic_epi64(a, b, c, imm8) simde_mm256_ternarylogic_epi64(a, b, c, imm8)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm256_mask_ternarylogic_epi64(src, k, a, b, imm8) _mm256_mask_ternarylogic_epi64(src, k, a, b, imm8)
#else
#define simde_mm256_mask_ternarylogic_epi64(src, k, a, b, imm8) simde_mm256_mask_mov_epi64(src, k, simde_mm256_ternarylogic_epi64(src, a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_ternarylogic_epi64
#define _mm256_mask_ternarylogic_epi64(src, k, a, b, imm8) simde_mm256_mask_ternarylogic_epi64(src, k, a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm256_maskz_ternarylogic_epi64(k, a, b, c, imm8) _mm256_maskz_ternarylogic_epi64(k, a, b, c, imm8)
#else
#define simde_mm256_maskz_ternarylogic_epi64(k, a, b, c, imm8) simde_mm256_maskz_mov_epi64(k, simde_mm256_ternarylogic_epi64(a, b, c, imm8))
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_ternarylogic_epi64
#define _mm256_maskz_ternarylogic_epi64(k, a, b, c, imm8) simde_mm256_maskz_ternarylogic_epi64(k, a, b, c, imm8)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_ternarylogic_epi64(a, b, c, imm8) _mm512_ternarylogic_epi64(a, b, c, imm8)
#else
#define simde_mm512_ternarylogic_epi64(a, b, c, imm8) simde_mm512_ternarylogic_epi32(a, b, c, imm8)
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_ternarylogic_epi64
#define _mm512_ternarylogic_epi64(a, b, c, imm8) simde_mm512_ternarylogic_epi64(a, b, c, imm8)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_mask_ternarylogic_epi64(src, k, a, b, imm8) _mm512_mask_ternarylogic_epi64(src, k, a, b, imm8)
#else
#define simde_mm512_mask_ternarylogic_epi64(src, k, a, b, imm8) simde_mm512_mask_mov_epi64(src, k, simde_mm512_ternarylogic_epi64(src, a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_ternarylogic_epi64
#define _mm512_mask_ternarylogic_epi64(src, k, a, b, imm8) simde_mm512_mask_ternarylogic_epi64(src, k, a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_maskz_ternarylogic_epi64(k, a, b, c, imm8) _mm512_maskz_ternarylogic_epi64(k, a, b, c, imm8)
#else
#define simde_mm512_maskz_ternarylogic_epi64(k, a, b, c, imm8) simde_mm512_maskz_mov_epi64(k, simde_mm512_ternarylogic_epi64(a, b, c, imm8))
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_ternarylogic_epi64
#define _mm512_maskz_ternarylogic_epi64(k, a, b, c, imm8) simde_mm512_maskz_ternarylogic_epi64(k, a, b, c, imm8)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_TERNARYLOGIC_H) */
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/mulhrs.h | .h | 2,272 | 66
/* SPDX-License-Identifier: MIT
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Hidayat Khan <huk2209@gmail.com>
*/
#if !defined(SIMDE_X86_AVX512_MULHRS_H)
#define SIMDE_X86_AVX512_MULHRS_H
#include "types.h"
#include "mov.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mulhrs_epi16 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mulhrs_epi16(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, (((HEDLEY_STATIC_CAST(int32_t, a_.i16[i]) * HEDLEY_STATIC_CAST(int32_t, b_.i16[i])) + 0x4000) >> 15));
}
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mulhrs_epi16
#define _mm512_mulhrs_epi16(a, b) simde_mm512_mulhrs_epi16(a, b)
#endif
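/* Worked example of the rounding above, reading the 16-bit lanes as Q15
* fixed point: a = 0x4000 (0.5) and b = 0x2000 (0.25) give the 32-bit
* product 0x08000000; adding the rounding constant 0x4000 and shifting right
* by 15 yields 0x1000, i.e. 0.125. */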
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_MULHRS_H) */
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/and.h | .h | 9,570 | 306
/* SPDX-License-Identifier: MIT
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Himanshi Mathur <himanshi18037@iiitd.ac.in>
* 2020 Hidayat Khan <huk2209@gmail.com>
*/
#if !defined(SIMDE_X86_AVX512_AND_H)
#define SIMDE_X86_AVX512_AND_H
#include "types.h"
#include "../avx2.h"
#include "mov.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_and_pd (simde__m512d a, simde__m512d b) {
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
return _mm512_and_pd(a, b);
#else
simde__m512d_private
r_,
a_ = simde__m512d_to_private(a),
b_ = simde__m512d_to_private(b);
#if defined(SIMDE_X86_AVX_NATIVE)
r_.m256d[0] = simde_mm256_and_pd(a_.m256d[0], b_.m256d[0]);
r_.m256d[1] = simde_mm256_and_pd(a_.m256d[1], b_.m256d[1]);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f & b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])) ; i++) {
r_.i32f[i] = a_.i32f[i] & b_.i32f[i];
}
#endif
return simde__m512d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_and_pd
#define _mm512_and_pd(a, b) simde_mm512_and_pd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_and_ps (simde__m512 a, simde__m512 b) {
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
return _mm512_and_ps(a, b);
#else
simde__m512_private
r_,
a_ = simde__m512_to_private(a),
b_ = simde__m512_to_private(b);
#if defined(SIMDE_X86_AVX_NATIVE)
r_.m256[0] = simde_mm256_and_ps(a_.m256[0], b_.m256[0]);
r_.m256[1] = simde_mm256_and_ps(a_.m256[1], b_.m256[1]);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f & b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])) ; i++) {
r_.i32f[i] = a_.i32f[i] & b_.i32f[i];
}
#endif
return simde__m512_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_and_ps
#define _mm512_and_ps(a, b) simde_mm512_and_ps(a, b)
#endif
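/* One common use of the single-precision AND: clearing the sign bit of every
* lane gives a branch-free absolute value. A minimal sketch; it assumes the
* set1/cast helpers from the rest of the AVX-512 layer are in scope, and the
* helper name is illustrative only. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_x_example_abs_ps (simde__m512 a) {
/* 0x7FFFFFFF keeps exponent and mantissa bits, drops the sign bit */
return simde_mm512_and_ps(a, simde_mm512_castsi512_ps(simde_mm512_set1_epi32(INT32_C(0x7FFFFFFF))));
}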
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_and_ps(simde__m512 src, simde__mmask16 k, simde__m512 a, simde__m512 b) {
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
return _mm512_mask_and_ps(src, k, a, b);
#else
return simde_mm512_mask_mov_ps(src, k, simde_mm512_and_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_and_ps
#define _mm512_mask_and_ps(src, k, a, b) simde_mm512_mask_and_ps(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_maskz_and_ps(simde__mmask16 k, simde__m512 a, simde__m512 b) {
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
return _mm512_maskz_and_ps(k, a, b);
#else
return simde_mm512_maskz_mov_ps(k, simde_mm512_and_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_and_ps
#define _mm512_maskz_and_ps(k, a, b) simde_mm512_maskz_and_ps(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_and_pd(simde__m512d src, simde__mmask8 k, simde__m512d a, simde__m512d b) {
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
return _mm512_mask_and_pd(src, k, a, b);
#else
return simde_mm512_mask_mov_pd(src, k, simde_mm512_and_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_and_pd
#define _mm512_mask_and_pd(src, k, a, b) simde_mm512_mask_and_pd(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_maskz_and_pd(simde__mmask8 k, simde__m512d a, simde__m512d b) {
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
return _mm512_maskz_and_pd(k, a, b);
#else
return simde_mm512_maskz_mov_pd(k, simde_mm512_and_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_and_pd
#define _mm512_maskz_and_pd(k, a, b) simde_mm512_maskz_and_pd(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_and_epi32 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_and_epi32(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 & b_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] & b_.i32[i];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_and_epi32
#define _mm512_and_epi32(a, b) simde_mm512_and_epi32(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_and_epi32(simde__m512i src, simde__mmask16 k, simde__m512i v2, simde__m512i v3) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_and_epi32(src, k, v2, v3);
#else
return simde_mm512_mask_mov_epi32(src, k, simde_mm512_and_epi32(v2, v3));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_and_epi32
#define _mm512_mask_and_epi32(src, k, v2, v3) simde_mm512_mask_and_epi32(src, k, v2, v3)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_and_epi32(simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_and_epi32(k, a, b);
#else
return simde_mm512_maskz_mov_epi32(k, simde_mm512_and_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_and_epi32
#define _mm512_maskz_and_epi32(k, a, b) simde_mm512_maskz_and_epi32(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_and_epi64 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_and_epi64(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i64 = a_.i64 & b_.i64;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
r_.i64[i] = a_.i64[i] & b_.i64[i];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_and_epi64
#define _mm512_and_epi64(a, b) simde_mm512_and_epi64(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_and_epi64(simde__m512i src, simde__mmask8 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_and_epi64(src, k, a, b);
#else
return simde_mm512_mask_mov_epi64(src, k, simde_mm512_and_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_and_epi64
#define _mm512_mask_and_epi64(src, k, a, b) simde_mm512_mask_and_epi64(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_and_epi64(simde__mmask8 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_and_epi64(k, a, b);
#else
return simde_mm512_maskz_mov_epi64(k, simde_mm512_and_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_and_epi64
#define _mm512_maskz_and_epi64(k, a, b) simde_mm512_maskz_and_epi64(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_and_si512 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_and_si512(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if defined(SIMDE_X86_AVX2_NATIVE)
r_.m256i[0] = simde_mm256_and_si256(a_.m256i[0], b_.m256i[0]);
r_.m256i[1] = simde_mm256_and_si256(a_.m256i[1], b_.m256i[1]);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f & b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] & b_.i32[i];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_and_si512
#define _mm512_and_si512(a, b) simde_mm512_and_si512(a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_AND_H) */
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/permutex2var.h | .h | 69,833 | 1,646
/* SPDX-License-Identifier: MIT
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Christopher Moore <moore@free.fr>
*/
#if !defined(SIMDE_X86_AVX512_PERMUTEX2VAR_H)
#define SIMDE_X86_AVX512_PERMUTEX2VAR_H
#include "types.h"
#include "and.h"
#include "andnot.h"
#include "blend.h"
#include "mov.h"
#include "or.h"
#include "set1.h"
#include "slli.h"
#include "srli.h"
#include "test.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* The following generic code avoids many, nearly identical, repetitions of fairly complex code.
* If the compiler optimizes well, in particular extracting invariant code from loops
* and simplifying code involving constants passed as arguments, it should not be
* significantly slower than specific code.
* Note that when the original vector contains few elements, these implementations
* may not be faster than portable code.
*/
#if defined(SIMDE_X86_SSSE3_NATIVE) || defined(SIMDE_ARM_NEON_A64V8_NATIVE) || defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_WASM_SIMD128_NATIVE)
#define SIMDE_X_PERMUTEX2VAR_USE_GENERIC
#endif
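/* As a concrete example of the index expansion performed below: for 16-bit
* elements (log2_index_size == 1) each element index i is rewritten as the
* little-endian byte pair (2*i, 2*i + 1), so index 3 becomes 0x0706 and
* gathers bytes 6 and 7, i.e. 16-bit element 3 of the table. */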
#if defined(SIMDE_X_PERMUTEX2VAR_USE_GENERIC)
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_permutex2var128 (const simde__m128i *a, const simde__m128i idx, const simde__m128i *b, const unsigned int log2_index_size, const unsigned int log2_data_length) {
const int idx_mask = (1 << (5 - log2_index_size + log2_data_length)) - 1;
#if defined(SIMDE_X86_SSSE3_NATIVE)
__m128i ra, rb, t, test, select, index;
const __m128i sixteen = _mm_set1_epi8(16);
/* Avoid the mullo intrinsics which have high latency (and the 32-bit one requires SSE4.1) */
switch (log2_index_size) {
default: /* Avoid uninitialized variable warning/error */
case 0:
index = _mm_and_si128(idx, _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, idx_mask)));
break;
case 1:
index = _mm_and_si128(idx, _mm_set1_epi16(HEDLEY_STATIC_CAST(int16_t, idx_mask)));
index = _mm_slli_epi32(index, 1);
t = _mm_slli_epi32(index, 8);
index = _mm_or_si128(index, t);
index = _mm_add_epi16(index, _mm_set1_epi16(0x0100));
break;
case 2:
index = _mm_and_si128(idx, _mm_set1_epi32(HEDLEY_STATIC_CAST(int32_t, idx_mask)));
index = _mm_slli_epi32(index, 2);
t = _mm_slli_epi32(index, 8);
index = _mm_or_si128(index, t);
t = _mm_slli_epi32(index, 16);
index = _mm_or_si128(index, t);
index = _mm_add_epi32(index, _mm_set1_epi32(0x03020100));
break;
}
test = index;
index = _mm_and_si128(index, _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, (1 << (4 + log2_data_length)) - 1)));
test = _mm_cmpgt_epi8(test, index);
ra = _mm_shuffle_epi8(a[0], index);
rb = _mm_shuffle_epi8(b[0], index);
#if defined(SIMDE_X86_SSE4_1_NATIVE)
SIMDE_VECTORIZE
for (int i = 1 ; i < (1 << log2_data_length) ; i++) {
select = _mm_cmplt_epi8(index, sixteen);
index = _mm_sub_epi8(index, sixteen);
ra = _mm_blendv_epi8(_mm_shuffle_epi8(a[i], index), ra, select);
rb = _mm_blendv_epi8(_mm_shuffle_epi8(b[i], index), rb, select);
}
return _mm_blendv_epi8(ra, rb, test);
#else
SIMDE_VECTORIZE
for (int i = 1 ; i < (1 << log2_data_length) ; i++) {
select = _mm_cmplt_epi8(index, sixteen);
index = _mm_sub_epi8(index, sixteen);
ra = _mm_or_si128(_mm_andnot_si128(select, _mm_shuffle_epi8(a[i], index)), _mm_and_si128(select, ra));
rb = _mm_or_si128(_mm_andnot_si128(select, _mm_shuffle_epi8(b[i], index)), _mm_and_si128(select, rb));
}
return _mm_or_si128(_mm_andnot_si128(test, ra), _mm_and_si128(test, rb));
#endif
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
uint8x16_t index, r;
uint16x8_t index16;
uint32x4_t index32;
uint8x16x2_t table2_a, table2_b;
uint8x16x4_t table4_a, table4_b;
switch (log2_index_size) {
case 0:
index = vandq_u8(simde__m128i_to_neon_u8(idx), vdupq_n_u8(HEDLEY_STATIC_CAST(uint8_t, idx_mask)));
break;
case 1:
index16 = vandq_u16(simde__m128i_to_neon_u16(idx), vdupq_n_u16(HEDLEY_STATIC_CAST(uint16_t, idx_mask)));
index16 = vmulq_n_u16(index16, 0x0202);
index16 = vaddq_u16(index16, vdupq_n_u16(0x0100));
index = vreinterpretq_u8_u16(index16);
break;
case 2:
index32 = vandq_u32(simde__m128i_to_neon_u32(idx), vdupq_n_u32(HEDLEY_STATIC_CAST(uint32_t, idx_mask)));
index32 = vmulq_n_u32(index32, 0x04040404);
index32 = vaddq_u32(index32, vdupq_n_u32(0x03020100));
index = vreinterpretq_u8_u32(index32);
break;
}
uint8x16_t mask = vdupq_n_u8(HEDLEY_STATIC_CAST(uint8_t, (1 << (4 + log2_data_length)) - 1));
switch (log2_data_length) {
case 0:
r = vqtbx1q_u8(vqtbl1q_u8(simde__m128i_to_neon_u8(b[0]), vandq_u8(index, mask)), simde__m128i_to_neon_u8(a[0]), index);
break;
case 1:
table2_a.val[0] = simde__m128i_to_neon_u8(a[0]);
table2_a.val[1] = simde__m128i_to_neon_u8(a[1]);
table2_b.val[0] = simde__m128i_to_neon_u8(b[0]);
table2_b.val[1] = simde__m128i_to_neon_u8(b[1]);
r = vqtbx2q_u8(vqtbl2q_u8(table2_b, vandq_u8(index, mask)), table2_a, index);
break;
case 2:
table4_a.val[0] = simde__m128i_to_neon_u8(a[0]);
table4_a.val[1] = simde__m128i_to_neon_u8(a[1]);
table4_a.val[2] = simde__m128i_to_neon_u8(a[2]);
table4_a.val[3] = simde__m128i_to_neon_u8(a[3]);
table4_b.val[0] = simde__m128i_to_neon_u8(b[0]);
table4_b.val[1] = simde__m128i_to_neon_u8(b[1]);
table4_b.val[2] = simde__m128i_to_neon_u8(b[2]);
table4_b.val[3] = simde__m128i_to_neon_u8(b[3]);
r = vqtbx4q_u8(vqtbl4q_u8(table4_b, vandq_u8(index, mask)), table4_a, index);
break;
}
return simde__m128i_from_neon_u8(r);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) r, ra, rb, t, index, s, thirty_two = vec_splats(HEDLEY_STATIC_CAST(uint8_t, 32));
SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) index16;
SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) temp32, index32;
SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL char) select, test;
switch (log2_index_size) {
default: /* Avoid uninitialized variable warning/error */
case 0:
index = vec_and(simde__m128i_to_altivec_u8(idx), vec_splats(HEDLEY_STATIC_CAST(uint8_t, idx_mask)));
break;
case 1:
index16 = simde__m128i_to_altivec_u16(idx);
index16 = vec_and(index16, vec_splats(HEDLEY_STATIC_CAST(uint16_t, idx_mask)));
index16 = vec_mladd(index16, vec_splats(HEDLEY_STATIC_CAST(unsigned short, 0x0202)), vec_splats(HEDLEY_STATIC_CAST(unsigned short, 0x0100)));
index = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), index16);
break;
case 2:
index32 = simde__m128i_to_altivec_u32(idx);
index32 = vec_and(index32, vec_splats(HEDLEY_STATIC_CAST(uint32_t, idx_mask)));
/* Multiply index32 by 0x04040404; unfortunately vec_mul isn't available so (mis)use 16-bit vec_mladd */
temp32 = vec_sl(index32, vec_splats(HEDLEY_STATIC_CAST(unsigned int, 16)));
index32 = vec_add(index32, temp32);
index32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int),
vec_mladd(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), index32),
vec_splats(HEDLEY_STATIC_CAST(unsigned short, 0x0404)),
vec_splat_u16(0)));
index32 = vec_add(index32, vec_splats(HEDLEY_STATIC_CAST(unsigned int, 0x03020100)));
index = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), index32);
break;
}
if (log2_data_length == 0) {
r = vec_perm(simde__m128i_to_altivec_u8(a[0]), simde__m128i_to_altivec_u8(b[0]), HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), index));
}
else {
s = index;
index = vec_and(index, vec_splats(HEDLEY_STATIC_CAST(uint8_t, (1 << (4 + log2_data_length)) - 1)));
test = vec_cmpgt(s, index);
ra = vec_perm(simde__m128i_to_altivec_u8(a[0]), simde__m128i_to_altivec_u8(a[1]), index);
rb = vec_perm(simde__m128i_to_altivec_u8(b[0]), simde__m128i_to_altivec_u8(b[1]), index);
SIMDE_VECTORIZE
for (int i = 2 ; i < (1 << log2_data_length) ; i += 2) {
select = vec_cmplt(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), index),
HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), thirty_two));
index = vec_sub(index, thirty_two);
t = vec_perm(simde__m128i_to_altivec_u8(a[i]), simde__m128i_to_altivec_u8(a[i + 1]), index);
ra = vec_sel(t, ra, select);
t = vec_perm(simde__m128i_to_altivec_u8(b[i]), simde__m128i_to_altivec_u8(b[i + 1]), index);
rb = vec_sel(t, rb, select);
}
r = vec_sel(ra, rb, test);
}
return simde__m128i_from_altivec_u8(r);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
const v128_t sixteen = wasm_i8x16_splat(16);
v128_t index = simde__m128i_to_wasm_v128(idx);
switch (log2_index_size) {
case 0:
index = wasm_v128_and(index, wasm_i8x16_splat(HEDLEY_STATIC_CAST(int8_t, idx_mask)));
break;
case 1:
index = wasm_v128_and(index, wasm_i16x8_splat(HEDLEY_STATIC_CAST(int16_t, idx_mask)));
index = wasm_i16x8_mul(index, wasm_i16x8_splat(0x0202));
index = wasm_i16x8_add(index, wasm_i16x8_splat(0x0100));
break;
case 2:
index = wasm_v128_and(index, wasm_i32x4_splat(HEDLEY_STATIC_CAST(int32_t, idx_mask)));
index = wasm_i32x4_mul(index, wasm_i32x4_splat(0x04040404));
index = wasm_i32x4_add(index, wasm_i32x4_splat(0x03020100));
break;
}
v128_t r = wasm_i8x16_swizzle(simde__m128i_to_wasm_v128(a[0]), index);
SIMDE_VECTORIZE
for (int i = 1 ; i < (1 << log2_data_length) ; i++) {
index = wasm_i8x16_sub(index, sixteen);
r = wasm_v128_or(r, wasm_i8x16_swizzle(simde__m128i_to_wasm_v128(a[i]), index));
}
SIMDE_VECTORIZE
for (int i = 0 ; i < (1 << log2_data_length) ; i++) {
index = wasm_i8x16_sub(index, sixteen);
r = wasm_v128_or(r, wasm_i8x16_swizzle(simde__m128i_to_wasm_v128(b[i]), index));
}
return simde__m128i_from_wasm_v128(r);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
void
simde_x_permutex2var (simde__m128i *r, const simde__m128i *a, const simde__m128i *idx, const simde__m128i *b, const unsigned int log2_index_size, const unsigned int log2_data_length) {
SIMDE_VECTORIZE
for (int i = 0 ; i < (1 << log2_data_length) ; i++) {
r[i] = simde_x_permutex2var128(a, idx[i], b, log2_index_size, log2_data_length);
}
}
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_permutex2var_epi16 (simde__m128i a, simde__m128i idx, simde__m128i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_permutex2var_epi16(a, idx, b);
#elif defined(SIMDE_X_PERMUTEX2VAR_USE_GENERIC)
simde__m128i r;
simde_x_permutex2var(&r, &a, &idx, &b, 1, 0);
return r;
#else
simde__m128i_private
a_ = simde__m128i_to_private(a),
idx_ = simde__m128i_to_private(idx),
b_ = simde__m128i_to_private(b),
r_;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = ((idx_.i16[i] & 8) ? b_ : a_).i16[idx_.i16[i] & 7];
}
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_permutex2var_epi16
#define _mm_permutex2var_epi16(a, idx, b) simde_mm_permutex2var_epi16(a, idx, b)
#endif
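/* A short usage sketch: with eight 16-bit lanes per vector, bit 3 of each
* index selects the source (a or b) and the low three bits select the lane.
* The helper below, whose name is illustrative only, interleaves the low
* halves of a and b, matching unpacklo_epi16. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_example_interleave_lo_epi16 (simde__m128i a, simde__m128i b) {
/* lanes: a0, b0, a1, b1, a2, b2, a3, b3 (set_epi16 lists e7..e0) */
const simde__m128i idx = simde_mm_set_epi16(11, 3, 10, 2, 9, 1, 8, 0);
return simde_mm_permutex2var_epi16(a, idx, b);
}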
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_permutex2var_epi16 (simde__m128i a, simde__mmask8 k, simde__m128i idx, simde__m128i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_permutex2var_epi16(a, k, idx, b);
#else
return simde_mm_mask_mov_epi16(a, k, simde_mm_permutex2var_epi16(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_permutex2var_epi16
#define _mm_mask_permutex2var_epi16(a, k, idx, b) simde_mm_mask_permutex2var_epi16(a, k, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask2_permutex2var_epi16 (simde__m128i a, simde__m128i idx, simde__mmask8 k, simde__m128i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask2_permutex2var_epi16(a, idx, k, b);
#else
return simde_mm_mask_mov_epi16(idx, k, simde_mm_permutex2var_epi16(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask2_permutex2var_epi16
#define _mm_mask2_permutex2var_epi16(a, idx, k, b) simde_mm_mask2_permutex2var_epi16(a, idx, k, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_permutex2var_epi16 (simde__mmask8 k, simde__m128i a, simde__m128i idx, simde__m128i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_permutex2var_epi16(k, a, idx, b);
#else
return simde_mm_maskz_mov_epi16(k, simde_mm_permutex2var_epi16(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_permutex2var_epi16
#define _mm_maskz_permutex2var_epi16(k, a, idx, b) simde_mm_maskz_permutex2var_epi16(k, a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_permutex2var_epi32 (simde__m128i a, simde__m128i idx, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_permutex2var_epi32(a, idx, b);
#elif defined(SIMDE_X_PERMUTEX2VAR_USE_GENERIC) /* This may not be faster than the portable version */
simde__m128i r;
simde_x_permutex2var(&r, &a, &idx, &b, 2, 0);
return r;
#else
simde__m128i_private
a_ = simde__m128i_to_private(a),
idx_ = simde__m128i_to_private(idx),
b_ = simde__m128i_to_private(b),
r_;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ((idx_.i32[i] & 4) ? b_ : a_).i32[idx_.i32[i] & 3];
}
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_permutex2var_epi32
#define _mm_permutex2var_epi32(a, idx, b) simde_mm_permutex2var_epi32(a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_permutex2var_epi32 (simde__m128i a, simde__mmask8 k, simde__m128i idx, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_permutex2var_epi32(a, k, idx, b);
#else
return simde_mm_mask_mov_epi32(a, k, simde_mm_permutex2var_epi32(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_permutex2var_epi32
#define _mm_mask_permutex2var_epi32(a, k, idx, b) simde_mm_mask_permutex2var_epi32(a, k, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask2_permutex2var_epi32 (simde__m128i a, simde__m128i idx, simde__mmask8 k, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask2_permutex2var_epi32(a, idx, k, b);
#else
return simde_mm_mask_mov_epi32(idx, k, simde_mm_permutex2var_epi32(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask2_permutex2var_epi32
#define _mm_mask2_permutex2var_epi32(a, idx, k, b) simde_mm_mask2_permutex2var_epi32(a, idx, k, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_permutex2var_epi32 (simde__mmask8 k, simde__m128i a, simde__m128i idx, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_permutex2var_epi32(k, a, idx, b);
#else
return simde_mm_maskz_mov_epi32(k, simde_mm_permutex2var_epi32(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_permutex2var_epi32
#define _mm_maskz_permutex2var_epi32(k, a, idx, b) simde_mm_maskz_permutex2var_epi32(k, a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_permutex2var_epi64 (simde__m128i a, simde__m128i idx, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_permutex2var_epi64(a, idx, b);
#else
simde__m128i_private
a_ = simde__m128i_to_private(a),
idx_ = simde__m128i_to_private(idx),
b_ = simde__m128i_to_private(b),
r_;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
r_.i64[i] = ((idx_.i64[i] & 2) ? b_ : a_).i64[idx_.i64[i] & 1];
}
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_permutex2var_epi64
#define _mm_permutex2var_epi64(a, idx, b) simde_mm_permutex2var_epi64(a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_permutex2var_epi64 (simde__m128i a, simde__mmask8 k, simde__m128i idx, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_permutex2var_epi64(a, k, idx, b);
#else
return simde_mm_mask_mov_epi64(a, k, simde_mm_permutex2var_epi64(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_permutex2var_epi64
#define _mm_mask_permutex2var_epi64(a, k, idx, b) simde_mm_mask_permutex2var_epi64(a, k, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask2_permutex2var_epi64 (simde__m128i a, simde__m128i idx, simde__mmask8 k, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask2_permutex2var_epi64(a, idx, k, b);
#else
return simde_mm_mask_mov_epi64(idx, k, simde_mm_permutex2var_epi64(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask2_permutex2var_epi64
#define _mm_mask2_permutex2var_epi64(a, idx, k, b) simde_mm_mask2_permutex2var_epi64(a, idx, k, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_permutex2var_epi64 (simde__mmask8 k, simde__m128i a, simde__m128i idx, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_permutex2var_epi64(k, a, idx, b);
#else
return simde_mm_maskz_mov_epi64(k, simde_mm_permutex2var_epi64(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_permutex2var_epi64
#define _mm_maskz_permutex2var_epi64(k, a, idx, b) simde_mm_maskz_permutex2var_epi64(k, a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_permutex2var_epi8 (simde__m128i a, simde__m128i idx, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_permutex2var_epi8(a, idx, b);
#elif defined(SIMDE_X86_AVX512F_NATIVE)
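/* Widen everything to 32-bit lanes, reuse the 512-bit dword permute, then
* narrow back down: after the unsigned widening only the low five bits of
* each index are significant (bit 4 selects b), exactly as in the native
* byte variant. */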
return _mm512_cvtepi32_epi8(_mm512_permutex2var_epi32(_mm512_cvtepu8_epi32(a), _mm512_cvtepu8_epi32(idx), _mm512_cvtepu8_epi32(b)));
#elif defined(SIMDE_X_PERMUTEX2VAR_USE_GENERIC)
simde__m128i r;
simde_x_permutex2var(&r, &a, &idx, &b, 0, 0);
return r;
#else
simde__m128i_private
a_ = simde__m128i_to_private(a),
idx_ = simde__m128i_to_private(idx),
b_ = simde__m128i_to_private(b),
r_;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = ((idx_.i8[i] & 0x10) ? b_ : a_).i8[idx_.i8[i] & 0x0F];
}
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_permutex2var_epi8
#define _mm_permutex2var_epi8(a, idx, b) simde_mm_permutex2var_epi8(a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_permutex2var_epi8 (simde__m128i a, simde__mmask16 k, simde__m128i idx, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_permutex2var_epi8(a, k, idx, b);
#else
return simde_mm_mask_mov_epi8(a, k, simde_mm_permutex2var_epi8(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_permutex2var_epi8
#define _mm_mask_permutex2var_epi8(a, k, idx, b) simde_mm_mask_permutex2var_epi8(a, k, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask2_permutex2var_epi8 (simde__m128i a, simde__m128i idx, simde__mmask16 k, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask2_permutex2var_epi8(a, idx, k, b);
#else
return simde_mm_mask_mov_epi8(idx, k, simde_mm_permutex2var_epi8(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask2_permutex2var_epi8
#define _mm_mask2_permutex2var_epi8(a, idx, k, b) simde_mm_mask2_permutex2var_epi8(a, idx, k, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_permutex2var_epi8 (simde__mmask16 k, simde__m128i a, simde__m128i idx, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_permutex2var_epi8(k, a, idx, b);
#else
return simde_mm_maskz_mov_epi8(k, simde_mm_permutex2var_epi8(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_permutex2var_epi8
#define _mm_maskz_permutex2var_epi8(k, a, idx, b) simde_mm_maskz_permutex2var_epi8(k, a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_permutex2var_pd (simde__m128d a, simde__m128i idx, simde__m128d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_permutex2var_pd(a, idx, b);
#else
return simde_mm_castsi128_pd(simde_mm_permutex2var_epi64(simde_mm_castpd_si128(a), idx, simde_mm_castpd_si128(b)));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_permutex2var_pd
#define _mm_permutex2var_pd(a, idx, b) simde_mm_permutex2var_pd(a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_mask_permutex2var_pd (simde__m128d a, simde__mmask8 k, simde__m128i idx, simde__m128d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_permutex2var_pd(a, k, idx, b);
#else
return simde_mm_mask_mov_pd(a, k, simde_mm_permutex2var_pd(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_permutex2var_pd
#define _mm_mask_permutex2var_pd(a, k, idx, b) simde_mm_mask_permutex2var_pd(a, k, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_mask2_permutex2var_pd (simde__m128d a, simde__m128i idx, simde__mmask8 k, simde__m128d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask2_permutex2var_pd(a, idx, k, b);
#else
return simde_mm_mask_mov_pd(simde_mm_castsi128_pd(idx), k, simde_mm_permutex2var_pd(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask2_permutex2var_pd
#define _mm_mask2_permutex2var_pd(a, idx, k, b) simde_mm_mask2_permutex2var_pd(a, idx, k, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_maskz_permutex2var_pd (simde__mmask8 k, simde__m128d a, simde__m128i idx, simde__m128d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_permutex2var_pd(k, a, idx, b);
#else
return simde_mm_maskz_mov_pd(k, simde_mm_permutex2var_pd(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_permutex2var_pd
#define _mm_maskz_permutex2var_pd(k, a, idx, b) simde_mm_maskz_permutex2var_pd(k, a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_permutex2var_ps (simde__m128 a, simde__m128i idx, simde__m128 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_permutex2var_ps(a, idx, b);
#else
return simde_mm_castsi128_ps(simde_mm_permutex2var_epi32(simde_mm_castps_si128(a), idx, simde_mm_castps_si128(b)));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_permutex2var_ps
#define _mm_permutex2var_ps(a, idx, b) simde_mm_permutex2var_ps(a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mask_permutex2var_ps (simde__m128 a, simde__mmask8 k, simde__m128i idx, simde__m128 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_permutex2var_ps(a, k, idx, b);
#else
return simde_mm_mask_mov_ps(a, k, simde_mm_permutex2var_ps(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_permutex2var_ps
#define _mm_mask_permutex2var_ps(a, k, idx, b) simde_mm_mask_permutex2var_ps(a, k, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mask2_permutex2var_ps (simde__m128 a, simde__m128i idx, simde__mmask8 k, simde__m128 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask2_permutex2var_ps(a, idx, k, b);
#else
return simde_mm_mask_mov_ps(simde_mm_castsi128_ps(idx), k, simde_mm_permutex2var_ps(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask2_permutex2var_ps
#define _mm_mask2_permutex2var_ps(a, idx, k, b) simde_mm_mask2_permutex2var_ps(a, idx, k, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_maskz_permutex2var_ps (simde__mmask8 k, simde__m128 a, simde__m128i idx, simde__m128 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_permutex2var_ps(k, a, idx, b);
#else
return simde_mm_maskz_mov_ps(k, simde_mm_permutex2var_ps(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_permutex2var_ps
#define _mm_maskz_permutex2var_ps(k, a, idx, b) simde_mm_maskz_permutex2var_ps(k, a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_permutex2var_epi16 (simde__m256i a, simde__m256i idx, simde__m256i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_permutex2var_epi16(a, idx, b);
#elif defined(SIMDE_X86_AVX2_NATIVE)
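/* Strategy: gather through the 32-bit cross-lane permute. Each 16-bit index
* is halved to address the containing dword in a and b; the shift left by 28
* moves the table-select bit into the sign position for blendv. The gather is
* done twice (once for the low and once for the high 16-bit index of every
* dword), and the two results are re-interleaved according to bit 0 of the
* original index. */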
__m256i hilo, hilo2, hi, lo, idx2, ta, tb, select;
const __m256i ones = _mm256_set1_epi16(1);
idx2 = _mm256_srli_epi32(idx, 1);
ta = _mm256_permutevar8x32_epi32(a, idx2);
tb = _mm256_permutevar8x32_epi32(b, idx2);
select = _mm256_slli_epi32(idx2, 28);
hilo = _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(ta),
_mm256_castsi256_ps(tb),
_mm256_castsi256_ps(select)));
idx2 = _mm256_srli_epi32(idx2, 16);
ta = _mm256_permutevar8x32_epi32(a, idx2);
tb = _mm256_permutevar8x32_epi32(b, idx2);
select = _mm256_slli_epi32(idx2, 28);
hilo2 = _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(ta),
_mm256_castsi256_ps(tb),
_mm256_castsi256_ps(select)));
lo = _mm256_blend_epi16(_mm256_slli_epi32(hilo2, 16), hilo, 0x55);
hi = _mm256_blend_epi16(hilo2, _mm256_srli_epi32(hilo, 16), 0x55);
select = _mm256_cmpeq_epi16(_mm256_and_si256(idx, ones), ones);
return _mm256_blendv_epi8(lo, hi, select);
#else
simde__m256i_private
a_ = simde__m256i_to_private(a),
idx_ = simde__m256i_to_private(idx),
b_ = simde__m256i_to_private(b),
r_;
#if defined(SIMDE_X_PERMUTEX2VAR_USE_GENERIC)
simde_x_permutex2var(r_.m128i, a_.m128i, idx_.m128i, b_.m128i, 1, 1);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = ((idx_.i16[i] & 0x10) ? b_ : a_).i16[idx_.i16[i] & 0x0F];
}
#endif
return simde__m256i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_permutex2var_epi16
#define _mm256_permutex2var_epi16(a, idx, b) simde_mm256_permutex2var_epi16(a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_permutex2var_epi16 (simde__m256i a, simde__mmask16 k, simde__m256i idx, simde__m256i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_permutex2var_epi16(a, k, idx, b);
#else
return simde_mm256_mask_mov_epi16(a, k, simde_mm256_permutex2var_epi16(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_permutex2var_epi16
#define _mm256_mask_permutex2var_epi16(a, k, idx, b) simde_mm256_mask_permutex2var_epi16(a, k, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask2_permutex2var_epi16 (simde__m256i a, simde__m256i idx, simde__mmask16 k, simde__m256i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask2_permutex2var_epi16(a, idx, k, b);
#else
return simde_mm256_mask_mov_epi16(idx, k, simde_mm256_permutex2var_epi16(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask2_permutex2var_epi16
#define _mm256_mask2_permutex2var_epi16(a, idx, k, b) simde_mm256_mask2_permutex2var_epi16(a, idx, k, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_permutex2var_epi16 (simde__mmask16 k, simde__m256i a, simde__m256i idx, simde__m256i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_permutex2var_epi16(k, a, idx, b);
#else
return simde_mm256_maskz_mov_epi16(k, simde_mm256_permutex2var_epi16(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_permutex2var_epi16
#define _mm256_maskz_permutex2var_epi16(k, a, idx, b) simde_mm256_maskz_permutex2var_epi16(k, a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_permutex2var_epi32 (simde__m256i a, simde__m256i idx, simde__m256i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_permutex2var_epi32(a, idx, b);
#elif defined(SIMDE_X86_AVX2_NATIVE)
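    /* Each source has eight dwords, all reachable by permutevar8x32;
     * shifting the index left by 28 moves bit 3 (the a/b selector)
     * into the sign bit that blendv_ps tests. */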
__m256i ta, tb, select;
ta = _mm256_permutevar8x32_epi32(a, idx);
tb = _mm256_permutevar8x32_epi32(b, idx);
select = _mm256_slli_epi32(idx, 28);
return _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(ta),
_mm256_castsi256_ps(tb),
_mm256_castsi256_ps(select)));
#else
simde__m256i_private
a_ = simde__m256i_to_private(a),
idx_ = simde__m256i_to_private(idx),
b_ = simde__m256i_to_private(b),
r_;
#if defined(SIMDE_X_PERMUTEX2VAR_USE_GENERIC)
simde_x_permutex2var(r_.m128i, a_.m128i, idx_.m128i, b_.m128i, 2, 1);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ((idx_.i32[i] & 8) ? b_ : a_).i32[idx_.i32[i] & 7];
}
#endif
return simde__m256i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_permutex2var_epi32
#define _mm256_permutex2var_epi32(a, idx, b) simde_mm256_permutex2var_epi32(a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_permutex2var_epi32 (simde__m256i a, simde__mmask8 k, simde__m256i idx, simde__m256i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_permutex2var_epi32(a, k, idx, b);
#else
return simde_mm256_mask_mov_epi32(a, k, simde_mm256_permutex2var_epi32(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_permutex2var_epi32
#define _mm256_mask_permutex2var_epi32(a, k, idx, b) simde_mm256_mask_permutex2var_epi32(a, k, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask2_permutex2var_epi32 (simde__m256i a, simde__m256i idx, simde__mmask8 k, simde__m256i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask2_permutex2var_epi32(a, idx, k, b);
#else
return simde_mm256_mask_mov_epi32(idx, k, simde_mm256_permutex2var_epi32(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask2_permutex2var_epi32
#define _mm256_mask2_permutex2var_epi32(a, idx, k, b) simde_mm256_mask2_permutex2var_epi32(a, idx, k, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_permutex2var_epi32 (simde__mmask8 k, simde__m256i a, simde__m256i idx, simde__m256i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_permutex2var_epi32(k, a, idx, b);
#else
return simde_mm256_maskz_mov_epi32(k, simde_mm256_permutex2var_epi32(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_permutex2var_epi32
#define _mm256_maskz_permutex2var_epi32(k, a, idx, b) simde_mm256_maskz_permutex2var_epi32(k, a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_permutex2var_epi64 (simde__m256i a, simde__m256i idx, simde__m256i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_permutex2var_epi64(a, idx, b);
#else
simde__m256i_private
a_ = simde__m256i_to_private(a),
idx_ = simde__m256i_to_private(idx),
b_ = simde__m256i_to_private(b),
r_;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
r_.i64[i] = ((idx_.i64[i] & 4) ? b_ : a_).i64[idx_.i64[i] & 3];
}
return simde__m256i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_permutex2var_epi64
#define _mm256_permutex2var_epi64(a, idx, b) simde_mm256_permutex2var_epi64(a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_permutex2var_epi64 (simde__m256i a, simde__mmask8 k, simde__m256i idx, simde__m256i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_permutex2var_epi64(a, k, idx, b);
#else
return simde_mm256_mask_mov_epi64(a, k, simde_mm256_permutex2var_epi64(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_permutex2var_epi64
#define _mm256_mask_permutex2var_epi64(a, k, idx, b) simde_mm256_mask_permutex2var_epi64(a, k, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask2_permutex2var_epi64 (simde__m256i a, simde__m256i idx, simde__mmask8 k, simde__m256i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask2_permutex2var_epi64(a, idx, k, b);
#else
return simde_mm256_mask_mov_epi64(idx, k, simde_mm256_permutex2var_epi64(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask2_permutex2var_epi64
#define _mm256_mask2_permutex2var_epi64(a, idx, k, b) simde_mm256_mask2_permutex2var_epi64(a, idx, k, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_permutex2var_epi64 (simde__mmask8 k, simde__m256i a, simde__m256i idx, simde__m256i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_permutex2var_epi64(k, a, idx, b);
#else
return simde_mm256_maskz_mov_epi64(k, simde_mm256_permutex2var_epi64(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_permutex2var_epi64
#define _mm256_maskz_permutex2var_epi64(k, a, idx, b) simde_mm256_maskz_permutex2var_epi64(k, a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_permutex2var_epi8 (simde__m256i a, simde__m256i idx, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_permutex2var_epi8(a, idx, b);
#elif defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_cvtepi16_epi8(_mm512_permutex2var_epi16(_mm512_cvtepu8_epi16(a), _mm512_cvtepu8_epi16(idx), _mm512_cvtepu8_epi16(b)));
#elif defined(SIMDE_X86_AVX2_NATIVE)
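    /* _mm256_shuffle_epi8 only shuffles within 128-bit lanes, so first
     * broadcast each 128-bit half of a and b to both lanes (a0/a1 and
     * b0/b1).  blendv_epi8 keys off bit 7 of each byte, so shifting the
     * 6-bit index left by 3 exposes bit 4 (which half) and left by 2
     * exposes bit 5 (a or b). */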
__m256i t0, t1, index, select0x10, select0x20, a01, b01;
const __m256i mask = _mm256_set1_epi8(0x3F);
const __m256i a0 = _mm256_permute4x64_epi64(a, (1 << 6) + (0 << 4) + (1 << 2) + (0 << 0));
const __m256i a1 = _mm256_permute4x64_epi64(a, (3 << 6) + (2 << 4) + (3 << 2) + (2 << 0));
const __m256i b0 = _mm256_permute4x64_epi64(b, (1 << 6) + (0 << 4) + (1 << 2) + (0 << 0));
const __m256i b1 = _mm256_permute4x64_epi64(b, (3 << 6) + (2 << 4) + (3 << 2) + (2 << 0));
index = _mm256_and_si256(idx, mask);
t0 = _mm256_shuffle_epi8(a0, index);
t1 = _mm256_shuffle_epi8(a1, index);
select0x10 = _mm256_slli_epi64(index, 3);
a01 = _mm256_blendv_epi8(t0, t1, select0x10);
t0 = _mm256_shuffle_epi8(b0, index);
t1 = _mm256_shuffle_epi8(b1, index);
b01 = _mm256_blendv_epi8(t0, t1, select0x10);
select0x20 = _mm256_slli_epi64(index, 2);
return _mm256_blendv_epi8(a01, b01, select0x20);
#else
simde__m256i_private
a_ = simde__m256i_to_private(a),
idx_ = simde__m256i_to_private(idx),
b_ = simde__m256i_to_private(b),
r_;
#if defined(SIMDE_X_PERMUTEX2VAR_USE_GENERIC)
simde_x_permutex2var(r_.m128i, a_.m128i, idx_.m128i, b_.m128i, 0, 1);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = ((idx_.i8[i] & 0x20) ? b_ : a_).i8[idx_.i8[i] & 0x1F];
}
#endif
return simde__m256i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_permutex2var_epi8
#define _mm256_permutex2var_epi8(a, idx, b) simde_mm256_permutex2var_epi8(a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_permutex2var_epi8 (simde__m256i a, simde__mmask32 k, simde__m256i idx, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_permutex2var_epi8(a, k, idx, b);
#else
return simde_mm256_mask_mov_epi8(a, k, simde_mm256_permutex2var_epi8(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_permutex2var_epi8
#define _mm256_mask_permutex2var_epi8(a, k, idx, b) simde_mm256_mask_permutex2var_epi8(a, k, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask2_permutex2var_epi8 (simde__m256i a, simde__m256i idx, simde__mmask32 k, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask2_permutex2var_epi8(a, idx, k, b);
#else
return simde_mm256_mask_mov_epi8(idx, k, simde_mm256_permutex2var_epi8(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask2_permutex2var_epi8
#define _mm256_mask2_permutex2var_epi8(a, idx, k, b) simde_mm256_mask2_permutex2var_epi8(a, idx, k, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_permutex2var_epi8 (simde__mmask32 k, simde__m256i a, simde__m256i idx, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_permutex2var_epi8(k, a, idx, b);
#else
return simde_mm256_maskz_mov_epi8(k, simde_mm256_permutex2var_epi8(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_permutex2var_epi8
#define _mm256_maskz_permutex2var_epi8(k, a, idx, b) simde_mm256_maskz_permutex2var_epi8(k, a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_permutex2var_pd (simde__m256d a, simde__m256i idx, simde__m256d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_permutex2var_pd(a, idx, b);
#else
return simde_mm256_castsi256_pd(simde_mm256_permutex2var_epi64(simde_mm256_castpd_si256(a), idx, simde_mm256_castpd_si256(b)));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_permutex2var_pd
#define _mm256_permutex2var_pd(a, idx, b) simde_mm256_permutex2var_pd(a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_mask_permutex2var_pd (simde__m256d a, simde__mmask8 k, simde__m256i idx, simde__m256d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_permutex2var_pd(a, k, idx, b);
#else
return simde_mm256_mask_mov_pd(a, k, simde_mm256_permutex2var_pd(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_permutex2var_pd
#define _mm256_mask_permutex2var_pd(a, k, idx, b) simde_mm256_mask_permutex2var_pd(a, k, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_mask2_permutex2var_pd (simde__m256d a, simde__m256i idx, simde__mmask8 k, simde__m256d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask2_permutex2var_pd(a, idx, k, b);
#else
return simde_mm256_mask_mov_pd(simde_mm256_castsi256_pd(idx), k, simde_mm256_permutex2var_pd(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask2_permutex2var_pd
#define _mm256_mask2_permutex2var_pd(a, idx, k, b) simde_mm256_mask2_permutex2var_pd(a, idx, k, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_maskz_permutex2var_pd (simde__mmask8 k, simde__m256d a, simde__m256i idx, simde__m256d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_permutex2var_pd(k, a, idx, b);
#else
return simde_mm256_maskz_mov_pd(k, simde_mm256_permutex2var_pd(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_permutex2var_pd
#define _mm256_maskz_permutex2var_pd(k, a, idx, b) simde_mm256_maskz_permutex2var_pd(k, a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_permutex2var_ps (simde__m256 a, simde__m256i idx, simde__m256 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_permutex2var_ps(a, idx, b);
#else
return simde_mm256_castsi256_ps(simde_mm256_permutex2var_epi32(simde_mm256_castps_si256(a), idx, simde_mm256_castps_si256(b)));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_permutex2var_ps
#define _mm256_permutex2var_ps(a, idx, b) simde_mm256_permutex2var_ps(a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_mask_permutex2var_ps (simde__m256 a, simde__mmask8 k, simde__m256i idx, simde__m256 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_permutex2var_ps(a, k, idx, b);
#else
return simde_mm256_mask_mov_ps(a, k, simde_mm256_permutex2var_ps(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_permutex2var_ps
#define _mm256_mask_permutex2var_ps(a, k, idx, b) simde_mm256_mask_permutex2var_ps(a, k, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_mask2_permutex2var_ps (simde__m256 a, simde__m256i idx, simde__mmask8 k, simde__m256 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask2_permutex2var_ps(a, idx, k, b);
#else
return simde_mm256_mask_mov_ps(simde_mm256_castsi256_ps(idx), k, simde_mm256_permutex2var_ps(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask2_permutex2var_ps
#define _mm256_mask2_permutex2var_ps(a, idx, k, b) simde_mm256_mask2_permutex2var_ps(a, idx, k, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_maskz_permutex2var_ps (simde__mmask8 k, simde__m256 a, simde__m256i idx, simde__m256 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_permutex2var_ps(k, a, idx, b);
#else
return simde_mm256_maskz_mov_ps(k, simde_mm256_permutex2var_ps(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_permutex2var_ps
#define _mm256_maskz_permutex2var_ps(k, a, idx, b) simde_mm256_maskz_permutex2var_ps(k, a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_permutex2var_epi16 (simde__m512i a, simde__m512i idx, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_permutex2var_epi16(a, idx, b);
#else
simde__m512i_private
a_ = simde__m512i_to_private(a),
idx_ = simde__m512i_to_private(idx),
b_ = simde__m512i_to_private(b),
r_;
#if defined(SIMDE_X86_AVX2_NATIVE)
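      /* Same dword-gather trick as the 256-bit case, but each gathered
       * dword may live in either 256-bit half of a or b: bit 4 of the
       * halved index (shifted to the sign bit by 27) picks a vs. b,
       * and doubling the selector promotes bit 3 into the sign bit to
       * pick the half.  Bit 0 of the original index still selects the
       * low or high word at the end. */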
__m256i hilo, hilo1, hilo2, hi, lo, idx1, idx2, ta, tb, select;
const __m256i ones = _mm256_set1_epi16(1);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.m256i_private) / sizeof(r_.m256i_private[0])) ; i++) {
idx1 = idx_.m256i[i];
idx2 = _mm256_srli_epi32(idx1, 1);
select = _mm256_slli_epi32(idx2, 27);
ta = _mm256_permutevar8x32_epi32(a_.m256i[0], idx2);
tb = _mm256_permutevar8x32_epi32(b_.m256i[0], idx2);
hilo = _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(ta),
_mm256_castsi256_ps(tb),
_mm256_castsi256_ps(select)));
ta = _mm256_permutevar8x32_epi32(a_.m256i[1], idx2);
tb = _mm256_permutevar8x32_epi32(b_.m256i[1], idx2);
hilo1 = _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(ta),
_mm256_castsi256_ps(tb),
_mm256_castsi256_ps(select)));
select = _mm256_add_epi32(select, select);
hilo1 = _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(hilo),
_mm256_castsi256_ps(hilo1),
_mm256_castsi256_ps(select)));
idx2 = _mm256_srli_epi32(idx2, 16);
select = _mm256_slli_epi32(idx2, 27);
ta = _mm256_permutevar8x32_epi32(a_.m256i[0], idx2);
tb = _mm256_permutevar8x32_epi32(b_.m256i[0], idx2);
hilo = _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(ta),
_mm256_castsi256_ps(tb),
_mm256_castsi256_ps(select)));
ta = _mm256_permutevar8x32_epi32(a_.m256i[1], idx2);
tb = _mm256_permutevar8x32_epi32(b_.m256i[1], idx2);
hilo2 = _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(ta),
_mm256_castsi256_ps(tb),
_mm256_castsi256_ps(select)));
select = _mm256_add_epi32(select, select);
hilo2 = _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(hilo),
_mm256_castsi256_ps(hilo2),
_mm256_castsi256_ps(select)));
lo = _mm256_blend_epi16(_mm256_slli_epi32(hilo2, 16), hilo1, 0x55);
hi = _mm256_blend_epi16(hilo2, _mm256_srli_epi32(hilo1, 16), 0x55);
select = _mm256_cmpeq_epi16(_mm256_and_si256(idx1, ones), ones);
r_.m256i[i] = _mm256_blendv_epi8(lo, hi, select);
}
#elif defined(SIMDE_X_PERMUTEX2VAR_USE_GENERIC)
simde_x_permutex2var(r_.m128i, a_.m128i, idx_.m128i, b_.m128i, 1, 2);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = ((idx_.i16[i] & 0x20) ? b_ : a_).i16[idx_.i16[i] & 0x1F];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_permutex2var_epi16
#define _mm512_permutex2var_epi16(a, idx, b) simde_mm512_permutex2var_epi16(a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_permutex2var_epi16 (simde__m512i a, simde__mmask32 k, simde__m512i idx, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_permutex2var_epi16(a, k, idx, b);
#else
return simde_mm512_mask_mov_epi16(a, k, simde_mm512_permutex2var_epi16(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_permutex2var_epi16
#define _mm512_mask_permutex2var_epi16(a, k, idx, b) simde_mm512_mask_permutex2var_epi16(a, k, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask2_permutex2var_epi16 (simde__m512i a, simde__m512i idx, simde__mmask32 k, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask2_permutex2var_epi16(a, idx, k, b);
#else
return simde_mm512_mask_mov_epi16(idx, k, simde_mm512_permutex2var_epi16(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask2_permutex2var_epi16
#define _mm512_mask2_permutex2var_epi16(a, idx, k, b) simde_mm512_mask2_permutex2var_epi16(a, idx, k, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_permutex2var_epi16 (simde__mmask32 k, simde__m512i a, simde__m512i idx, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_permutex2var_epi16(k, a, idx, b);
#else
return simde_mm512_maskz_mov_epi16(k, simde_mm512_permutex2var_epi16(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_permutex2var_epi16
#define _mm512_maskz_permutex2var_epi16(k, a, idx, b) simde_mm512_maskz_permutex2var_epi16(k, a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_permutex2var_epi32 (simde__m512i a, simde__m512i idx, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_permutex2var_epi32(a, idx, b);
#else
simde__m512i_private
a_ = simde__m512i_to_private(a),
idx_ = simde__m512i_to_private(idx),
b_ = simde__m512i_to_private(b),
r_;
#if defined(SIMDE_X86_AVX2_NATIVE)
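      /* 16 dwords per source: bits 0-2 of the index drive
       * permutevar8x32 within each 256-bit half, bit 3 (shift by 28)
       * picks the half, and bit 4 (shift by 27) picks b over a. */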
__m256i index, t0, t1, a01, b01, select;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.m256i_private) / sizeof(r_.m256i_private[0])) ; i++) {
index = idx_.m256i[i];
t0 = _mm256_permutevar8x32_epi32(a_.m256i[0], index);
t1 = _mm256_permutevar8x32_epi32(a_.m256i[1], index);
select = _mm256_slli_epi32(index, 28);
a01 = _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(t0),
_mm256_castsi256_ps(t1),
_mm256_castsi256_ps(select)));
t0 = _mm256_permutevar8x32_epi32(b_.m256i[0], index);
t1 = _mm256_permutevar8x32_epi32(b_.m256i[1], index);
b01 = _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(t0),
_mm256_castsi256_ps(t1),
_mm256_castsi256_ps(select)));
select = _mm256_slli_epi32(index, 27);
r_.m256i[i] = _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(a01),
_mm256_castsi256_ps(b01),
_mm256_castsi256_ps(select)));
}
#elif defined(SIMDE_X_PERMUTEX2VAR_USE_GENERIC)
simde_x_permutex2var(r_.m128i, a_.m128i, idx_.m128i, b_.m128i, 2, 2);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ((idx_.i32[i] & 0x10) ? b_ : a_).i32[idx_.i32[i] & 0x0F];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_permutex2var_epi32
#define _mm512_permutex2var_epi32(a, idx, b) simde_mm512_permutex2var_epi32(a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_permutex2var_epi32 (simde__m512i a, simde__mmask16 k, simde__m512i idx, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_permutex2var_epi32(a, k, idx, b);
#else
return simde_mm512_mask_mov_epi32(a, k, simde_mm512_permutex2var_epi32(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_permutex2var_epi32
#define _mm512_mask_permutex2var_epi32(a, k, idx, b) simde_mm512_mask_permutex2var_epi32(a, k, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask2_permutex2var_epi32 (simde__m512i a, simde__m512i idx, simde__mmask16 k, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask2_permutex2var_epi32(a, idx, k, b);
#else
return simde_mm512_mask_mov_epi32(idx, k, simde_mm512_permutex2var_epi32(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask2_permutex2var_epi32
#define _mm512_mask2_permutex2var_epi32(a, idx, k, b) simde_mm512_mask2_permutex2var_epi32(a, idx, k, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_permutex2var_epi32 (simde__mmask16 k, simde__m512i a, simde__m512i idx, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_permutex2var_epi32(k, a, idx, b);
#else
return simde_mm512_maskz_mov_epi32(k, simde_mm512_permutex2var_epi32(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_permutex2var_epi32
#define _mm512_maskz_permutex2var_epi32(k, a, idx, b) simde_mm512_maskz_permutex2var_epi32(k, a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_permutex2var_epi64 (simde__m512i a, simde__m512i idx, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_permutex2var_epi64(a, idx, b);
#else
simde__m512i_private
a_ = simde__m512i_to_private(a),
idx_ = simde__m512i_to_private(idx),
b_ = simde__m512i_to_private(b),
r_;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
r_.i64[i] = ((idx_.i64[i] & 8) ? b_ : a_).i64[idx_.i64[i] & 7];
}
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_permutex2var_epi64
#define _mm512_permutex2var_epi64(a, idx, b) simde_mm512_permutex2var_epi64(a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_permutex2var_epi64 (simde__m512i a, simde__mmask8 k, simde__m512i idx, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_permutex2var_epi64(a, k, idx, b);
#else
return simde_mm512_mask_mov_epi64(a, k, simde_mm512_permutex2var_epi64(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_permutex2var_epi64
#define _mm512_mask_permutex2var_epi64(a, k, idx, b) simde_mm512_mask_permutex2var_epi64(a, k, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask2_permutex2var_epi64 (simde__m512i a, simde__m512i idx, simde__mmask8 k, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask2_permutex2var_epi64(a, idx, k, b);
#else
return simde_mm512_mask_mov_epi64(idx, k, simde_mm512_permutex2var_epi64(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask2_permutex2var_epi64
#define _mm512_mask2_permutex2var_epi64(a, idx, k, b) simde_mm512_mask2_permutex2var_epi64(a, idx, k, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_permutex2var_epi64 (simde__mmask8 k, simde__m512i a, simde__m512i idx, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_permutex2var_epi64(k, a, idx, b);
#else
return simde_mm512_maskz_mov_epi64(k, simde_mm512_permutex2var_epi64(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_permutex2var_epi64
#define _mm512_maskz_permutex2var_epi64(k, a, idx, b) simde_mm512_maskz_permutex2var_epi64(k, a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_permutex2var_epi8 (simde__m512i a, simde__m512i idx, simde__m512i b) {
#if defined(SIMDE_X86_AVX512VBMI_NATIVE)
return _mm512_permutex2var_epi8(a, idx, b);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
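    /* Emulate the byte permute with two 16-bit permutes: idx >> 1 is
     * the index of the word holding the byte requested by the low byte
     * of each word lane, idx >> 9 the one for the high byte.  Stray
     * high bits are harmless because permutex2var_epi16 ignores them.
     * Bit 0 of each byte index then chooses the low or high byte of
     * the fetched word. */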
__m512i hilo, hi, lo, hi2, lo2, idx2;
const __m512i ones = _mm512_set1_epi8(1);
const __m512i low_bytes = _mm512_set1_epi16(0x00FF);
idx2 = _mm512_srli_epi16(idx, 1);
hilo = _mm512_permutex2var_epi16(a, idx2, b);
__mmask64 mask = _mm512_test_epi8_mask(idx, ones);
lo = _mm512_and_si512(hilo, low_bytes);
hi = _mm512_srli_epi16(hilo, 8);
idx2 = _mm512_srli_epi16(idx, 9);
hilo = _mm512_permutex2var_epi16(a, idx2, b);
lo2 = _mm512_slli_epi16(hilo, 8);
hi2 = _mm512_andnot_si512(low_bytes, hilo);
lo = _mm512_or_si512(lo, lo2);
hi = _mm512_or_si512(hi, hi2);
return _mm512_mask_blend_epi8(mask, lo, hi);
#else
simde__m512i_private
a_ = simde__m512i_to_private(a),
idx_ = simde__m512i_to_private(idx),
b_ = simde__m512i_to_private(b),
r_;
#if defined(SIMDE_X86_AVX2_NATIVE)
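      /* Same shuffle/blend tree as the 256-bit byte permute, extended
       * to the four 128-bit quarters of each source; shifts of 3, 2
       * and 1 move index bits 4, 5 and 6 into the sign bit to pick the
       * quarter within its pair, the pair itself, and a vs. b. */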
__m256i t0, t1, index, select0x10, select0x20, select0x40, t01, t23, a0123, b0123;
const __m256i mask = _mm256_set1_epi8(0x7F);
const __m256i a0 = _mm256_permute4x64_epi64(a_.m256i[0], (1 << 6) + (0 << 4) + (1 << 2) + (0 << 0));
const __m256i a1 = _mm256_permute4x64_epi64(a_.m256i[0], (3 << 6) + (2 << 4) + (3 << 2) + (2 << 0));
const __m256i a2 = _mm256_permute4x64_epi64(a_.m256i[1], (1 << 6) + (0 << 4) + (1 << 2) + (0 << 0));
const __m256i a3 = _mm256_permute4x64_epi64(a_.m256i[1], (3 << 6) + (2 << 4) + (3 << 2) + (2 << 0));
const __m256i b0 = _mm256_permute4x64_epi64(b_.m256i[0], (1 << 6) + (0 << 4) + (1 << 2) + (0 << 0));
const __m256i b1 = _mm256_permute4x64_epi64(b_.m256i[0], (3 << 6) + (2 << 4) + (3 << 2) + (2 << 0));
const __m256i b2 = _mm256_permute4x64_epi64(b_.m256i[1], (1 << 6) + (0 << 4) + (1 << 2) + (0 << 0));
const __m256i b3 = _mm256_permute4x64_epi64(b_.m256i[1], (3 << 6) + (2 << 4) + (3 << 2) + (2 << 0));
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.m256i_private) / sizeof(r_.m256i_private[0])) ; i++) {
index = _mm256_and_si256(idx_.m256i[i], mask);
t0 = _mm256_shuffle_epi8(a0, index);
t1 = _mm256_shuffle_epi8(a1, index);
select0x10 = _mm256_slli_epi64(index, 3);
t01 = _mm256_blendv_epi8(t0, t1, select0x10);
t0 = _mm256_shuffle_epi8(a2, index);
t1 = _mm256_shuffle_epi8(a3, index);
t23 = _mm256_blendv_epi8(t0, t1, select0x10);
select0x20 = _mm256_slli_epi64(index, 2);
a0123 = _mm256_blendv_epi8(t01, t23, select0x20);
t0 = _mm256_shuffle_epi8(b0, index);
t1 = _mm256_shuffle_epi8(b1, index);
t01 = _mm256_blendv_epi8(t0, t1, select0x10);
t0 = _mm256_shuffle_epi8(b2, index);
t1 = _mm256_shuffle_epi8(b3, index);
t23 = _mm256_blendv_epi8(t0, t1, select0x10);
b0123 = _mm256_blendv_epi8(t01, t23, select0x20);
select0x40 = _mm256_slli_epi64(index, 1);
r_.m256i[i] = _mm256_blendv_epi8(a0123, b0123, select0x40);
}
#elif defined(SIMDE_X_PERMUTEX2VAR_USE_GENERIC)
simde_x_permutex2var(r_.m128i, a_.m128i, idx_.m128i, b_.m128i, 0, 2);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = ((idx_.i8[i] & 0x40) ? b_ : a_).i8[idx_.i8[i] & 0x3F];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES)
#undef _mm512_permutex2var_epi8
#define _mm512_permutex2var_epi8(a, idx, b) simde_mm512_permutex2var_epi8(a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_permutex2var_epi8 (simde__m512i a, simde__mmask64 k, simde__m512i idx, simde__m512i b) {
#if defined(SIMDE_X86_AVX512VBMI_NATIVE)
return _mm512_mask_permutex2var_epi8(a, k, idx, b);
#else
return simde_mm512_mask_mov_epi8(a, k, simde_mm512_permutex2var_epi8(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_permutex2var_epi8
#define _mm512_mask_permutex2var_epi8(a, k, idx, b) simde_mm512_mask_permutex2var_epi8(a, k, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask2_permutex2var_epi8 (simde__m512i a, simde__m512i idx, simde__mmask64 k, simde__m512i b) {
#if defined(SIMDE_X86_AVX512VBMI_NATIVE)
return _mm512_mask2_permutex2var_epi8(a, idx, k, b);
#else
return simde_mm512_mask_mov_epi8(idx, k, simde_mm512_permutex2var_epi8(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask2_permutex2var_epi8
#define _mm512_mask2_permutex2var_epi8(a, idx, k, b) simde_mm512_mask2_permutex2var_epi8(a, idx, k, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_permutex2var_epi8 (simde__mmask64 k, simde__m512i a, simde__m512i idx, simde__m512i b) {
#if defined(SIMDE_X86_AVX512VBMI_NATIVE)
return _mm512_maskz_permutex2var_epi8(k, a, idx, b);
#else
return simde_mm512_maskz_mov_epi8(k, simde_mm512_permutex2var_epi8(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_permutex2var_epi8
#define _mm512_maskz_permutex2var_epi8(k, a, idx, b) simde_mm512_maskz_permutex2var_epi8(k, a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_permutex2var_pd (simde__m512d a, simde__m512i idx, simde__m512d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_permutex2var_pd(a, idx, b);
#else
return simde_mm512_castsi512_pd(simde_mm512_permutex2var_epi64(simde_mm512_castpd_si512(a), idx, simde_mm512_castpd_si512(b)));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_permutex2var_pd
#define _mm512_permutex2var_pd(a, idx, b) simde_mm512_permutex2var_pd(a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_permutex2var_pd (simde__m512d a, simde__mmask8 k, simde__m512i idx, simde__m512d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_permutex2var_pd(a, k, idx, b);
#else
return simde_mm512_mask_mov_pd(a, k, simde_mm512_permutex2var_pd(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_permutex2var_pd
#define _mm512_mask_permutex2var_pd(a, k, idx, b) simde_mm512_mask_permutex2var_pd(a, k, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask2_permutex2var_pd (simde__m512d a, simde__m512i idx, simde__mmask8 k, simde__m512d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask2_permutex2var_pd(a, idx, k, b);
#else
return simde_mm512_mask_mov_pd(simde_mm512_castsi512_pd(idx), k, simde_mm512_permutex2var_pd(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask2_permutex2var_pd
#define _mm512_mask2_permutex2var_pd(a, idx, k, b) simde_mm512_mask2_permutex2var_pd(a, idx, k, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_maskz_permutex2var_pd (simde__mmask8 k, simde__m512d a, simde__m512i idx, simde__m512d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_permutex2var_pd(k, a, idx, b);
#else
return simde_mm512_maskz_mov_pd(k, simde_mm512_permutex2var_pd(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_permutex2var_pd
#define _mm512_maskz_permutex2var_pd(k, a, idx, b) simde_mm512_maskz_permutex2var_pd(k, a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_permutex2var_ps (simde__m512 a, simde__m512i idx, simde__m512 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_permutex2var_ps(a, idx, b);
#else
return simde_mm512_castsi512_ps(simde_mm512_permutex2var_epi32(simde_mm512_castps_si512(a), idx, simde_mm512_castps_si512(b)));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_permutex2var_ps
#define _mm512_permutex2var_ps(a, idx, b) simde_mm512_permutex2var_ps(a, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_permutex2var_ps (simde__m512 a, simde__mmask16 k, simde__m512i idx, simde__m512 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_permutex2var_ps(a, k, idx, b);
#else
return simde_mm512_mask_mov_ps(a, k, simde_mm512_permutex2var_ps(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_permutex2var_ps
#define _mm512_mask_permutex2var_ps(a, k, idx, b) simde_mm512_mask_permutex2var_ps(a, k, idx, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask2_permutex2var_ps (simde__m512 a, simde__m512i idx, simde__mmask16 k, simde__m512 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask2_permutex2var_ps(a, idx, k, b);
#else
return simde_mm512_mask_mov_ps(simde_mm512_castsi512_ps(idx), k, simde_mm512_permutex2var_ps(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask2_permutex2var_ps
#define _mm512_mask2_permutex2var_ps(a, idx, k, b) simde_mm512_mask2_permutex2var_ps(a, idx, k, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_maskz_permutex2var_ps (simde__mmask16 k, simde__m512 a, simde__m512i idx, simde__m512 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_permutex2var_ps(k, a, idx, b);
#else
return simde_mm512_maskz_mov_ps(k, simde_mm512_permutex2var_ps(a, idx, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_permutex2var_ps
#define _mm512_maskz_permutex2var_ps(k, a, idx, b) simde_mm512_maskz_permutex2var_ps(k, a, idx, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_PERMUTEX2VAR_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/copysign.h | .h | 3,008 | 87 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Himanshi Mathur <himanshi18037@iiitd.ac.in>
*/
#if !defined(SIMDE_X86_AVX512_COPYSIGN_H)
#define SIMDE_X86_AVX512_COPYSIGN_H
#include "types.h"
#include "mov.h"
#include "and.h"
#include "andnot.h"
#include "xor.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_x_mm512_copysign_ps(simde__m512 dest, simde__m512 src) {
simde__m512_private
r_,
dest_ = simde__m512_to_private(dest),
src_ = simde__m512_to_private(src);
#if defined(simde_math_copysignf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_copysignf(dest_.f32[i], src_.f32[i]);
}
#else
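    /* Sign-transfer via bit masks: sgnbit has only the sign bit set in
     * every lane, so the result combines the sign bits of src with the
     * magnitude bits of dest. */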
simde__m512 sgnbit = simde_mm512_xor_ps(simde_mm512_set1_ps(SIMDE_FLOAT32_C(0.0)), simde_mm512_set1_ps(-SIMDE_FLOAT32_C(0.0)));
return simde_mm512_xor_ps(simde_mm512_and_ps(sgnbit, src), simde_mm512_andnot_ps(sgnbit, dest));
#endif
return simde__m512_from_private(r_);
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_x_mm512_copysign_pd(simde__m512d dest, simde__m512d src) {
simde__m512d_private
r_,
dest_ = simde__m512d_to_private(dest),
src_ = simde__m512d_to_private(src);
#if defined(simde_math_copysign)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = simde_math_copysign(dest_.f64[i], src_.f64[i]);
}
#else
simde__m512d sgnbit = simde_mm512_xor_pd(simde_mm512_set1_pd(SIMDE_FLOAT64_C(0.0)), simde_mm512_set1_pd(-SIMDE_FLOAT64_C(0.0)));
return simde_mm512_xor_pd(simde_mm512_and_pd(sgnbit, src), simde_mm512_andnot_pd(sgnbit, dest));
#endif
return simde__m512d_from_private(r_);
}
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_COPYSIGN_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/mov_mask.h | .h | 11,929 | 373 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_X86_AVX512_MOV_MASK_H)
#define SIMDE_X86_AVX512_MOV_MASK_H
#include "types.h"
#include "../avx2.h"
#include "cast.h"
#include "set.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm_movepi8_mask (simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_movepi8_mask(a);
#elif defined(SIMDE_X86_SSE2_NATIVE)
return HEDLEY_STATIC_CAST(simde__mmask16, simde_mm_movemask_epi8(a));
#else
simde__m128i_private a_ = simde__m128i_to_private(a);
simde__mmask16 r = 0;
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) {
r |= (a_.i8[i] < 0) ? (UINT64_C(1) << i) : 0;
}
return r;
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_movepi8_mask
#define _mm_movepi8_mask(a) simde_mm_movepi8_mask(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_movepi16_mask (simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_movepi16_mask(a);
#elif defined(SIMDE_X86_SSE2_NATIVE)
/* There is no 32-bit _mm_movemask_* function, so we use
* _mm_movemask_epi8 then extract the odd bits. */
uint_fast16_t r = HEDLEY_STATIC_CAST(uint_fast16_t, simde_mm_movemask_epi8(a));
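    /* The shift/mask cascade keeps the odd movemask bits (the sign bit
     * of each word's high byte) and packs them into the low byte; e.g.
     * an input mask of 0xFFFF compacts to 0xFF. */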
r = ( (r >> 1)) & UINT32_C(0x5555);
r = (r | (r >> 1)) & UINT32_C(0x3333);
r = (r | (r >> 2)) & UINT32_C(0x0f0f);
r = (r | (r >> 4)) & UINT32_C(0x00ff);
return HEDLEY_STATIC_CAST(simde__mmask8, r);
#else
simde__m128i_private a_ = simde__m128i_to_private(a);
simde__mmask8 r = 0;
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) {
r |= (a_.i16[i] < 0) ? (UINT32_C(1) << i) : 0;
}
return r;
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_movepi16_mask
#define _mm_movepi16_mask(a) simde_mm_movepi16_mask(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_movepi32_mask (simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE)
return _mm_movepi32_mask(a);
#elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
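    /* The four float sign bits returned by _mm_movemask_ps coincide
     * with the sign bits of the 32-bit integer lanes. */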
return HEDLEY_STATIC_CAST(simde__mmask8, simde_mm_movemask_ps(simde_mm_castsi128_ps(a)));
#else
simde__m128i_private a_ = simde__m128i_to_private(a);
simde__mmask8 r = 0;
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) {
r |= (a_.i32[i] < 0) ? (UINT32_C(1) << i) : 0;
}
return r;
#endif
}
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm_movepi32_mask
#define _mm_movepi32_mask(a) simde_mm_movepi32_mask(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_movepi64_mask (simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE)
return _mm_movepi64_mask(a);
#elif defined(SIMDE_X86_SSE2_NATIVE)
return HEDLEY_STATIC_CAST(simde__mmask8, simde_mm_movemask_pd(simde_mm_castsi128_pd(a)));
#else
simde__m128i_private a_ = simde__m128i_to_private(a);
simde__mmask8 r = 0;
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) {
r |= (a_.i64[i] < 0) ? (UINT32_C(1) << i) : 0;
}
return r;
#endif
}
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm_movepi64_mask
#define _mm_movepi64_mask(a) simde_mm_movepi64_mask(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm256_movepi8_mask (simde__m256i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_movepi8_mask(a);
#else
simde__m256i_private a_ = simde__m256i_to_private(a);
simde__mmask32 r = 0;
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(a_.m128i) / sizeof(a_.m128i[0])) ; i++) {
r |= HEDLEY_STATIC_CAST(simde__mmask32, simde_mm_movepi8_mask(a_.m128i[i])) << (i * 16);
}
#else
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) {
r |= (a_.i8[i] < 0) ? (UINT64_C(1) << i) : 0;
}
#endif
return HEDLEY_STATIC_CAST(simde__mmask32, r);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_movepi8_mask
#define _mm256_movepi8_mask(a) simde_mm256_movepi8_mask(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm256_movepi16_mask (simde__m256i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_movepi16_mask(a);
#else
simde__m256i_private a_ = simde__m256i_to_private(a);
simde__mmask16 r = 0;
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(a_.m128i) / sizeof(a_.m128i[0])) ; i++) {
r |= HEDLEY_STATIC_CAST(simde__mmask16, simde_mm_movepi16_mask(a_.m128i[i])) << (i * 8);
}
#else
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) {
r |= (a_.i16[i] < 0) ? (UINT32_C(1) << i) : 0;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_movepi16_mask
#define _mm256_movepi16_mask(a) simde_mm256_movepi16_mask(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_movepi32_mask (simde__m256i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE)
return _mm256_movepi32_mask(a);
#else
simde__m256i_private a_ = simde__m256i_to_private(a);
simde__mmask8 r = 0;
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(a_.m128i) / sizeof(a_.m128i[0])) ; i++) {
r |= HEDLEY_STATIC_CAST(simde__mmask8, simde_mm_movepi32_mask(a_.m128i[i])) << (i * 4);
}
#else
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) {
r |= (a_.i32[i] < 0) ? (UINT32_C(1) << i) : 0;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm256_movepi32_mask
#define _mm256_movepi32_mask(a) simde_mm256_movepi32_mask(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_movepi64_mask (simde__m256i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE)
return _mm256_movepi64_mask(a);
#else
simde__m256i_private a_ = simde__m256i_to_private(a);
simde__mmask8 r = 0;
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(a_.m128i) / sizeof(a_.m128i[0])) ; i++) {
r |= HEDLEY_STATIC_CAST(simde__mmask8, simde_mm_movepi64_mask(a_.m128i[i])) << (i * 2);
}
#else
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) {
r |= (a_.i64[i] < 0) ? (UINT32_C(1) << i) : 0;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm256_movepi64_mask
#define _mm256_movepi64_mask(a) simde_mm256_movepi64_mask(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask64
simde_mm512_movepi8_mask (simde__m512i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_movepi8_mask(a);
#else
simde__m512i_private a_ = simde__m512i_to_private(a);
simde__mmask64 r = 0;
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(a_.m256i) / sizeof(a_.m256i[0])) ; i++) {
r |= HEDLEY_STATIC_CAST(simde__mmask64, simde_mm256_movepi8_mask(a_.m256i[i])) << (i * 32);
}
#else
r = 0;
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) {
r |= (a_.i8[i] < 0) ? (UINT64_C(1) << i) : 0;
}
#endif
return HEDLEY_STATIC_CAST(simde__mmask64, r);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_movepi8_mask
#define _mm512_movepi8_mask(a) simde_mm512_movepi8_mask(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm512_movepi16_mask (simde__m512i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_movepi16_mask(a);
#else
simde__m512i_private a_ = simde__m512i_to_private(a);
simde__mmask32 r = 0;
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(a_.m256i) / sizeof(a_.m256i[0])) ; i++) {
r |= HEDLEY_STATIC_CAST(simde__mmask32, simde_mm256_movepi16_mask(a_.m256i[i])) << (i * 16);
}
#else
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) {
r |= (a_.i16[i] < 0) ? (UINT32_C(1) << i) : 0;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_movepi16_mask
#define _mm512_movepi16_mask(a) simde_mm512_movepi16_mask(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm512_movepi32_mask (simde__m512i a) {
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
return _mm512_movepi32_mask(a);
#else
simde__m512i_private a_ = simde__m512i_to_private(a);
simde__mmask16 r = 0;
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(a_.m256i) / sizeof(a_.m256i[0])) ; i++) {
r |= HEDLEY_STATIC_CAST(simde__mmask16, simde_mm256_movepi32_mask(a_.m256i[i])) << (i * 8);
}
#else
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) {
r |= (a_.i32[i] < 0) ? (UINT32_C(1) << i) : 0;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_movepi32_mask
#define _mm512_movepi32_mask(a) simde_mm512_movepi32_mask(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm512_movepi64_mask (simde__m512i a) {
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
return _mm512_movepi64_mask(a);
#else
simde__m512i_private a_ = simde__m512i_to_private(a);
simde__mmask8 r = 0;
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(a_.m256i) / sizeof(a_.m256i[0])) ; i++) {
r |= simde_mm256_movepi64_mask(a_.m256i[i]) << (i * 4);
}
#else
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) {
r |= (a_.i64[i] < 0) ? (UINT32_C(1) << i) : 0;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_movepi64_mask
#define _mm512_movepi64_mask(a) simde_mm512_movepi64_mask(a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_MOV_MASK_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/cast.h | .h | 9,905 | 358 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Himanshi Mathur <himanshi18037@iiitd.ac.in>
* 2020 Hidayat Khan <huk2209@gmail.com>
* 2020 Christopher Moore <moore@free.fr>
*/
#if !defined(SIMDE_X86_AVX512_CAST_H)
#define SIMDE_X86_AVX512_CAST_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_castpd_ps (simde__m512d a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_castpd_ps(a);
#else
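    /* memcpy is the strict-aliasing-safe way to reinterpret the bits;
     * compilers generally fold it into a plain register move. */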
simde__m512 r;
simde_memcpy(&r, &a, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_castpd_ps
#define _mm512_castpd_ps(a) simde_mm512_castpd_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_castpd_si512 (simde__m512d a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_castpd_si512(a);
#else
simde__m512i r;
simde_memcpy(&r, &a, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_castpd_si512
#define _mm512_castpd_si512(a) simde_mm512_castpd_si512(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_castps_pd (simde__m512 a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_castps_pd(a);
#else
simde__m512d r;
simde_memcpy(&r, &a, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_castps_pd
#define _mm512_castps_pd(a) simde_mm512_castps_pd(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_castps_si512 (simde__m512 a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_castps_si512(a);
#else
simde__m512i r;
simde_memcpy(&r, &a, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_castps_si512
#define _mm512_castps_si512(a) simde_mm512_castps_si512(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_castph_si512 (simde__m512h a) {
#if defined(SIMDE_X86_AVX512FP16_NATIVE)
return _mm512_castph_si512(a);
#else
simde__m512i r;
simde_memcpy(&r, &a, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_X86_AVX512FP16_ENABLE_NATIVE_ALIASES)
#undef _mm512_castph_si512
#define _mm512_castph_si512(a) simde_mm512_castph_si512(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512h
simde_mm512_castsi512_ph (simde__m512i a) {
#if defined(SIMDE_X86_AVX512FP16_NATIVE)
return _mm512_castsi512_ph(a);
#else
simde__m512h r;
simde_memcpy(&r, &a, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_X86_AVX512FP16_ENABLE_NATIVE_ALIASES)
#undef _mm512_castsi512_ph
#define _mm512_castsi512_ph(a) simde_mm512_castsi512_ph(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_castsi512_ps (simde__m512i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_castsi512_ps(a);
#else
simde__m512 r;
simde_memcpy(&r, &a, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_castsi512_ps
#define _mm512_castsi512_ps(a) simde_mm512_castsi512_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_castsi512_pd (simde__m512i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_castsi512_pd(a);
#else
simde__m512d r;
simde_memcpy(&r, &a, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_castsi512_pd
#define _mm512_castsi512_pd(a) simde_mm512_castsi512_pd(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_castpd128_pd512 (simde__m128d a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_castpd128_pd512(a);
#else
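    /* As with the native cast, the upper 384 bits of the result are
     * left undefined; only the low 128 bits are written. */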
simde__m512d_private r_;
r_.m128d[0] = a;
return simde__m512d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_castpd128_pd512
#define _mm512_castpd128_pd512(a) simde_mm512_castpd128_pd512(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_castpd256_pd512 (simde__m256d a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_castpd256_pd512(a);
#else
simde__m512d_private r_;
r_.m256d[0] = a;
return simde__m512d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_castpd256_pd512
#define _mm512_castpd256_pd512(a) simde_mm512_castpd256_pd512(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm512_castpd512_pd128 (simde__m512d a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_castpd512_pd128(a);
#else
simde__m512d_private a_ = simde__m512d_to_private(a);
return a_.m128d[0];
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_castpd512_pd128
#define _mm512_castpd512_pd128(a) simde_mm512_castpd512_pd128(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm512_castpd512_pd256 (simde__m512d a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_castpd512_pd256(a);
#else
simde__m512d_private a_ = simde__m512d_to_private(a);
return a_.m256d[0];
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_castpd512_pd256
#define _mm512_castpd512_pd256(a) simde_mm512_castpd512_pd256(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_castps128_ps512 (simde__m128 a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_castps128_ps512(a);
#else
simde__m512_private r_;
r_.m128[0] = a;
return simde__m512_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_castps128_ps512
#define _mm512_castps128_ps512(a) simde_mm512_castps128_ps512(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_castps256_ps512 (simde__m256 a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_castps256_ps512(a);
#else
simde__m512_private r_;
r_.m256[0] = a;
return simde__m512_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_castps256_ps512
#define _mm512_castps256_ps512(a) simde_mm512_castps256_ps512(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm512_castps512_ps128 (simde__m512 a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_castps512_ps128(a);
#else
simde__m512_private a_ = simde__m512_to_private(a);
return a_.m128[0];
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_castps512_ps128
#define _mm512_castps512_ps128(a) simde_mm512_castps512_ps128(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm512_castps512_ps256 (simde__m512 a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_castps512_ps256(a);
#else
simde__m512_private a_ = simde__m512_to_private(a);
return a_.m256[0];
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_castps512_ps256
#define _mm512_castps512_ps256(a) simde_mm512_castps512_ps256(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_castsi128_si512 (simde__m128i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_castsi128_si512(a);
#else
simde__m512i_private r_;
r_.m128i[0] = a;
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_castsi128_si512
#define _mm512_castsi128_si512(a) simde_mm512_castsi128_si512(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_castsi256_si512 (simde__m256i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_castsi256_si512(a);
#else
simde__m512i_private r_;
r_.m256i[0] = a;
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_castsi256_si512
#define _mm512_castsi256_si512(a) simde_mm512_castsi256_si512(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm512_castsi512_si128 (simde__m512i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_castsi512_si128(a);
#else
simde__m512i_private a_ = simde__m512i_to_private(a);
return a_.m128i[0];
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_castsi512_si128
#define _mm512_castsi512_si128(a) simde_mm512_castsi512_si128(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm512_castsi512_si256 (simde__m512i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_castsi512_si256(a);
#else
simde__m512i_private a_ = simde__m512i_to_private(a);
return a_.m256i[0];
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_castsi512_si256
#define _mm512_castsi512_si256(a) simde_mm512_castsi512_si256(a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_CAST_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/dpbf16.h | .h | 9,991 | 282 | #if !defined(SIMDE_X86_AVX512_DPBF16_H)
#define SIMDE_X86_AVX512_DPBF16_H
#include "types.h"
#include "mov.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_dpbf16_ps (simde__m128 src, simde__m128bh a, simde__m128bh b) {
#if defined(SIMDE_X86_AVX512BF16_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_dpbf16_ps(src, a, b);
#else
simde__m128_private
src_ = simde__m128_to_private(src);
simde__m128bh_private
a_ = simde__m128bh_to_private(a),
b_ = simde__m128bh_to_private(b);
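/* A bfloat16 value is the upper 16 bits of an IEEE 754 binary32, so
 * widening each u16 lane to u32 and shifting it left by 16 reproduces the
 * exact float32 it encodes.  The shuffles below separate even- and
 * odd-indexed bf16 lanes so accumulator lane i can receive
 * a[2i]*b[2i] + a[2i+1]*b[2i+1], matching the VDPBF16PS semantics. */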
#if ! ( defined(SIMDE_ARCH_X86) && defined(HEDLEY_GCC_VERSION) ) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_SHUFFLE_VECTOR_)
uint32_t x1 SIMDE_VECTOR(32);
uint32_t x2 SIMDE_VECTOR(32);
simde__m128_private
r1_[2],
r2_[2];
a_.u16 =
SIMDE_SHUFFLE_VECTOR_(
16, 16,
a_.u16, a_.u16,
0, 2, 4, 6,
1, 3, 5, 7
);
b_.u16 =
SIMDE_SHUFFLE_VECTOR_(
16, 16,
b_.u16, b_.u16,
0, 2, 4, 6,
1, 3, 5, 7
);
SIMDE_CONVERT_VECTOR_(x1, a_.u16);
SIMDE_CONVERT_VECTOR_(x2, b_.u16);
x1 <<= 16;
x2 <<= 16;
simde_memcpy(&r1_, &x1, sizeof(x1));
simde_memcpy(&r2_, &x2, sizeof(x2));
src_.f32 +=
HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r1_[0].u32) * HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r2_[0].u32) +
HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r1_[1].u32) * HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r2_[1].u32);
#else
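/* Portable fallback: rebuild each bf16 operand as a float32 by shifting
 * it into the top 16 bits of a zeroed u32, multiply, and add each pair of
 * products into accumulator lane i / 2. */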
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u16) / sizeof(a_.u16[0])) ; i++) {
src_.f32[i / 2] += (simde_uint32_as_float32(HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) << 16) * simde_uint32_as_float32(HEDLEY_STATIC_CAST(uint32_t, b_.u16[i]) << 16));
}
#endif
return simde__m128_from_private(src_);
#endif
}
#if defined(SIMDE_X86_AVX512BF16_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_dpbf16_ps
#define _mm_dpbf16_ps(src, a, b) simde_mm_dpbf16_ps(src, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mask_dpbf16_ps (simde__m128 src, simde__mmask8 k, simde__m128bh a, simde__m128bh b) {
#if defined(SIMDE_X86_AVX512BF16_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_dpbf16_ps(src, k, a, b);
#else
return simde_mm_mask_mov_ps(src, k, simde_mm_dpbf16_ps(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BF16_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_dpbf16_ps
#define _mm_mask_dpbf16_ps(src, k, a, b) simde_mm_mask_dpbf16_ps(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_maskz_dpbf16_ps (simde__mmask8 k, simde__m128 src, simde__m128bh a, simde__m128bh b) {
#if defined(SIMDE_X86_AVX512BF16_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_dpbf16_ps(k, src, a, b);
#else
return simde_mm_maskz_mov_ps(k, simde_mm_dpbf16_ps(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BF16_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_dpbf16_ps
#define _mm_maskz_dpbf16_ps(k, src, a, b) simde_mm_maskz_dpbf16_ps(k, src, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_dpbf16_ps (simde__m256 src, simde__m256bh a, simde__m256bh b) {
#if defined(SIMDE_X86_AVX512BF16_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_dpbf16_ps(src, a, b);
#else
simde__m256_private
src_ = simde__m256_to_private(src);
simde__m256bh_private
a_ = simde__m256bh_to_private(a),
b_ = simde__m256bh_to_private(b);
#if ! ( defined(SIMDE_ARCH_X86) && defined(HEDLEY_GCC_VERSION) ) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_SHUFFLE_VECTOR_)
uint32_t x1 SIMDE_VECTOR(64);
uint32_t x2 SIMDE_VECTOR(64);
simde__m256_private
r1_[2],
r2_[2];
a_.u16 =
SIMDE_SHUFFLE_VECTOR_(
16, 32,
a_.u16, a_.u16,
0, 2, 4, 6, 8, 10, 12, 14,
1, 3, 5, 7, 9, 11, 13, 15
);
b_.u16 =
SIMDE_SHUFFLE_VECTOR_(
16, 32,
b_.u16, b_.u16,
0, 2, 4, 6, 8, 10, 12, 14,
1, 3, 5, 7, 9, 11, 13, 15
);
SIMDE_CONVERT_VECTOR_(x1, a_.u16);
SIMDE_CONVERT_VECTOR_(x2, b_.u16);
x1 <<= 16;
x2 <<= 16;
simde_memcpy(&r1_, &x1, sizeof(x1));
simde_memcpy(&r2_, &x2, sizeof(x2));
src_.f32 +=
HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r1_[0].u32) * HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r2_[0].u32) +
HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r1_[1].u32) * HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r2_[1].u32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u16) / sizeof(a_.u16[0])) ; i++) {
src_.f32[i / 2] += (simde_uint32_as_float32(HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) << 16) * simde_uint32_as_float32(HEDLEY_STATIC_CAST(uint32_t, b_.u16[i]) << 16));
}
#endif
return simde__m256_from_private(src_);
#endif
}
#if defined(SIMDE_X86_AVX512BF16_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_dpbf16_ps
#define _mm256_dpbf16_ps(src, a, b) simde_mm256_dpbf16_ps(src, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_mask_dpbf16_ps (simde__m256 src, simde__mmask8 k, simde__m256bh a, simde__m256bh b) {
#if defined(SIMDE_X86_AVX512BF16_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_dpbf16_ps(src, k, a, b);
#else
return simde_mm256_mask_mov_ps(src, k, simde_mm256_dpbf16_ps(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BF16_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_dpbf16_ps
#define _mm256_mask_dpbf16_ps(src, k, a, b) simde_mm256_mask_dpbf16_ps(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_maskz_dpbf16_ps (simde__mmask8 k, simde__m256 src, simde__m256bh a, simde__m256bh b) {
#if defined(SIMDE_X86_AVX512BF16_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_dpbf16_ps(k, src, a, b);
#else
return simde_mm256_maskz_mov_ps(k, simde_mm256_dpbf16_ps(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BF16_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_dpbf16_ps
#define _mm256_maskz_dpbf16_ps(k, src, a, b) simde_mm256_maskz_dpbf16_ps(k, src, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_dpbf16_ps (simde__m512 src, simde__m512bh a, simde__m512bh b) {
#if defined(SIMDE_X86_AVX512BF16_NATIVE)
return _mm512_dpbf16_ps(src, a, b);
#else
simde__m512_private
src_ = simde__m512_to_private(src);
simde__m512bh_private
a_ = simde__m512bh_to_private(a),
b_ = simde__m512bh_to_private(b);
#if ! ( defined(SIMDE_ARCH_X86) && defined(HEDLEY_GCC_VERSION) ) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_SHUFFLE_VECTOR_)
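/* Same even/odd split as the 128-bit version above, just with 32 bf16
 * lanes feeding 16 float32 accumulators. */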
uint32_t x1 SIMDE_VECTOR(128);
uint32_t x2 SIMDE_VECTOR(128);
simde__m512_private
r1_[2],
r2_[2];
a_.u16 =
SIMDE_SHUFFLE_VECTOR_(
16, 64,
a_.u16, a_.u16,
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30,
1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
);
b_.u16 =
SIMDE_SHUFFLE_VECTOR_(
16, 64,
b_.u16, b_.u16,
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30,
1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
);
SIMDE_CONVERT_VECTOR_(x1, a_.u16);
SIMDE_CONVERT_VECTOR_(x2, b_.u16);
x1 <<= 16;
x2 <<= 16;
simde_memcpy(&r1_, &x1, sizeof(x1));
simde_memcpy(&r2_, &x2, sizeof(x2));
src_.f32 +=
HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r1_[0].u32) * HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r2_[0].u32) +
HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r1_[1].u32) * HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r2_[1].u32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u16) / sizeof(a_.u16[0])) ; i++) {
src_.f32[i / 2] += (simde_uint32_as_float32(HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) << 16) * simde_uint32_as_float32(HEDLEY_STATIC_CAST(uint32_t, b_.u16[i]) << 16));
}
#endif
return simde__m512_from_private(src_);
#endif
}
#if defined(SIMDE_X86_AVX512BF16_ENABLE_NATIVE_ALIASES)
#undef _mm512_dpbf16_ps
#define _mm512_dpbf16_ps(src, a, b) simde_mm512_dpbf16_ps(src, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_dpbf16_ps (simde__m512 src, simde__mmask16 k, simde__m512bh a, simde__m512bh b) {
#if defined(SIMDE_X86_AVX512BF16_NATIVE)
return _mm512_mask_dpbf16_ps(src, k, a, b);
#else
return simde_mm512_mask_mov_ps(src, k, simde_mm512_dpbf16_ps(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BF16_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_dpbf16_ps
#define _mm512_mask_dpbf16_ps(src, k, a, b) simde_mm512_mask_dpbf16_ps(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_maskz_dpbf16_ps (simde__mmask16 k, simde__m512 src, simde__m512bh a, simde__m512bh b) {
#if defined(SIMDE_X86_AVX512BF16_NATIVE)
return _mm512_maskz_dpbf16_ps(k, src, a, b);
#else
return simde_mm512_maskz_mov_ps(k, simde_mm512_dpbf16_ps(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BF16_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_dpbf16_ps
#define _mm512_maskz_dpbf16_ps(k, src, a, b) simde_mm512_maskz_dpbf16_ps(k, src, a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_DPBF16_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/dpbusds.h | .h | 13,835 | 345 | #if !defined(SIMDE_X86_AVX512_DPBUSDS_H)
#define SIMDE_X86_AVX512_DPBUSDS_H
#include "types.h"
#include "mov.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_dpbusds_epi32(simde__m128i src, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm_dpbusds_epi32(src, a, b);
#else
simde__m128i_private
src_ = simde__m128i_to_private(src),
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
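/* Branchless path: deinterleave the bytes so that, after widening
 * (u8 -> u32, i8 -> i32), r1_[j]/r2_[j] hold byte j of every 32-bit
 * group.  The four lane-wise products are then summed and added to the
 * accumulator with signed saturation, computed without branches below. */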
uint32_t x1_ SIMDE_VECTOR(64);
int32_t x2_ SIMDE_VECTOR(64);
simde__m128i_private
r1_[4],
r2_[4];
a_.u8 =
SIMDE_SHUFFLE_VECTOR_(
8, 16,
a_.u8, a_.u8,
0, 4, 8, 12,
1, 5, 9, 13,
2, 6, 10, 14,
3, 7, 11, 15
);
b_.i8 =
SIMDE_SHUFFLE_VECTOR_(
8, 16,
b_.i8, b_.i8,
0, 4, 8, 12,
1, 5, 9, 13,
2, 6, 10, 14,
3, 7, 11, 15
);
SIMDE_CONVERT_VECTOR_(x1_, a_.u8);
SIMDE_CONVERT_VECTOR_(x2_, b_.i8);
simde_memcpy(&r1_, &x1_, sizeof(x1_));
simde_memcpy(&r2_, &x2_, sizeof(x2_));
uint32_t au SIMDE_VECTOR(16) =
HEDLEY_REINTERPRET_CAST(
__typeof__(au),
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[0].u32) * r2_[0].i32) +
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[1].u32) * r2_[1].i32) +
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[2].u32) * r2_[2].i32) +
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[3].u32) * r2_[3].i32)
);
uint32_t bu SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), src_.i32);
uint32_t ru SIMDE_VECTOR(16) = au + bu;
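/* ru is the wrapped sum; au is reused to hold the saturation value.  The
 * logical shift exposes the addend's sign bit, so (au >> 31) + INT32_MAX
 * is INT32_MAX for a non-negative addend and INT32_MIN for a negative
 * one.  Signed overflow occurs exactly when both addends share a sign
 * that differs from the sum's, so m ends up all-ones where no overflow
 * happened; the final blend keeps the wrapped sum there and the
 * saturation value everywhere else. */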
au = (au >> 31) + INT32_MAX;
uint32_t m SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(src_.i32), (au ^ bu) | ~(bu ^ ru)) < 0);
src_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(src_.i32), (au & ~m) | (ru & m));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0]) / 4) ; i++) {
src_.i32[i] =
simde_math_adds_i32(
src_.i32[i],
HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) ]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) ]) +
HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) + 1]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) + 1]) +
HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) + 2]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) + 2]) +
HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) + 3]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) + 3])
);
}
#endif
return simde__m128i_from_private(src_);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm_dpbusds_epi32
#define _mm_dpbusds_epi32(src, a, b) simde_mm_dpbusds_epi32(src, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_dpbusds_epi32(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm_mask_dpbusds_epi32(src, k, a, b);
#else
return simde_mm_mask_mov_epi32(src, k, simde_mm_dpbusds_epi32(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_dpbusds_epi32
#define _mm_mask_dpbusds_epi32(src, k, a, b) simde_mm_mask_dpbusds_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_dpbusds_epi32(simde__mmask8 k, simde__m128i src, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm_maskz_dpbusds_epi32(k, src, a, b);
#else
return simde_mm_maskz_mov_epi32(k, simde_mm_dpbusds_epi32(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_dpbusds_epi32
#define _mm_maskz_dpbusds_epi32(k, src, a, b) simde_mm_maskz_dpbusds_epi32(k, src, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_dpbusds_epi32(simde__m256i src, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm256_dpbusds_epi32(src, a, b);
#else
simde__m256i_private
src_ = simde__m256i_to_private(src),
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
src_.m128i[0] = simde_mm_dpbusds_epi32(src_.m128i[0], a_.m128i[0], b_.m128i[0]);
src_.m128i[1] = simde_mm_dpbusds_epi32(src_.m128i[1], a_.m128i[1], b_.m128i[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
uint32_t x1_ SIMDE_VECTOR(128);
int32_t x2_ SIMDE_VECTOR(128);
simde__m256i_private
r1_[4],
r2_[4];
a_.u8 =
SIMDE_SHUFFLE_VECTOR_(
8, 32,
a_.u8, a_.u8,
0, 4, 8, 12, 16, 20, 24, 28,
1, 5, 9, 13, 17, 21, 25, 29,
2, 6, 10, 14, 18, 22, 26, 30,
3, 7, 11, 15, 19, 23, 27, 31
);
b_.i8 =
SIMDE_SHUFFLE_VECTOR_(
8, 32,
b_.i8, b_.i8,
0, 4, 8, 12, 16, 20, 24, 28,
1, 5, 9, 13, 17, 21, 25, 29,
2, 6, 10, 14, 18, 22, 26, 30,
3, 7, 11, 15, 19, 23, 27, 31
);
SIMDE_CONVERT_VECTOR_(x1_, a_.u8);
SIMDE_CONVERT_VECTOR_(x2_, b_.i8);
simde_memcpy(&r1_, &x1_, sizeof(x1_));
simde_memcpy(&r2_, &x2_, sizeof(x2_));
uint32_t au SIMDE_VECTOR(32) =
HEDLEY_REINTERPRET_CAST(
__typeof__(au),
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[0].u32) * r2_[0].i32) +
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[1].u32) * r2_[1].i32) +
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[2].u32) * r2_[2].i32) +
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[3].u32) * r2_[3].i32)
);
uint32_t bu SIMDE_VECTOR(32) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), src_.i32);
uint32_t ru SIMDE_VECTOR(32) = au + bu;
au = (au >> 31) + INT32_MAX;
uint32_t m SIMDE_VECTOR(32) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(src_.i32), (au ^ bu) | ~(bu ^ ru)) < 0);
src_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(src_.i32), (au & ~m) | (ru & m));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0]) / 4) ; i++) {
src_.i32[i] =
simde_math_adds_i32(
src_.i32[i],
HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) ]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) ]) +
HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) + 1]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) + 1]) +
HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) + 2]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) + 2]) +
HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) + 3]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) + 3])
);
}
#endif
return simde__m256i_from_private(src_);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm256_dpbusds_epi32
#define _mm256_dpbusds_epi32(src, a, b) simde_mm256_dpbusds_epi32(src, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_dpbusds_epi32(simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm256_mask_dpbusds_epi32(src, k, a, b);
#else
return simde_mm256_mask_mov_epi32(src, k, simde_mm256_dpbusds_epi32(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_dpbusds_epi32
#define _mm256_mask_dpbusds_epi32(src, k, a, b) simde_mm256_mask_dpbusds_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_dpbusds_epi32(simde__mmask8 k, simde__m256i src, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm256_maskz_dpbusds_epi32(k, src, a, b);
#else
return simde_mm256_maskz_mov_epi32(k, simde_mm256_dpbusds_epi32(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_dpbusds_epi32
#define _mm256_maskz_dpbusds_epi32(k, src, a, b) simde_mm256_maskz_dpbusds_epi32(k, src, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_dpbusds_epi32(simde__m512i src, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm512_dpbusds_epi32(src, a, b);
#else
simde__m512i_private
src_ = simde__m512i_to_private(src),
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
src_.m256i[0] = simde_mm256_dpbusds_epi32(src_.m256i[0], a_.m256i[0], b_.m256i[0]);
src_.m256i[1] = simde_mm256_dpbusds_epi32(src_.m256i[1], a_.m256i[1], b_.m256i[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
uint32_t x1_ SIMDE_VECTOR(256);
int32_t x2_ SIMDE_VECTOR(256);
simde__m512i_private
r1_[4],
r2_[4];
a_.u8 =
SIMDE_SHUFFLE_VECTOR_(
8, 64,
a_.u8, a_.u8,
0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60,
1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61,
2, 6, 10, 14, 18, 22, 26, 30, 34, 38, 42, 46, 50, 54, 58, 62,
3, 7, 11, 15, 19, 23, 27, 31, 35, 39, 43, 47, 51, 55, 59, 63
);
b_.i8 =
SIMDE_SHUFFLE_VECTOR_(
8, 64,
b_.i8, b_.i8,
0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60,
1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61,
2, 6, 10, 14, 18, 22, 26, 30, 34, 38, 42, 46, 50, 54, 58, 62,
3, 7, 11, 15, 19, 23, 27, 31, 35, 39, 43, 47, 51, 55, 59, 63
);
SIMDE_CONVERT_VECTOR_(x1_, a_.u8);
SIMDE_CONVERT_VECTOR_(x2_, b_.i8);
simde_memcpy(&r1_, &x1_, sizeof(x1_));
simde_memcpy(&r2_, &x2_, sizeof(x2_));
uint32_t au SIMDE_VECTOR(64) =
HEDLEY_REINTERPRET_CAST(
__typeof__(au),
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[0].u32) * r2_[0].i32) +
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[1].u32) * r2_[1].i32) +
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[2].u32) * r2_[2].i32) +
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[3].u32) * r2_[3].i32)
);
uint32_t bu SIMDE_VECTOR(64) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), src_.i32);
uint32_t ru SIMDE_VECTOR(64) = au + bu;
au = (au >> 31) + INT32_MAX;
uint32_t m SIMDE_VECTOR(64) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(src_.i32), (au ^ bu) | ~(bu ^ ru)) < 0);
src_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(src_.i32), (au & ~m) | (ru & m));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0]) / 4) ; i++) {
src_.i32[i] =
simde_math_adds_i32(
src_.i32[i],
HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) ]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) ]) +
HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) + 1]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) + 1]) +
HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) + 2]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) + 2]) +
HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) + 3]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) + 3])
);
}
#endif
return simde__m512i_from_private(src_);
#endif
}
#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm512_dpbusds_epi32
#define _mm512_dpbusds_epi32(src, a, b) simde_mm512_dpbusds_epi32(src, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_dpbusds_epi32(simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm512_mask_dpbusds_epi32(src, k, a, b);
#else
return simde_mm512_mask_mov_epi32(src, k, simde_mm512_dpbusds_epi32(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_dpbusds_epi32
#define _mm512_mask_dpbusds_epi32(src, k, a, b) simde_mm512_mask_dpbusds_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_dpbusds_epi32(simde__mmask16 k, simde__m512i src, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm512_maskz_dpbusds_epi32(k, src, a, b);
#else
return simde_mm512_maskz_mov_epi32(k, simde_mm512_dpbusds_epi32(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_dpbusds_epi32
#define _mm512_maskz_dpbusds_epi32(k, src, a, b) simde_mm512_maskz_dpbusds_epi32(k, src, a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_DPBUSDS_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/range.h | .h | 28,109 | 746 | #if !defined(SIMDE_X86_AVX512_RANGE_H)
#define SIMDE_X86_AVX512_RANGE_H
#include "types.h"
#include "max.h"
#include "min.h"
#include "set1.h"
#include "copysign.h"
#include "abs.h"
#include "setzero.h"
#include "cmp.h"
#include "or.h"
#include "andnot.h"
#include "insert.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
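/* For all of the range functions in this file, imm8 encodes two things.
 * Bits 1:0 select the operation: 0 = min, 1 = max, 2 = value with the
 * smaller magnitude, 3 = value with the larger magnitude.  Bits 3:2
 * select the sign of the result: 0 = sign of a, 1 = sign of the selected
 * value (the switch's default case), 2 = sign cleared, 3 = sign set.
 * For example, imm8 == 2 returns whichever operand has the smaller
 * absolute value, stamped with the sign of a. */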
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_range_ps (simde__m128 a, simde__m128 b, int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) {
simde__m128 r;
switch (imm8 & 3) {
case 0:
r = simde_mm_min_ps(a, b);
break;
case 1:
r = simde_mm_max_ps(a, b);
break;
case 2:
r = simde_x_mm_select_ps(b, a, simde_mm_cmple_ps(simde_x_mm_abs_ps(a), simde_x_mm_abs_ps(b)));
break;
case 3:
r = simde_x_mm_select_ps(b, a, simde_mm_cmpge_ps(simde_x_mm_abs_ps(a), simde_x_mm_abs_ps(b)));
break;
default:
break;
}
switch (imm8 & 12) {
case 0:
r = simde_x_mm_copysign_ps(r, a);
break;
case 8:
r = simde_mm_andnot_ps(simde_mm_set1_ps(SIMDE_FLOAT32_C(-0.0)), r);
break;
case 12:
r = simde_mm_or_ps(simde_mm_set1_ps(SIMDE_FLOAT32_C(-0.0)), r);
break;
default:
break;
}
return r;
}
#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm_range_ps(a, b, imm8) _mm_range_ps((a), (b), (imm8))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_range_ps
#define _mm_range_ps(a, b, imm8) simde_mm_range_ps(a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm_mask_range_ps(src, k, a, b, imm8) _mm_mask_range_ps(src, k, a, b, imm8)
#else
#define simde_mm_mask_range_ps(src, k, a, b, imm8) simde_mm_mask_mov_ps(src, k, simde_mm_range_ps(a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_range_ps
#define _mm_mask_range_ps(src, k, a, b, imm8) simde_mm_mask_range_ps(src, k, a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm_maskz_range_ps(k, a, b, imm8) _mm_maskz_range_ps(k, a, b, imm8)
#else
#define simde_mm_maskz_range_ps(k, a, b, imm8) simde_mm_maskz_mov_ps(k, simde_mm_range_ps(a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_range_ps
#define _mm_maskz_range_ps(k, a, b, imm8) simde_mm_maskz_range_ps(k, a, b, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_range_ps (simde__m256 a, simde__m256 b, int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) {
simde__m256 r;
switch (imm8 & 3) {
case 0:
r = simde_mm256_min_ps(a, b);
break;
case 1:
r = simde_mm256_max_ps(a, b);
break;
case 2:
r = simde_x_mm256_select_ps(b, a, simde_mm256_cmp_ps(simde_x_mm256_abs_ps(a), simde_x_mm256_abs_ps(b), SIMDE_CMP_LE_OQ));
break;
case 3:
r = simde_x_mm256_select_ps(b, a, simde_mm256_cmp_ps(simde_x_mm256_abs_ps(a), simde_x_mm256_abs_ps(b), SIMDE_CMP_GE_OQ));
break;
default:
break;
}
switch (imm8 & 12) {
case 0:
r = simde_x_mm256_copysign_ps(r, a);
break;
case 8:
r = simde_mm256_andnot_ps(simde_mm256_set1_ps(SIMDE_FLOAT32_C(-0.0)), r);
break;
case 12:
r = simde_mm256_or_ps(simde_mm256_set1_ps(SIMDE_FLOAT32_C(-0.0)), r);
break;
default:
break;
}
return r;
}
#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm256_range_ps(a, b, imm8) _mm256_range_ps((a), (b), (imm8))
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) && defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm256_range_ps(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \
simde__m256_private \
simde_mm256_range_ps_r_ = simde__m256_to_private(simde_mm256_setzero_ps()), \
simde_mm256_range_ps_a_ = simde__m256_to_private(a), \
simde_mm256_range_ps_b_ = simde__m256_to_private(b); \
\
for (size_t simde_mm256_range_ps_i = 0 ; simde_mm256_range_ps_i < (sizeof(simde_mm256_range_ps_r_.m128) / sizeof(simde_mm256_range_ps_r_.m128[0])) ; simde_mm256_range_ps_i++) { \
simde_mm256_range_ps_r_.m128[simde_mm256_range_ps_i] = simde_mm_range_ps(simde_mm256_range_ps_a_.m128[simde_mm256_range_ps_i], simde_mm256_range_ps_b_.m128[simde_mm256_range_ps_i], imm8); \
} \
\
simde__m256_from_private(simde_mm256_range_ps_r_); \
}))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_range_ps
#define _mm256_range_ps(a, b, imm8) simde_mm256_range_ps(a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm256_mask_range_ps(src, k, a, b, imm8) _mm256_mask_range_ps(src, k, a, b, imm8)
#else
#define simde_mm256_mask_range_ps(src, k, a, b, imm8) simde_mm256_mask_mov_ps(src, k, simde_mm256_range_ps(a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_range_ps
#define _mm256_mask_range_ps(src, k, a, b, imm8) simde_mm256_mask_range_ps(src, k, a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm256_maskz_range_ps(k, a, b, imm8) _mm256_maskz_range_ps(k, a, b, imm8)
#else
#define simde_mm256_maskz_range_ps(k, a, b, imm8) simde_mm256_maskz_mov_ps(k, simde_mm256_range_ps(a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_range_ps
#define _mm256_maskz_range_ps(k, a, b, imm8) simde_mm256_maskz_range_ps(k, a, b, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_range_ps (simde__m512 a, simde__m512 b, int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) {
simde__m512 r;
switch (imm8 & 3) {
case 0:
r = simde_mm512_min_ps(a, b);
break;
case 1:
r = simde_mm512_max_ps(a, b);
break;
case 2:
r = simde_mm512_mask_mov_ps(b, simde_mm512_cmp_ps_mask(simde_mm512_abs_ps(a), simde_mm512_abs_ps(b), SIMDE_CMP_LE_OS), a);
break;
case 3:
r = simde_mm512_mask_mov_ps(a, simde_mm512_cmp_ps_mask(simde_mm512_abs_ps(b), simde_mm512_abs_ps(a), SIMDE_CMP_GE_OS), b);
break;
default:
break;
}
switch (imm8 & 12) {
case 0:
r = simde_x_mm512_copysign_ps(r, a);
break;
case 8:
r = simde_mm512_andnot_ps(simde_mm512_set1_ps(SIMDE_FLOAT32_C(-0.0)), r);
break;
case 12:
r = simde_mm512_or_ps(simde_mm512_set1_ps(SIMDE_FLOAT32_C(-0.0)), r);
break;
default:
break;
}
return r;
}
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
#define simde_mm512_range_ps(a, b, imm8) _mm512_range_ps((a), (b), (imm8))
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) && defined(SIMDE_STATEMENT_EXPR_)
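/* No usable native version: emulate on 128-bit halves.  A statement
 * expression is used rather than a function, presumably so that imm8
 * remains a constant expression in each simde_mm_range_ps invocation. */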
#define simde_mm512_range_ps(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \
simde__m512_private \
simde_mm512_range_ps_r_ = simde__m512_to_private(simde_mm512_setzero_ps()), \
simde_mm512_range_ps_a_ = simde__m512_to_private(a), \
simde_mm512_range_ps_b_ = simde__m512_to_private(b); \
\
for (size_t simde_mm512_range_ps_i = 0 ; simde_mm512_range_ps_i < (sizeof(simde_mm512_range_ps_r_.m128) / sizeof(simde_mm512_range_ps_r_.m128[0])) ; simde_mm512_range_ps_i++) { \
simde_mm512_range_ps_r_.m128[simde_mm512_range_ps_i] = simde_mm_range_ps(simde_mm512_range_ps_a_.m128[simde_mm512_range_ps_i], simde_mm512_range_ps_b_.m128[simde_mm512_range_ps_i], imm8); \
} \
\
simde__m512_from_private(simde_mm512_range_ps_r_); \
}))
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) && defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm512_range_ps(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \
simde__m512_private \
simde_mm512_range_ps_r_ = simde__m512_to_private(simde_mm512_setzero_ps()), \
simde_mm512_range_ps_a_ = simde__m512_to_private(a), \
simde_mm512_range_ps_b_ = simde__m512_to_private(b); \
\
for (size_t simde_mm512_range_ps_i = 0 ; simde_mm512_range_ps_i < (sizeof(simde_mm512_range_ps_r_.m256) / sizeof(simde_mm512_range_ps_r_.m256[0])) ; simde_mm512_range_ps_i++) { \
simde_mm512_range_ps_r_.m256[simde_mm512_range_ps_i] = simde_mm256_range_ps(simde_mm512_range_ps_a_.m256[simde_mm512_range_ps_i], simde_mm512_range_ps_b_.m256[simde_mm512_range_ps_i], imm8); \
} \
\
simde__m512_from_private(simde_mm512_range_ps_r_); \
}))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_range_ps
#define _mm512_range_ps(a, b, imm8) simde_mm512_range_ps(a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
#define simde_mm512_mask_range_ps(src, k, a, b, imm8) _mm512_mask_range_ps(src, k, a, b, imm8)
#else
#define simde_mm512_mask_range_ps(src, k, a, b, imm8) simde_mm512_mask_mov_ps(src, k, simde_mm512_range_ps(a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_range_ps
#define _mm512_mask_range_ps(src, k, a, b, imm8) simde_mm512_mask_range_ps(src, k, a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
#define simde_mm512_maskz_range_ps(k, a, b, imm8) _mm512_maskz_range_ps(k, a, b, imm8)
#else
#define simde_mm512_maskz_range_ps(k, a, b, imm8) simde_mm512_maskz_mov_ps(k, simde_mm512_range_ps(a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_range_ps
#define _mm512_maskz_range_ps(k, a, b, imm8) simde_mm512_maskz_range_ps(k, a, b, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_range_pd (simde__m128d a, simde__m128d b, int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) {
simde__m128d r;
switch (imm8 & 3) {
case 0:
r = simde_mm_min_pd(a, b);
break;
case 1:
r = simde_mm_max_pd(a, b);
break;
case 2:
r = simde_x_mm_select_pd(b, a, simde_mm_cmple_pd(simde_x_mm_abs_pd(a), simde_x_mm_abs_pd(b)));
break;
case 3:
r = simde_x_mm_select_pd(b, a, simde_mm_cmpge_pd(simde_x_mm_abs_pd(a), simde_x_mm_abs_pd(b)));
break;
default:
break;
}
switch (imm8 & 12) {
case 0:
r = simde_x_mm_copysign_pd(r, a);
break;
case 8:
r = simde_mm_andnot_pd(simde_mm_set1_pd(SIMDE_FLOAT64_C(-0.0)), r);
break;
case 12:
r = simde_mm_or_pd(simde_mm_set1_pd(SIMDE_FLOAT64_C(-0.0)), r);
break;
default:
break;
}
return r;
}
#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm_range_pd(a, b, imm8) _mm_range_pd((a), (b), (imm8))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_range_pd
#define _mm_range_pd(a, b, imm8) simde_mm_range_pd(a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm_mask_range_pd(src, k, a, b, imm8) _mm_mask_range_pd(src, k, a, b, imm8)
#else
#define simde_mm_mask_range_pd(src, k, a, b, imm8) simde_mm_mask_mov_pd(src, k, simde_mm_range_pd(a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_range_pd
#define _mm_mask_range_pd(src, k, a, b, imm8) simde_mm_mask_range_pd(src, k, a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm_maskz_range_pd(k, a, b, imm8) _mm_maskz_range_pd(k, a, b, imm8)
#else
#define simde_mm_maskz_range_pd(k, a, b, imm8) simde_mm_maskz_mov_pd(k, simde_mm_range_pd(a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_range_pd
#define _mm_maskz_range_pd(k, a, b, imm8) simde_mm_maskz_range_pd(k, a, b, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_range_pd (simde__m256d a, simde__m256d b, int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) {
simde__m256d r;
switch (imm8 & 3) {
case 0:
r = simde_mm256_min_pd(a, b);
break;
case 1:
r = simde_mm256_max_pd(a, b);
break;
case 2:
r = simde_x_mm256_select_pd(b, a, simde_mm256_cmp_pd(simde_x_mm256_abs_pd(a), simde_x_mm256_abs_pd(b), SIMDE_CMP_LE_OQ));
break;
case 3:
r = simde_x_mm256_select_pd(b, a, simde_mm256_cmp_pd(simde_x_mm256_abs_pd(a), simde_x_mm256_abs_pd(b), SIMDE_CMP_GE_OQ));
break;
default:
break;
}
switch (imm8 & 12) {
case 0:
r = simde_x_mm256_copysign_pd(r, a);
break;
case 8:
r = simde_mm256_andnot_pd(simde_mm256_set1_pd(SIMDE_FLOAT64_C(-0.0)), r);
break;
case 12:
r = simde_mm256_or_pd(simde_mm256_set1_pd(SIMDE_FLOAT64_C(-0.0)), r);
break;
default:
break;
}
return r;
}
#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm256_range_pd(a, b, imm8) _mm256_range_pd((a), (b), (imm8))
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) && defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm256_range_pd(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \
simde__m256d_private \
simde_mm256_range_pd_r_ = simde__m256d_to_private(simde_mm256_setzero_pd()), \
simde_mm256_range_pd_a_ = simde__m256d_to_private(a), \
simde_mm256_range_pd_b_ = simde__m256d_to_private(b); \
\
for (size_t simde_mm256_range_pd_i = 0 ; simde_mm256_range_pd_i < (sizeof(simde_mm256_range_pd_r_.m128d) / sizeof(simde_mm256_range_pd_r_.m128d[0])) ; simde_mm256_range_pd_i++) { \
simde_mm256_range_pd_r_.m128d[simde_mm256_range_pd_i] = simde_mm_range_pd(simde_mm256_range_pd_a_.m128d[simde_mm256_range_pd_i], simde_mm256_range_pd_b_.m128d[simde_mm256_range_pd_i], imm8); \
} \
\
simde__m256d_from_private(simde_mm256_range_pd_r_); \
}))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_range_pd
#define _mm256_range_pd(a, b, imm8) simde_mm256_range_pd(a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm256_mask_range_pd(src, k, a, b, imm8) _mm256_mask_range_pd(src, k, a, b, imm8)
#else
#define simde_mm256_mask_range_pd(src, k, a, b, imm8) simde_mm256_mask_mov_pd(src, k, simde_mm256_range_pd(a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_range_pd
#define _mm256_mask_range_pd(src, k, a, b, imm8) simde_mm256_mask_range_pd(src, k, a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm256_maskz_range_pd(k, a, b, imm8) _mm256_maskz_range_pd(k, a, b, imm8)
#else
#define simde_mm256_maskz_range_pd(k, a, b, imm8) simde_mm256_maskz_mov_pd(k, simde_mm256_range_pd(a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_range_pd
#define _mm256_maskz_range_pd(k, a, b, imm8) simde_mm256_maskz_range_pd(k, a, b, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_range_pd (simde__m512d a, simde__m512d b, int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) {
simde__m512d r;
switch (imm8 & 3) {
case 0:
r = simde_mm512_min_pd(a, b);
break;
case 1:
r = simde_mm512_max_pd(a, b);
break;
case 2:
r = simde_mm512_mask_mov_pd(b, simde_mm512_cmp_pd_mask(simde_mm512_abs_pd(a), simde_mm512_abs_pd(b), SIMDE_CMP_LE_OS), a);
break;
case 3:
r = simde_mm512_mask_mov_pd(a, simde_mm512_cmp_pd_mask(simde_mm512_abs_pd(b), simde_mm512_abs_pd(a), SIMDE_CMP_GE_OS), b);
break;
default:
break;
}
switch (imm8 & 12) {
case 0:
r = simde_x_mm512_copysign_pd(r, a);
break;
case 8:
r = simde_mm512_andnot_pd(simde_mm512_set1_pd(SIMDE_FLOAT64_C(-0.0)), r);
break;
case 12:
r = simde_mm512_or_pd(simde_mm512_set1_pd(SIMDE_FLOAT64_C(-0.0)), r);
break;
default:
break;
}
return r;
}
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
#define simde_mm512_range_pd(a, b, imm8) _mm512_range_pd((a), (b), (imm8))
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) && defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm512_range_pd(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \
simde__m512d_private \
simde_mm512_range_pd_r_ = simde__m512d_to_private(simde_mm512_setzero_pd()), \
simde_mm512_range_pd_a_ = simde__m512d_to_private(a), \
simde_mm512_range_pd_b_ = simde__m512d_to_private(b); \
\
for (size_t simde_mm512_range_pd_i = 0 ; simde_mm512_range_pd_i < (sizeof(simde_mm512_range_pd_r_.m128d) / sizeof(simde_mm512_range_pd_r_.m128d[0])) ; simde_mm512_range_pd_i++) { \
simde_mm512_range_pd_r_.m128d[simde_mm512_range_pd_i] = simde_mm_range_pd(simde_mm512_range_pd_a_.m128d[simde_mm512_range_pd_i], simde_mm512_range_pd_b_.m128d[simde_mm512_range_pd_i], imm8); \
} \
\
simde__m512d_from_private(simde_mm512_range_pd_r_); \
}))
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) && defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm512_range_pd(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \
simde__m512d_private \
simde_mm512_range_pd_r_ = simde__m512d_to_private(simde_mm512_setzero_pd()), \
simde_mm512_range_pd_a_ = simde__m512d_to_private(a), \
simde_mm512_range_pd_b_ = simde__m512d_to_private(b); \
\
for (size_t simde_mm512_range_pd_i = 0 ; simde_mm512_range_pd_i < (sizeof(simde_mm512_range_pd_r_.m256d) / sizeof(simde_mm512_range_pd_r_.m256d[0])) ; simde_mm512_range_pd_i++) { \
simde_mm512_range_pd_r_.m256d[simde_mm512_range_pd_i] = simde_mm256_range_pd(simde_mm512_range_pd_a_.m256d[simde_mm512_range_pd_i], simde_mm512_range_pd_b_.m256d[simde_mm512_range_pd_i], imm8); \
} \
\
simde__m512d_from_private(simde_mm512_range_pd_r_); \
}))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_range_pd
#define _mm512_range_pd(a, b, imm8) simde_mm512_range_pd(a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
#define simde_mm512_mask_range_pd(src, k, a, b, imm8) _mm512_mask_range_pd(src, k, a, b, imm8)
#else
#define simde_mm512_mask_range_pd(src, k, a, b, imm8) simde_mm512_mask_mov_pd(src, k, simde_mm512_range_pd(a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_range_pd
#define _mm512_mask_range_pd(src, k, a, b, imm8) simde_mm512_mask_range_pd(src, k, a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
#define simde_mm512_maskz_range_pd(k, a, b, imm8) _mm512_maskz_range_pd(k, a, b, imm8)
#else
#define simde_mm512_maskz_range_pd(k, a, b, imm8) simde_mm512_maskz_mov_pd(k, simde_mm512_range_pd(a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_range_pd
#define _mm512_maskz_range_pd(k, a, b, imm8) simde_mm512_maskz_range_pd(k, a, b, imm8)
#endif
#if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
#define simde_x_mm_range_ss(a, b, imm8) simde_mm_move_ss(a, simde_mm_range_ps(a, b, imm8))
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
#define simde_x_mm_range_ss(a, b, imm8) simde_mm_move_ss(a, simde_mm_range_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b), imm8))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_range_ss (simde__m128 a, simde__m128 b, int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) {
simde__m128_private
r_ = simde__m128_to_private(a),
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
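/* Clearing bit 31 of the binary32 representation yields |x| without a
 * libm call or any effect on the floating-point environment. */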
simde_float32 abs_a = simde_uint32_as_float32(a_.u32[0] & UINT32_C(2147483647));
simde_float32 abs_b = simde_uint32_as_float32(b_.u32[0] & UINT32_C(2147483647));
switch (imm8 & 3) {
case 0:
r_ = simde__m128_to_private(simde_mm_min_ss(a, b));
break;
case 1:
r_ = simde__m128_to_private(simde_mm_max_ss(a, b));
break;
case 2:
r_.f32[0] = abs_a <= abs_b ? a_.f32[0] : b_.f32[0];
break;
case 3:
r_.f32[0] = abs_b >= abs_a ? b_.f32[0] : a_.f32[0];
break;
default:
break;
}
switch (imm8 & 12) {
case 0:
r_.f32[0] = simde_uint32_as_float32((a_.u32[0] & UINT32_C(2147483648)) ^ (r_.u32[0] & UINT32_C(2147483647)));
break;
case 8:
r_.f32[0] = simde_uint32_as_float32(r_.u32[0] & UINT32_C(2147483647));
break;
case 12:
r_.f32[0] = simde_uint32_as_float32(r_.u32[0] | UINT32_C(2147483648));
break;
default:
break;
}
return simde__m128_from_private(r_);
}
#endif
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
#define simde_mm_mask_range_ss(src, k, a, b, imm8) _mm_mask_range_ss(src, k, a, b, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm_mask_range_ss(src, k, a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \
simde__m128_private \
simde_mm_mask_range_ss_r_ = simde__m128_to_private(a), \
simde_mm_mask_range_ss_src_ = simde__m128_to_private(src); \
\
if (k & 1) \
simde_mm_mask_range_ss_r_ = simde__m128_to_private(simde_x_mm_range_ss(a, b, imm8)); \
else \
simde_mm_mask_range_ss_r_.f32[0] = simde_mm_mask_range_ss_src_.f32[0]; \
\
simde__m128_from_private(simde_mm_mask_range_ss_r_); \
}))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mask_range_ss (simde__m128 src, simde__mmask8 k, simde__m128 a, simde__m128 b, int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) {
simde__m128_private
r_ = simde__m128_to_private(a),
src_ = simde__m128_to_private(src);
if (k & 1)
r_ = simde__m128_to_private(simde_x_mm_range_ss(a, b, imm8));
else
r_.f32[0] = src_.f32[0];
return simde__m128_from_private(r_);
}
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_range_ss
#define _mm_mask_range_ss(src, k, a, b, imm8) simde_mm_mask_range_ss(src, k, a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
#define simde_mm_maskz_range_ss(k, a, b, imm8) _mm_maskz_range_ss(k, a, b, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm_maskz_range_ss(k, a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \
simde__m128_private simde_mm_maskz_range_ss_r_ = simde__m128_to_private(a); \
\
if (k & 1) \
simde_mm_maskz_range_ss_r_ = simde__m128_to_private(simde_x_mm_range_ss(a, b, imm8)); \
else \
simde_mm_maskz_range_ss_r_.f32[0] = SIMDE_FLOAT32_C(0.0); \
\
simde__m128_from_private(simde_mm_maskz_range_ss_r_); \
}))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_maskz_range_ss (simde__mmask8 k, simde__m128 a, simde__m128 b, int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) {
simde__m128_private r_ = simde__m128_to_private(a);
if (k & 1)
r_ = simde__m128_to_private(simde_x_mm_range_ss(a, b, imm8));
else
r_.f32[0] = SIMDE_FLOAT32_C(0.0);
return simde__m128_from_private(r_);
}
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_range_ss
#define _mm_maskz_range_ss(k, a, b, imm8) simde_mm_maskz_range_ss(k, a, b, imm8)
#endif
#if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
#define simde_x_mm_range_sd(a, b, imm8) simde_mm_move_sd(a, simde_mm_range_pd(a, b, imm8))
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
#define simde_x_mm_range_sd(a, b, imm8) simde_mm_move_sd(a, simde_mm_range_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b), imm8))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_x_mm_range_sd (simde__m128d a, simde__m128d b, int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) {
simde__m128d_private
r_ = simde__m128d_to_private(a),
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
simde_float64 abs_a = simde_uint64_as_float64(a_.u64[0] & UINT64_C(9223372036854775807));
simde_float64 abs_b = simde_uint64_as_float64(b_.u64[0] & UINT64_C(9223372036854775807));
switch (imm8 & 3) {
case 0:
r_ = simde__m128d_to_private(simde_mm_min_sd(a, b));
break;
case 1:
r_ = simde__m128d_to_private(simde_mm_max_sd(a, b));
break;
case 2:
r_.f64[0] = abs_a <= abs_b ? a_.f64[0] : b_.f64[0];
break;
case 3:
r_.f64[0] = abs_b >= abs_a ? b_.f64[0] : a_.f64[0];
break;
default:
break;
}
switch (imm8 & 12) {
case 0:
r_.f64[0] = simde_uint64_as_float64((a_.u64[0] & UINT64_C(9223372036854775808)) ^ (r_.u64[0] & UINT64_C(9223372036854775807)));
break;
case 8:
r_.f64[0] = simde_uint64_as_float64(r_.u64[0] & UINT64_C(9223372036854775807));
break;
case 12:
r_.f64[0] = simde_uint64_as_float64(r_.u64[0] | UINT64_C(9223372036854775808));
break;
default:
break;
}
return simde__m128d_from_private(r_);
}
#endif
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
#define simde_mm_mask_range_sd(src, k, a, b, imm8) _mm_mask_range_sd(src, k, a, b, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm_mask_range_sd(src, k, a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \
simde__m128d_private \
simde_mm_mask_range_sd_r_ = simde__m128d_to_private(a), \
simde_mm_mask_range_sd_src_ = simde__m128d_to_private(src); \
\
if (k & 1) \
simde_mm_mask_range_sd_r_ = simde__m128d_to_private(simde_x_mm_range_sd(a, b, imm8)); \
else \
simde_mm_mask_range_sd_r_.f64[0] = simde_mm_mask_range_sd_src_.f64[0]; \
\
simde__m128d_from_private(simde_mm_mask_range_sd_r_); \
}))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_mask_range_sd (simde__m128d src, simde__mmask8 k, simde__m128d a, simde__m128d b, int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) {
simde__m128d_private
r_ = simde__m128d_to_private(a),
src_ = simde__m128d_to_private(src);
if (k & 1)
r_ = simde__m128d_to_private(simde_x_mm_range_sd(a, b, imm8));
else
r_.f64[0] = src_.f64[0];
return simde__m128d_from_private(r_);
}
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_range_sd
#define _mm_mask_range_sd(src, k, a, b, imm8) simde_mm_mask_range_sd(src, k, a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
#define simde_mm_maskz_range_sd(k, a, b, imm8) _mm_maskz_range_sd(k, a, b, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm_maskz_range_sd(k, a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \
simde__m128d_private simde_mm_maskz_range_sd_r_ = simde__m128d_to_private(a); \
\
if (k & 1) \
simde_mm_maskz_range_sd_r_ = simde__m128d_to_private(simde_x_mm_range_sd(a, b, imm8)); \
else \
simde_mm_maskz_range_sd_r_.f64[0] = SIMDE_FLOAT64_C(0.0); \
\
simde__m128d_from_private(simde_mm_maskz_range_sd_r_); \
}))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_maskz_range_sd (simde__mmask8 k, simde__m128d a, simde__m128d b, int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) {
simde__m128d_private r_ = simde__m128d_to_private(a);
if (k & 1)
r_ = simde__m128d_to_private(simde_x_mm_range_sd(a, b, imm8));
else
r_.f64[0] = SIMDE_FLOAT64_C(0.0);
return simde__m128d_from_private(r_);
}
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_range_sd
#define _mm_maskz_range_sd(k, a, b, imm8) simde_mm_maskz_range_sd(k, a, b, imm8)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_RANGE_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/cmple.h | .h | 50,422 | 1,433 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020-2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_X86_AVX512_CMPLE_H)
#define SIMDE_X86_AVX512_CMPLE_H
#include "types.h"
#include "mov.h"
#include "mov_mask.h"
#include "movm.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_cmple_epi8 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_movm_epi8(_mm_cmple_epi8_mask(a, b));
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vcleq_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i8x16_le(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i8 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), vec_cmple(a_.altivec_i8, b_.altivec_i8));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), a_.i8 <= b_.i8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) {
r_.i8[i] = (a_.i8[i] <= b_.i8[i]) ? ~INT8_C(0) : INT8_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm_cmple_epi8_mask (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_cmple_epi8_mask(a, b);
#else
return simde_mm_movepi8_mask(simde_x_mm_cmple_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_cmple_epi8_mask
#define _mm_cmple_epi8_mask(a, b) simde_mm_cmple_epi8_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm_mask_cmple_epi8_mask(simde__mmask16 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_mask_cmple_epi8_mask(k, a, b);
#else
return k & simde_mm_cmple_epi8_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmple_epi8_mask
#define _mm_mask_cmple_epi8_mask(k, a, b) simde_mm_mask_cmple_epi8_mask((k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_x_mm256_cmple_epi8 (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return simde_mm256_movm_epi8(_mm256_cmple_epi8_mask(a, b));
#else
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmple_epi8(a_.m128i[i], b_.m128i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), a_.i8 <= b_.i8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) {
r_.i8[i] = (a_.i8[i] <= b_.i8[i]) ? ~INT8_C(0) : INT8_C(0);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm256_cmple_epi8_mask (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_cmple_epi8_mask(a, b);
#else
return simde_mm256_movepi8_mask(simde_x_mm256_cmple_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmple_epi8_mask
#define _mm256_cmple_epi8_mask(a, b) simde_mm256_cmple_epi8_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm256_mask_cmple_epi8_mask(simde__mmask32 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_mask_cmple_epi8_mask(k, a, b);
#else
return k & simde_mm256_cmple_epi8_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmple_epi8_mask
#define _mm256_mask_cmple_epi8_mask(k, a, b) simde_mm256_mask_cmple_epi8_mask((k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_x_mm512_cmple_epi8 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return simde_mm512_movm_epi8(_mm512_cmple_epi8_mask(a, b));
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmple_epi8(a_.m128i[i], b_.m128i[i]);
}
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_x_mm256_cmple_epi8(a_.m256i[i], b_.m256i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), a_.i8 <= b_.i8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) {
r_.i8[i] = (a_.i8[i] <= b_.i8[i]) ? ~INT8_C(0) : INT8_C(0);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask64
simde_mm512_cmple_epi8_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_cmple_epi8_mask(a, b);
#else
return simde_mm512_movepi8_mask(simde_x_mm512_cmple_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmple_epi8_mask
#define _mm512_cmple_epi8_mask(a, b) simde_mm512_cmple_epi8_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask64
simde_mm512_mask_cmple_epi8_mask(simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_cmple_epi8_mask(k, a, b);
#else
return k & simde_mm512_cmple_epi8_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cmple_epi8_mask
#define _mm512_mask_cmple_epi8_mask(k, a, b) simde_mm512_mask_cmple_epi8_mask((k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_cmple_epu8 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_movm_epi8(_mm_cmple_epu8_mask(a, b));
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vcleq_u8(a_.neon_u8, b_.neon_u8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_u8x16_le(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_u8 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_cmple(a_.altivec_u8, b_.altivec_u8));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), a_.u8 <= b_.u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] <= b_.u8[i]) ? ~INT8_C(0) : INT8_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm_cmple_epu8_mask (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_cmple_epu8_mask(a, b);
#else
return simde_mm_movepi8_mask(simde_x_mm_cmple_epu8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_cmple_epu8_mask
#define _mm_cmple_epu8_mask(a, b) simde_mm_cmple_epu8_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm_mask_cmple_epu8_mask(simde__mmask16 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_mask_cmple_epu8_mask(k, a, b);
#else
return k & simde_mm_cmple_epu8_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmple_epu8_mask
#define _mm_mask_cmple_epu8_mask(src, k, a, b) simde_mm_mask_cmple_epu8_mask((src), (k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_x_mm256_cmple_epu8 (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return simde_mm256_movm_epi8(_mm256_cmple_epu8_mask(a, b));
#else
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmple_epu8(a_.m128i[i], b_.m128i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), a_.u8 <= b_.u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] <= b_.u8[i]) ? ~INT8_C(0) : INT8_C(0);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm256_cmple_epu8_mask (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_cmple_epu8_mask(a, b);
#else
return simde_mm256_movepi8_mask(simde_x_mm256_cmple_epu8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmple_epu8_mask
#define _mm256_cmple_epu8_mask(a, b) simde_mm256_cmple_epu8_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm256_mask_cmple_epu8_mask(simde__mmask32 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_mask_cmple_epu8_mask(k, a, b);
#else
return k & simde_mm256_cmple_epu8_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmple_epu8_mask
#define _mm256_mask_cmple_epu8_mask(src, k, a, b) simde_mm256_mask_cmple_epu8_mask((src), (k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_x_mm512_cmple_epu8 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return simde_mm512_movm_epi8(_mm512_cmple_epu8_mask(a, b));
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmple_epu8(a_.m128i[i], b_.m128i[i]);
}
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_x_mm256_cmple_epu8(a_.m256i[i], b_.m256i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), a_.u8 <= b_.u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] <= b_.u8[i]) ? ~INT8_C(0) : INT8_C(0);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask64
simde_mm512_cmple_epu8_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_cmple_epu8_mask(a, b);
#else
return simde_mm512_movepi8_mask(simde_x_mm512_cmple_epu8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmple_epu8_mask
#define _mm512_cmple_epu8_mask(a, b) simde_mm512_cmple_epu8_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask64
simde_mm512_mask_cmple_epu8_mask(simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_cmple_epu8_mask(k, a, b);
#else
return k & simde_mm512_cmple_epu8_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cmple_epu8_mask
#define _mm512_mask_cmple_epu8_mask(src, k, a, b) simde_mm512_mask_cmple_epu8_mask((src), (k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_cmple_epi16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_movm_epi16(_mm_cmple_epi16_mask(a, b));
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vcleq_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i16x8_le(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i16 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed short), vec_cmple(a_.altivec_i16, b_.altivec_i16));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), a_.i16 <= b_.i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] <= b_.i16[i]) ? ~INT16_C(0) : INT16_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_cmple_epi16_mask (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_cmple_epi16_mask(a, b);
#else
return simde_mm_movepi16_mask(simde_x_mm_cmple_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_cmple_epi16_mask
#define _mm_cmple_epi16_mask(a, b) simde_mm_cmple_epi16_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_mask_cmple_epi16_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_mask_cmple_epi16_mask(k, a, b);
#else
return k & simde_mm_cmple_epi16_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmple_epi16_mask
#define _mm_mask_cmple_epi16_mask(src, k, a, b) simde_mm_mask_cmple_epi16_mask((src), (k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_x_mm256_cmple_epi16 (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return simde_mm256_movm_epi16(_mm256_cmple_epi16_mask(a, b));
#else
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmple_epi16(a_.m128i[i], b_.m128i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), a_.i16 <= b_.i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] <= b_.i16[i]) ? ~INT16_C(0) : INT16_C(0);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm256_cmple_epi16_mask (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_cmple_epi16_mask(a, b);
#else
return simde_mm256_movepi16_mask(simde_x_mm256_cmple_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmple_epi16_mask
#define _mm256_cmple_epi16_mask(a, b) simde_mm256_cmple_epi16_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm256_mask_cmple_epi16_mask(simde__mmask16 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_mask_cmple_epi16_mask(k, a, b);
#else
return k & simde_mm256_cmple_epi16_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmple_epi16_mask
#define _mm256_mask_cmple_epi16_mask(src, k, a, b) simde_mm256_mask_cmple_epi16_mask((src), (k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_x_mm512_cmple_epi16 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return simde_mm512_movm_epi16(_mm512_cmple_epi16_mask(a, b));
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmple_epi16(a_.m128i[i], b_.m128i[i]);
}
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_x_mm256_cmple_epi16(a_.m256i[i], b_.m256i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), a_.i16 <= b_.i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] <= b_.i16[i]) ? ~INT16_C(0) : INT16_C(0);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm512_cmple_epi16_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_cmple_epi16_mask(a, b);
#else
return simde_mm512_movepi16_mask(simde_x_mm512_cmple_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmple_epi16_mask
#define _mm512_cmple_epi16_mask(a, b) simde_mm512_cmple_epi16_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm512_mask_cmple_epi16_mask(simde__mmask32 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_cmple_epi16_mask(k, a, b);
#else
return k & simde_mm512_cmple_epi16_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cmple_epi16_mask
#define _mm512_mask_cmple_epi16_mask(src, k, a, b) simde_mm512_mask_cmple_epi16_mask((src), (k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_cmple_epu16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_movm_epi16(_mm_cmple_epu16_mask(a, b));
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vcleq_u16(a_.neon_u16, b_.neon_u16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_u16x8_le(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_u16 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), vec_cmple(a_.altivec_u16, b_.altivec_u16));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), a_.u16 <= b_.u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u16) / sizeof(a_.u16[0])) ; i++) {
r_.u16[i] = (a_.u16[i] <= b_.u16[i]) ? ~INT16_C(0) : INT16_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_cmple_epu16_mask (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_cmple_epu16_mask(a, b);
#else
return simde_mm_movepi16_mask(simde_x_mm_cmple_epu16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_cmple_epu16_mask
#define _mm_cmple_epu16_mask(a, b) simde_mm_cmple_epu16_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_mask_cmple_epu16_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_mask_cmple_epu16_mask(k, a, b);
#else
return k & simde_mm_cmple_epu16_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmple_epu16_mask
#define _mm_mask_cmple_epu16_mask(src, k, a, b) simde_mm_mask_cmple_epu16_mask((src), (k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_x_mm256_cmple_epu16 (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return simde_mm256_movm_epi16(_mm256_cmple_epu16_mask(a, b));
#else
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmple_epu16(a_.m128i[i], b_.m128i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), a_.u16 <= b_.u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u16) / sizeof(a_.u16[0])) ; i++) {
r_.u16[i] = (a_.u16[i] <= b_.u16[i]) ? ~INT16_C(0) : INT16_C(0);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm256_cmple_epu16_mask (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_cmple_epu16_mask(a, b);
#else
return simde_mm256_movepi16_mask(simde_x_mm256_cmple_epu16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmple_epu16_mask
#define _mm256_cmple_epu16_mask(a, b) simde_mm256_cmple_epu16_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm256_mask_cmple_epu16_mask(simde__mmask16 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_mask_cmple_epu16_mask(k, a, b);
#else
return k & simde_mm256_cmple_epu16_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmple_epu16_mask
#define _mm256_mask_cmple_epu16_mask(src, k, a, b) simde_mm256_mask_cmple_epu16_mask((src), (k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_x_mm512_cmple_epu16 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return simde_mm512_movm_epi16(_mm512_cmple_epu16_mask(a, b));
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmple_epu16(a_.m128i[i], b_.m128i[i]);
}
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_x_mm256_cmple_epu16(a_.m256i[i], b_.m256i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), a_.u16 <= b_.u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u16) / sizeof(a_.u16[0])) ; i++) {
r_.u16[i] = (a_.u16[i] <= b_.u16[i]) ? ~INT16_C(0) : INT16_C(0);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm512_cmple_epu16_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_cmple_epu16_mask(a, b);
#else
return simde_mm512_movepi16_mask(simde_x_mm512_cmple_epu16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmple_epu16_mask
#define _mm512_cmple_epu16_mask(a, b) simde_mm512_cmple_epu16_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm512_mask_cmple_epu16_mask(simde__mmask32 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_cmple_epu16_mask(k, a, b);
#else
return k & simde_mm512_cmple_epu16_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cmple_epu16_mask
#define _mm512_mask_cmple_epu16_mask(src, k, a, b) simde_mm512_mask_cmple_epu16_mask((src), (k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_cmple_epi32 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return simde_mm_movm_epi32(_mm_cmple_epi32_mask(a, b));
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcleq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i32x4_le(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), vec_cmple(a_.altivec_i32, b_.altivec_i32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 <= b_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) {
r_.i32[i] = (a_.i32[i] <= b_.i32[i]) ? ~INT32_C(0) : INT32_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_cmple_epi32_mask (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_cmple_epi32_mask(a, b);
#else
return simde_mm_movepi32_mask(simde_x_mm_cmple_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_cmple_epi32_mask
#define _mm_cmple_epi32_mask(a, b) simde_mm_cmple_epi32_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_mask_cmple_epi32_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_cmple_epi32_mask(k, a, b);
#else
return k & simde_mm_cmple_epi32_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmple_epi32_mask
#define _mm_mask_cmple_epi32_mask(src, k, a, b) simde_mm_mask_cmple_epi32_mask((src), (k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_x_mm256_cmple_epi32 (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return simde_mm256_movm_epi32(_mm256_cmple_epi32_mask(a, b));
#else
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmple_epi32(a_.m128i[i], b_.m128i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 <= b_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) {
r_.i32[i] = (a_.i32[i] <= b_.i32[i]) ? ~INT32_C(0) : INT32_C(0);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_cmple_epi32_mask (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_cmple_epi32_mask(a, b);
#else
return simde_mm256_movepi32_mask(simde_x_mm256_cmple_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmple_epi32_mask
#define _mm256_cmple_epi32_mask(a, b) simde_mm256_cmple_epi32_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_mask_cmple_epi32_mask(simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_cmple_epi32_mask(k, a, b);
#else
return k & simde_mm256_cmple_epi32_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmple_epi32_mask
#define _mm256_mask_cmple_epi32_mask(src, k, a, b) simde_mm256_mask_cmple_epi32_mask((src), (k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_x_mm512_cmple_epi32 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return simde_mm512_movm_epi32(_mm512_cmple_epi32_mask(a, b));
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmple_epi32(a_.m128i[i], b_.m128i[i]);
}
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_x_mm256_cmple_epi32(a_.m256i[i], b_.m256i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 <= b_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) {
r_.i32[i] = (a_.i32[i] <= b_.i32[i]) ? ~INT32_C(0) : INT32_C(0);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm512_cmple_epi32_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_cmple_epi32_mask(a, b);
#else
return simde_mm512_movepi32_mask(simde_x_mm512_cmple_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmple_epi32_mask
#define _mm512_cmple_epi32_mask(a, b) simde_mm512_cmple_epi32_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm512_mask_cmple_epi32_mask(simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_cmple_epi32_mask(k, a, b);
#else
return k & simde_mm512_cmple_epi32_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cmple_epi32_mask
#define _mm512_mask_cmple_epi32_mask(src, k, a, b) simde_mm512_mask_cmple_epi32_mask((src), (k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_cmple_epu32 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return simde_mm_movm_epi32(_mm_cmple_epu32_mask(a, b));
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcleq_u32(a_.neon_u32, b_.neon_u32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_u32x4_le(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_u32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmple(a_.altivec_u32, b_.altivec_u32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.u32 <= b_.u32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u32) / sizeof(a_.u32[0])) ; i++) {
r_.u32[i] = (a_.u32[i] <= b_.u32[i]) ? ~INT32_C(0) : INT32_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_cmple_epu32_mask (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_cmple_epu32_mask(a, b);
#else
return simde_mm_movepi32_mask(simde_x_mm_cmple_epu32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_cmple_epu32_mask
#define _mm_cmple_epu32_mask(a, b) simde_mm_cmple_epu32_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_mask_cmple_epu32_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_cmple_epu32_mask(k, a, b);
#else
return k & simde_mm_cmple_epu32_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmple_epu32_mask
#define _mm_mask_cmple_epu32_mask(src, k, a, b) simde_mm_mask_cmple_epu32_mask((src), (k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_x_mm256_cmple_epu32 (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return simde_mm256_movm_epi32(_mm256_cmple_epu32_mask(a, b));
#else
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmple_epu32(a_.m128i[i], b_.m128i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.u32 <= b_.u32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u32) / sizeof(a_.u32[0])) ; i++) {
r_.u32[i] = (a_.u32[i] <= b_.u32[i]) ? ~INT32_C(0) : INT32_C(0);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_cmple_epu32_mask (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_cmple_epu32_mask(a, b);
#else
return simde_mm256_movepi32_mask(simde_x_mm256_cmple_epu32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmple_epu32_mask
#define _mm256_cmple_epu32_mask(a, b) simde_mm256_cmple_epu32_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_mask_cmple_epu32_mask(simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_cmple_epu32_mask(k, a, b);
#else
return k & simde_mm256_cmple_epu32_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmple_epu32_mask
#define _mm256_mask_cmple_epu32_mask(src, k, a, b) simde_mm256_mask_cmple_epu32_mask((src), (k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_x_mm512_cmple_epu32 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return simde_mm512_movm_epi32(_mm512_cmple_epu32_mask(a, b));
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmple_epu32(a_.m128i[i], b_.m128i[i]);
}
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_x_mm256_cmple_epu32(a_.m256i[i], b_.m256i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.u32 <= b_.u32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u32) / sizeof(a_.u32[0])) ; i++) {
r_.u32[i] = (a_.u32[i] <= b_.u32[i]) ? ~INT32_C(0) : INT32_C(0);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm512_cmple_epu32_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_cmple_epu32_mask(a, b);
#else
return simde_mm512_movepi32_mask(simde_x_mm512_cmple_epu32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmple_epu32_mask
#define _mm512_cmple_epu32_mask(a, b) simde_mm512_cmple_epu32_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm512_mask_cmple_epu32_mask(simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_cmple_epu32_mask(k, a, b);
#else
return k & simde_mm512_cmple_epu32_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cmple_epu32_mask
#define _mm512_mask_cmple_epu32_mask(src, k, a, b) simde_mm512_mask_cmple_epu32_mask((src), (k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_cmple_epi64 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return simde_mm_movm_epi64(_mm_cmple_epi64_mask(a, b));
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_u64 = vcleq_s64(a_.neon_i64, b_.neon_i64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i64x2_le(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), vec_cmple(a_.altivec_i64, b_.altivec_i64));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 <= b_.i64);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) {
r_.i64[i] = (a_.i64[i] <= b_.i64[i]) ? ~INT64_C(0) : INT64_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_cmple_epi64_mask (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_cmple_epi64_mask(a, b);
#else
return simde_mm_movepi64_mask(simde_x_mm_cmple_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_cmple_epi64_mask
#define _mm_cmple_epi64_mask(a, b) simde_mm_cmple_epi64_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_mask_cmple_epi64_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_cmple_epi64_mask(k, a, b);
#else
return k & simde_mm_cmple_epi64_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmple_epi64_mask
#define _mm_mask_cmple_epi64_mask(src, k, a, b) simde_mm_mask_cmple_epi64_mask((src), (k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_x_mm256_cmple_epi64 (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return simde_mm256_movm_epi64(_mm256_cmple_epi64_mask(a, b));
#else
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmple_epi64(a_.m128i[i], b_.m128i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 <= b_.i64);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) {
r_.i64[i] = (a_.i64[i] <= b_.i64[i]) ? ~INT64_C(0) : INT64_C(0);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_cmple_epi64_mask (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_cmple_epi64_mask(a, b);
#else
return simde_mm256_movepi64_mask(simde_x_mm256_cmple_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmple_epi64_mask
#define _mm256_cmple_epi64_mask(a, b) simde_mm256_cmple_epi64_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_mask_cmple_epi64_mask(simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_cmple_epi64_mask(k, a, b);
#else
return k & simde_mm256_cmple_epi64_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmple_epi64_mask
#define _mm256_mask_cmple_epi64_mask(src, k, a, b) simde_mm256_mask_cmple_epi64_mask((src), (k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_x_mm512_cmple_epi64 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return simde_mm512_movm_epi64(_mm512_cmple_epi64_mask(a, b));
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmple_epi64(a_.m128i[i], b_.m128i[i]);
}
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_x_mm256_cmple_epi64(a_.m256i[i], b_.m256i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 <= b_.i64);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) {
r_.i64[i] = (a_.i64[i] <= b_.i64[i]) ? ~INT64_C(0) : INT64_C(0);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm512_cmple_epi64_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_cmple_epi64_mask(a, b);
#else
return simde_mm512_movepi64_mask(simde_x_mm512_cmple_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmple_epi64_mask
#define _mm512_cmple_epi64_mask(a, b) simde_mm512_cmple_epi64_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm512_mask_cmple_epi64_mask(simde__mmask8 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_cmple_epi64_mask(k, a, b);
#else
return k & simde_mm512_cmple_epi64_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cmple_epi64_mask
#define _mm512_mask_cmple_epi64_mask(src, k, a, b) simde_mm512_mask_cmple_epi64_mask((src), (k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_cmple_epu64 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return simde_mm_movm_epi64(_mm_cmple_epu64_mask(a, b));
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_u64 = vcleq_u64(a_.neon_u64, b_.neon_u64);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_u64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmple(a_.altivec_u64, b_.altivec_u64));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), a_.u64 <= b_.u64);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u64) / sizeof(a_.u64[0])) ; i++) {
r_.u64[i] = (a_.u64[i] <= b_.u64[i]) ? ~INT64_C(0) : INT64_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_cmple_epu64_mask (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_cmple_epu64_mask(a, b);
#else
return simde_mm_movepi64_mask(simde_x_mm_cmple_epu64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_cmple_epu64_mask
#define _mm_cmple_epu64_mask(a, b) simde_mm_cmple_epu64_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm_mask_cmple_epu64_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_cmple_epu64_mask(k, a, b);
#else
return k & simde_mm_cmple_epu64_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cmple_epu64_mask
#define _mm_mask_cmple_epu64_mask(src, k, a, b) simde_mm_mask_cmple_epu64_mask((src), (k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_x_mm256_cmple_epu64 (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return simde_mm256_movm_epi64(_mm256_cmple_epu64_mask(a, b));
#else
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmple_epu64(a_.m128i[i], b_.m128i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), a_.u64 <= b_.u64);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u64) / sizeof(a_.u64[0])) ; i++) {
r_.u64[i] = (a_.u64[i] <= b_.u64[i]) ? ~INT64_C(0) : INT64_C(0);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_cmple_epu64_mask (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_cmple_epu64_mask(a, b);
#else
return simde_mm256_movepi64_mask(simde_x_mm256_cmple_epu64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_cmple_epu64_mask
#define _mm256_cmple_epu64_mask(a, b) simde_mm256_cmple_epu64_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_mask_cmple_epu64_mask(simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_cmple_epu64_mask(k, a, b);
#else
return k & simde_mm256_cmple_epu64_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_cmple_epu64_mask
#define _mm256_mask_cmple_epu64_mask(src, k, a, b) simde_mm256_mask_cmple_epu64_mask((src), (k), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_x_mm512_cmple_epu64 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return simde_mm512_movm_epi64(_mm512_cmple_epu64_mask(a, b));
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_cmple_epu64(a_.m128i[i], b_.m128i[i]);
}
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_x_mm256_cmple_epu64(a_.m256i[i], b_.m256i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), a_.u64 <= b_.u64);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u64) / sizeof(a_.u64[0])) ; i++) {
r_.u64[i] = (a_.u64[i] <= b_.u64[i]) ? ~INT64_C(0) : INT64_C(0);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm512_cmple_epu64_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_cmple_epu64_mask(a, b);
#else
return simde_mm512_movepi64_mask(simde_x_mm512_cmple_epu64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmple_epu64_mask
#define _mm512_cmple_epu64_mask(a, b) simde_mm512_cmple_epu64_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm512_mask_cmple_epu64_mask(simde__mmask8 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_cmple_epu64_mask(k, a, b);
#else
return k & simde_mm512_cmple_epu64_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cmple_epu64_mask
#define _mm512_mask_cmple_epu64_mask(src, k, a, b) simde_mm512_mask_cmple_epu64_mask((src), (k), (a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_CMPLE_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/round.h | .h | 13,094 | 283 | #if !defined(SIMDE_X86_AVX512_ROUND_H)
#define SIMDE_X86_AVX512_ROUND_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
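/* On targets whose natural vector size is 256 bits or less, the
 * 512-bit round is assembled from simde_mm256_round_ps/_pd calls.
 * Those presumably need `rounding` to stay a compile-time constant,
 * so the helper is a statement-expression macro there rather than a
 * function. */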
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256) && defined(SIMDE_STATEMENT_EXPR_)
#define simde_x_mm512_round_ps(a, rounding) SIMDE_STATEMENT_EXPR_(({ \
simde__m512_private \
simde_x_mm512_round_ps_r_ = simde__m512_to_private(simde_mm512_setzero_ps()), \
simde_x_mm512_round_ps_a_ = simde__m512_to_private(a); \
\
for (size_t simde_x_mm512_round_ps_i = 0 ; simde_x_mm512_round_ps_i < (sizeof(simde_x_mm512_round_ps_r_.m256) / sizeof(simde_x_mm512_round_ps_r_.m256[0])) ; simde_x_mm512_round_ps_i++) { \
simde_x_mm512_round_ps_r_.m256[simde_x_mm512_round_ps_i] = simde_mm256_round_ps(simde_x_mm512_round_ps_a_.m256[simde_x_mm512_round_ps_i], rounding); \
} \
\
simde__m512_from_private(simde_x_mm512_round_ps_r_); \
}))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_x_mm512_round_ps (simde__m512 a, int rounding)
SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15) {
simde__m512_private
r_,
a_ = simde__m512_to_private(a);
/* For architectures which lack a current direction SIMD instruction.
*
* Note that NEON actually has a current rounding mode instruction,
* but in ARMv8+ the rounding mode is ignored and nearest is always
* used, so we treat ARMv7 as having a rounding mode but ARMv8 as
* not. */
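/* SIMDE_MM_GET_ROUNDING_MODE() reports MXCSR-style constants
 * (SIMDE_MM_ROUND_NEAREST 0x0000, _DOWN 0x2000, _UP 0x4000,
 * _TOWARD_ZERO 0x6000); shifting right by 13 maps those onto the
 * SIMDE_MM_FROUND_TO_* values (0..3) handled by the switch below. */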
#if \
defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || \
defined(SIMDE_ARM_NEON_A32V8)
if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION)
rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE()) >> 13;
#endif
switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) {
case SIMDE_MM_FROUND_CUR_DIRECTION:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) {
r_.m128_private[i].altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.m128_private[i].altivec_f32));
}
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) {
r_.m128_private[i].neon_f32 = vrndiq_f32(a_.m128_private[i].neon_f32);
}
#elif defined(simde_math_nearbyintf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_nearbyintf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_NEAREST_INT:
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) {
r_.m128_private[i].altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_rint(a_.m128_private[i].altivec_f32));
}
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) {
r_.m128_private[i].neon_f32 = vrndnq_f32(a_.m128_private[i].neon_f32);
}
#elif defined(simde_math_roundevenf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_roundevenf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_NEG_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) {
r_.m128_private[i].altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_floor(a_.m128_private[i].altivec_f32));
}
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) {
r_.m128_private[i].neon_f32 = vrndmq_f32(a_.m128_private[i].neon_f32);
}
#elif defined(simde_math_floorf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_floorf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_POS_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) {
r_.m128_private[i].altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_ceil(a_.m128_private[i].altivec_f32));
}
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) {
r_.m128_private[i].neon_f32 = vrndpq_f32(a_.m128_private[i].neon_f32);
}
#elif defined(simde_math_ceilf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_ceilf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_ZERO:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) {
r_.m128_private[i].altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_trunc(a_.m128_private[i].altivec_f32));
}
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) {
r_.m128_private[i].neon_f32 = vrndq_f32(a_.m128_private[i].neon_f32);
}
#elif defined(simde_math_truncf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_truncf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_ps());
#endif
break;
default:
HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_ps());
}
return simde__m512_from_private(r_);
}
#endif
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256) && defined(SIMDE_STATEMENT_EXPR_)
#define simde_x_mm512_round_pd(a, rounding) SIMDE_STATEMENT_EXPR_(({ \
simde__m512d_private \
simde_x_mm512_round_pd_r_ = simde__m512d_to_private(simde_mm512_setzero_pd()), \
simde_x_mm512_round_pd_a_ = simde__m512d_to_private(a); \
\
for (size_t simde_x_mm512_round_pd_i = 0 ; simde_x_mm512_round_pd_i < (sizeof(simde_x_mm512_round_pd_r_.m256d) / sizeof(simde_x_mm512_round_pd_r_.m256d[0])) ; simde_x_mm512_round_pd_i++) { \
simde_x_mm512_round_pd_r_.m256d[simde_x_mm512_round_pd_i] = simde_mm256_round_pd(simde_x_mm512_round_pd_a_.m256d[simde_x_mm512_round_pd_i], rounding); \
} \
\
simde__m512d_from_private(simde_x_mm512_round_pd_r_); \
}))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_x_mm512_round_pd (simde__m512d a, int rounding)
SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15) {
simde__m512d_private
r_,
a_ = simde__m512d_to_private(a);
/* For architectures which lack a current direction SIMD instruction. */
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION)
rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE()) >> 13;
#endif
switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) {
case SIMDE_MM_FROUND_CUR_DIRECTION:
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) {
r_.m128d_private[i].altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_round(a_.m128d_private[i].altivec_f64));
}
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) {
r_.m128d_private[i].neon_f64 = vrndiq_f64(a_.m128d_private[i].neon_f64);
}
#elif defined(simde_math_nearbyint)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = simde_math_nearbyint(a_.f64[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_NEAREST_INT:
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) {
r_.m128d_private[i].altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_round(a_.m128d_private[i].altivec_f64));
}
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) {
r_.m128d_private[i].neon_f64 = vrndaq_f64(a_.m128d_private[i].neon_f64);
}
#elif defined(simde_math_roundeven)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = simde_math_roundeven(a_.f64[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_NEG_INF:
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) {
r_.m128d_private[i].altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_floor(a_.m128d_private[i].altivec_f64));
}
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) {
r_.m128d_private[i].neon_f64 = vrndmq_f64(a_.m128d_private[i].neon_f64);
}
#elif defined(simde_math_floor)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = simde_math_floor(a_.f64[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_POS_INF:
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) {
r_.m128d_private[i].altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_ceil(a_.m128d_private[i].altivec_f64));
}
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) {
r_.m128d_private[i].neon_f64 = vrndpq_f64(a_.m128d_private[i].neon_f64);
}
#elif defined(simde_math_ceil)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = simde_math_ceil(a_.f64[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_ZERO:
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) {
r_.m128d_private[i].altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_trunc(a_.m128d_private[i].altivec_f64));
}
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) {
r_.m128d_private[i].neon_f64 = vrndq_f64(a_.m128d_private[i].neon_f64);
}
#elif defined(simde_math_trunc)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = simde_math_trunc(a_.f64[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_pd());
#endif
break;
default:
HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_pd());
}
return simde__m512d_from_private(r_);
}
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_ROUND_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/negate.h | .h | 2,639 | 89 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Himanshi Mathur <himanshi18037@iiitd.ac.in>
*/
#if !defined(SIMDE_X86_AVX512_NEGATE_H)
#define SIMDE_X86_AVX512_NEGATE_H
#include "types.h"
#include "mov.h"
#include "xor.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
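/* Negation is implemented as a sign-bit flip: XOR with -0.0 toggles
 * only the sign bit, leaving the magnitude (and NaN payloads)
 * untouched. */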
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_x_mm512_negate_ps(simde__m512 a) {
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
return simde_mm512_xor_ps(a, _mm512_set1_ps(SIMDE_FLOAT32_C(-0.0)));
#else
simde__m512_private
r_,
a_ = simde__m512_to_private(a);
#if defined(SIMDE_VECTOR_NEGATE)
r_.f32 = -a_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = -a_.f32[i];
}
#endif
return simde__m512_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_x_mm512_negate_pd(simde__m512d a) {
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
return simde_mm512_xor_pd(a, _mm512_set1_pd(SIMDE_FLOAT64_C(-0.0)));
#else
simde__m512d_private
r_,
a_ = simde__m512d_to_private(a);
#if defined(SIMDE_VECTOR_NEGATE)
r_.f64 = -a_.f64;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = -a_.f64[i];
}
#endif
return simde__m512d_from_private(r_);
#endif
}
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_NEGATE_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/popcnt.h | .h | 46,335 | 1,347 | #if !defined(SIMDE_X86_AVX512_POPCNT_H)
#define SIMDE_X86_AVX512_POPCNT_H
#include "types.h"
#include "mov.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_popcnt_epi8 (simde__m128i a) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_popcnt_epi8(a);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i8 = vcntq_s8(a_.neon_i8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i8x16_popcnt(a_.wasm_v128);
#elif defined(SIMDE_X86_SSSE3_NATIVE)
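/* Nibble-LUT popcount: split each byte into its low and high nibble,
 * look the 4-bit popcounts up in a 16-entry table via PSHUFB
 * (_mm_shuffle_epi8), then add the two halves. */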
const __m128i low_nibble_set = _mm_set1_epi8(0x0f);
const __m128i high_nibble_of_input = _mm_andnot_si128(low_nibble_set, a_.n);
const __m128i low_nibble_of_input = _mm_and_si128(low_nibble_set, a_.n);
const __m128i lut = _mm_set_epi8(4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0);
r_.n =
_mm_add_epi8(
_mm_shuffle_epi8(
lut,
low_nibble_of_input
),
_mm_shuffle_epi8(
lut,
_mm_srli_epi16(
high_nibble_of_input,
4
)
)
);
#elif defined(SIMDE_X86_SSE2_NATIVE)
/* v -= ((v >> 1) & UINT8_C(0x55)); */
r_.n =
_mm_sub_epi8(
a_.n,
_mm_and_si128(
_mm_srli_epi16(a_.n, 1),
_mm_set1_epi8(0x55)
)
);
/* v = (v & 0x33) + ((v >> 2) & 0x33); */
r_.n =
_mm_add_epi8(
_mm_and_si128(
r_.n,
_mm_set1_epi8(0x33)
),
_mm_and_si128(
_mm_srli_epi16(r_.n, 2),
_mm_set1_epi8(0x33)
)
);
/* v = (v + (v >> 4)) & 0xf */
r_.n =
_mm_and_si128(
_mm_add_epi8(
r_.n,
_mm_srli_epi16(r_.n, 4)
),
_mm_set1_epi8(0x0f)
);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_i8 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), vec_popcnt(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), a_.altivec_i8)));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
a_.u8 -= ((a_.u8 >> 1) & 0x55);
a_.u8 = ((a_.u8 & 0x33) + ((a_.u8 >> 2) & 0x33));
a_.u8 = (a_.u8 + (a_.u8 >> 4)) & 0xf;
r_.u8 = a_.u8 >> ((sizeof(uint8_t) - 1) * CHAR_BIT);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
uint8_t v = HEDLEY_STATIC_CAST(uint8_t, a_.u8[i]);
v -= ((v >> 1) & 0x55);
v = (v & 0x33) + ((v >> 2) & 0x33);
v = (v + (v >> 4)) & 0xf;
r_.u8[i] = v >> (sizeof(uint8_t) - 1) * CHAR_BIT;
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_popcnt_epi8
#define _mm_popcnt_epi8(a) simde_mm_popcnt_epi8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_popcnt_epi8 (simde__m128i src, simde__mmask16 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_popcnt_epi8(src, k, a);
#else
return simde_mm_mask_mov_epi8(src, k, simde_mm_popcnt_epi8(a));
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_popcnt_epi8
#define _mm_mask_popcnt_epi8(src, k, a) simde_mm_mask_popcnt_epi8(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_popcnt_epi8 (simde__mmask16 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_popcnt_epi8(k, a);
#else
return simde_mm_maskz_mov_epi8(k, simde_mm_popcnt_epi8(a));
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_popcnt_epi8
#define _mm_maskz_popcnt_epi8(k, a) simde_mm_maskz_popcnt_epi8(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_popcnt_epi16 (simde__m128i a) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_popcnt_epi16(a);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vpaddlq_s8(vcntq_s8(a_.neon_i8));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i16x8_extadd_pairwise_i8x16(wasm_i8x16_popcnt(a_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_u16 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), vec_popcnt(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), a_.altivec_u16)));
#elif defined(SIMDE_X86_XOP_NATIVE)
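/* Same nibble-LUT trick as the byte version, but XOP's
 * _mm_haddw_epi8 folds adjacent byte counts straight into 16-bit
 * lanes. */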
const __m128i low_nibble_set = _mm_set1_epi8(0x0f);
const __m128i high_nibble_of_input = _mm_andnot_si128(low_nibble_set, a_.n);
const __m128i low_nibble_of_input = _mm_and_si128(low_nibble_set, a_.n);
const __m128i lut = _mm_set_epi8(4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0);
r_.n =
_mm_haddw_epi8(
_mm_add_epi8(
_mm_shuffle_epi8(
lut,
low_nibble_of_input
),
_mm_shuffle_epi8(
lut,
_mm_srli_epi16(high_nibble_of_input, 4)
)
)
);
#elif defined(SIMDE_X86_SSE2_NATIVE)
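/* v -= ((v >> 1) & UINT16_C(0x5555)); */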
r_.n =
_mm_sub_epi16(
a_.n,
_mm_and_si128(
_mm_srli_epi16(a_.n, 1),
_mm_set1_epi16(0x5555)
)
);
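/* v = (v & 0x3333) + ((v >> 2) & 0x3333); */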
r_.n =
_mm_add_epi16(
_mm_and_si128(
r_.n,
_mm_set1_epi16(0x3333)
),
_mm_and_si128(
_mm_srli_epi16(r_.n, 2),
_mm_set1_epi16(0x3333)
)
);
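/* v = (v + (v >> 4)) & 0x0f0f; */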
r_.n =
_mm_and_si128(
_mm_add_epi16(
r_.n,
_mm_srli_epi16(r_.n, 4)
),
_mm_set1_epi16(0x0f0f)
);
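/* v = (v * 0x0101) >> 8; the multiply accumulates both byte counts
 * into the high byte, which the shift brings down. */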
r_.n =
_mm_srli_epi16(
_mm_mullo_epi16(
r_.n,
_mm_set1_epi16(0x0101)
),
(sizeof(uint16_t) - 1) * CHAR_BIT
);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
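/* GCC-style vector extensions: the same SWAR reduction applied to all
 * lanes at once. */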
a_.u16 -= ((a_.u16 >> 1) & UINT16_C(0x5555));
a_.u16 = ((a_.u16 & UINT16_C(0x3333)) + ((a_.u16 >> 2) & UINT16_C(0x3333)));
a_.u16 = (a_.u16 + (a_.u16 >> 4)) & UINT16_C(0x0f0f);
r_.u16 = (a_.u16 * UINT16_C(0x0101)) >> ((sizeof(uint16_t) - 1) * CHAR_BIT);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
uint16_t v = HEDLEY_STATIC_CAST(uint16_t, a_.u16[i]);
v -= ((v >> 1) & UINT16_C(0x5555));
v = ((v & UINT16_C(0x3333)) + ((v >> 2) & UINT16_C(0x3333)));
v = (v + (v >> 4)) & UINT16_C(0x0f0f);
r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, (v * UINT16_C(0x0101))) >> ((sizeof(uint16_t) - 1) * CHAR_BIT);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_popcnt_epi16
#define _mm_popcnt_epi16(a) simde_mm_popcnt_epi16(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_popcnt_epi16 (simde__m128i src, simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_popcnt_epi16(src, k, a);
#else
return simde_mm_mask_mov_epi16(src, k, simde_mm_popcnt_epi16(a));
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_popcnt_epi16
#define _mm_mask_popcnt_epi16(src, k, a) simde_mm_mask_popcnt_epi16(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_popcnt_epi16 (simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_popcnt_epi16(k, a);
#else
return simde_mm_maskz_mov_epi16(k, simde_mm_popcnt_epi16(a));
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_popcnt_epi16
#define _mm_maskz_popcnt_epi16(k, a) simde_mm_maskz_popcnt_epi16(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_popcnt_epi32 (simde__m128i a) {
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_popcnt_epi32(a);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vpaddlq_s16(vpaddlq_s8(vcntq_s8(a_.neon_i8)));
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_u32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_popcnt(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), a_.altivec_u32)));
#elif defined(SIMDE_X86_XOP_NATIVE)
const __m128i low_nibble_set = _mm_set1_epi8(0x0f);
const __m128i high_nibble_of_input = _mm_andnot_si128(low_nibble_set, a_.n);
const __m128i low_nibble_of_input = _mm_and_si128(low_nibble_set, a_.n);
const __m128i lut = _mm_set_epi8(4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0);
r_.n =
_mm_haddd_epi8(
_mm_add_epi8(
_mm_shuffle_epi8(
lut,
low_nibble_of_input
),
_mm_shuffle_epi8(
lut,
_mm_srli_epi16(high_nibble_of_input, 4)
)
)
);
#elif defined(SIMDE_X86_SSE4_1_NATIVE)
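/* SSE4.1 supplies _mm_mullo_epi32, so the same SWAR reduction works on
 * 32-bit lanes, with the 0x01010101 multiply summing all four byte counts
 * into the top byte. */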
r_.n =
_mm_sub_epi32(
a_.n,
_mm_and_si128(
_mm_srli_epi32(a_.n, 1),
_mm_set1_epi32(0x55555555)
)
);
r_.n =
_mm_add_epi32(
_mm_and_si128(
r_.n,
_mm_set1_epi32(0x33333333)
),
_mm_and_si128(
_mm_srli_epi32(r_.n, 2),
_mm_set1_epi32(0x33333333)
)
);
r_.n =
_mm_and_si128(
_mm_add_epi32(
r_.n,
_mm_srli_epi32(r_.n, 4)
),
_mm_set1_epi32(0x0f0f0f0f)
);
r_.n =
_mm_srli_epi32(
_mm_mullo_epi32(
r_.n,
_mm_set1_epi32(0x01010101)
),
(sizeof(uint32_t) - 1) * CHAR_BIT
);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
a_.u32 -= ((a_.u32 >> 1) & UINT32_C(0x55555555));
a_.u32 = ((a_.u32 & UINT32_C(0x33333333)) + ((a_.u32 >> 2) & UINT32_C(0x33333333)));
a_.u32 = (a_.u32 + (a_.u32 >> 4)) & UINT32_C(0x0f0f0f0f);
r_.u32 = (a_.u32 * UINT32_C(0x01010101)) >> ((sizeof(uint32_t) - 1) * CHAR_BIT);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
uint32_t v = HEDLEY_STATIC_CAST(uint32_t, a_.u32[i]);
v -= ((v >> 1) & UINT32_C(0x55555555));
v = ((v & UINT32_C(0x33333333)) + ((v >> 2) & UINT32_C(0x33333333)));
v = (v + (v >> 4)) & UINT32_C(0x0f0f0f0f);
r_.u32[i] = HEDLEY_STATIC_CAST(uint32_t, (v * UINT32_C(0x01010101))) >> ((sizeof(uint32_t) - 1) * CHAR_BIT);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_popcnt_epi32
#define _mm_popcnt_epi32(a) simde_mm_popcnt_epi32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_popcnt_epi32 (simde__m128i src, simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_popcnt_epi32(src, k, a);
#else
return simde_mm_mask_mov_epi32(src, k, simde_mm_popcnt_epi32(a));
#endif
}
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_popcnt_epi32
#define _mm_mask_popcnt_epi32(src, k, a) simde_mm_mask_popcnt_epi32(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_popcnt_epi32 (simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_popcnt_epi32(k, a);
#else
return simde_mm_maskz_mov_epi32(k, simde_mm_popcnt_epi32(a));
#endif
}
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_popcnt_epi32
#define _mm_maskz_popcnt_epi32(k, a) simde_mm_maskz_popcnt_epi32(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_popcnt_epi64 (simde__m128i a) {
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_popcnt_epi64(a);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i64 = vpaddlq_s32(vpaddlq_s16(vpaddlq_s8(vcntq_s8(a_.neon_i8))));
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_u64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_popcnt(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), a_.altivec_u64)));
#elif defined(SIMDE_X86_SSSE3_NATIVE)
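/* SSSE3: nibble-LUT popcount per byte (as in the XOP paths above), then
 * _mm_sad_epu8 against zero sums the absolute byte values -- i.e. the
 * per-byte counts -- within each 64-bit lane. */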
const __m128i low_nibble_set = _mm_set1_epi8(0x0f);
const __m128i high_nibble_of_input = _mm_andnot_si128(low_nibble_set, a_.n);
const __m128i low_nibble_of_input = _mm_and_si128(low_nibble_set, a_.n);
const __m128i lut = _mm_set_epi8(4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0);
r_.n =
_mm_sad_epu8(
_mm_add_epi8(
_mm_shuffle_epi8(
lut,
low_nibble_of_input
),
_mm_shuffle_epi8(
lut,
_mm_srli_epi16(high_nibble_of_input, 4)
)
),
_mm_setzero_si128()
);
#elif defined(SIMDE_X86_SSE2_NATIVE)
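/* SSE2: byte-wise SWAR popcount. SSE2 has no 8-bit shift, so 16-bit
 * shifts are used; the 0x55/0x33/0x0f byte masks clear any bits that
 * crossed byte boundaries. _mm_sad_epu8 then sums the eight byte counts
 * in each 64-bit lane. */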
r_.n =
_mm_sub_epi8(
a_.n,
_mm_and_si128(
_mm_srli_epi16(a_.n, 1),
_mm_set1_epi8(0x55)
)
);
r_.n =
_mm_add_epi8(
_mm_and_si128(
r_.n,
_mm_set1_epi8(0x33)
),
_mm_and_si128(
_mm_srli_epi16(r_.n, 2),
_mm_set1_epi8(0x33)
)
);
r_.n =
_mm_and_si128(
_mm_add_epi8(
r_.n,
_mm_srli_epi16(r_.n, 4)
),
_mm_set1_epi8(0x0f)
);
r_.n =
_mm_sad_epu8(
r_.n,
_mm_setzero_si128()
);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
a_.u64 -= ((a_.u64 >> 1) & UINT64_C(0x5555555555555555));
a_.u64 = ((a_.u64 & UINT64_C(0x3333333333333333)) + ((a_.u64 >> 2) & UINT64_C(0x3333333333333333)));
a_.u64 = (a_.u64 + (a_.u64 >> 4)) & UINT64_C(0x0f0f0f0f0f0f0f0f);
r_.u64 = (a_.u64 * UINT64_C(0x0101010101010101)) >> ((sizeof(uint64_t) - 1) * CHAR_BIT);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
uint64_t v = HEDLEY_STATIC_CAST(uint64_t, a_.u64[i]);
v -= ((v >> 1) & UINT64_C(0x5555555555555555));
v = ((v & UINT64_C(0x3333333333333333)) + ((v >> 2) & UINT64_C(0x3333333333333333)));
v = (v + (v >> 4)) & UINT64_C(0x0f0f0f0f0f0f0f0f);
r_.u64[i] = HEDLEY_STATIC_CAST(uint64_t, (v * UINT64_C(0x0101010101010101))) >> ((sizeof(uint64_t) - 1) * CHAR_BIT);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_popcnt_epi64
#define _mm_popcnt_epi64(a) simde_mm_popcnt_epi64(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_popcnt_epi64 (simde__m128i src, simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_popcnt_epi64(src, k, a);
#else
return simde_mm_mask_mov_epi64(src, k, simde_mm_popcnt_epi64(a));
#endif
}
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_popcnt_epi64
#define _mm_mask_popcnt_epi64(src, k, a) simde_mm_mask_popcnt_epi64(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_popcnt_epi64 (simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_popcnt_epi64(k, a);
#else
return simde_mm_maskz_mov_epi64(k, simde_mm_popcnt_epi64(a));
#endif
}
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_popcnt_epi64
#define _mm_maskz_popcnt_epi64(k, a) simde_mm_maskz_popcnt_epi64(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_popcnt_epi8 (simde__m256i a) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_popcnt_epi8(a);
#else
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
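/* On targets whose natural vector width is at most 128 bits, process the
 * 256-bit input as two 128-bit halves. */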
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_mm_popcnt_epi8(a_.m128i[i]);
}
#elif defined(SIMDE_X86_AVX2_NATIVE)
const __m256i low_nibble_set = _mm256_set1_epi8(0x0f);
const __m256i high_nibble_of_input = _mm256_andnot_si256(low_nibble_set, a_.n);
const __m256i low_nibble_of_input = _mm256_and_si256(low_nibble_set, a_.n);
const __m256i lut =
_mm256_set_epi8(
4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0,
4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0
);
r_.n =
_mm256_add_epi8(
_mm256_shuffle_epi8(
lut,
low_nibble_of_input
),
_mm256_shuffle_epi8(
lut,
_mm256_srli_epi16(
high_nibble_of_input,
4
)
)
);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
a_.u8 -= ((a_.u8 >> 1) & 0x55);
a_.u8 = ((a_.u8 & 0x33) + ((a_.u8 >> 2) & 0x33));
a_.u8 = (a_.u8 + (a_.u8 >> 4)) & 15;
r_.u8 = a_.u8 >> ((sizeof(uint8_t) - 1) * CHAR_BIT);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
uint8_t v = HEDLEY_STATIC_CAST(uint8_t, a_.u8[i]);
v -= ((v >> 1) & 0x55);
v = (v & 0x33) + ((v >> 2) & 0x33);
v = (v + (v >> 4)) & 0xf;
r_.u8[i] = v >> ((sizeof(uint8_t) - 1) * CHAR_BIT);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_popcnt_epi8
#define _mm256_popcnt_epi8(a) simde_mm256_popcnt_epi8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_popcnt_epi8 (simde__m256i src, simde__mmask32 k, simde__m256i a) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_popcnt_epi8(src, k, a);
#else
return simde_mm256_mask_mov_epi8(src, k, simde_mm256_popcnt_epi8(a));
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_popcnt_epi8
#define _mm256_mask_popcnt_epi8(src, k, a) simde_mm256_mask_popcnt_epi8(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_popcnt_epi8 (simde__mmask32 k, simde__m256i a) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_popcnt_epi8(k, a);
#else
return simde_mm256_maskz_mov_epi8(k, simde_mm256_popcnt_epi8(a));
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_popcnt_epi8
#define _mm256_maskz_popcnt_epi8(k, a) simde_mm256_maskz_popcnt_epi8(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_popcnt_epi16 (simde__m256i a) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_popcnt_epi16(a);
#else
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_mm_popcnt_epi16(a_.m128i[i]);
}
#elif defined(SIMDE_X86_AVX2_NATIVE)
r_.n =
_mm256_sub_epi16(
a_.n,
_mm256_and_si256(
_mm256_srli_epi16(a_.n, 1),
_mm256_set1_epi16(0x5555)
)
);
r_.n =
_mm256_add_epi16(
_mm256_and_si256(
r_.n,
_mm256_set1_epi16(0x3333)
),
_mm256_and_si256(
_mm256_srli_epi16(r_.n, 2),
_mm256_set1_epi16(0x3333)
)
);
r_.n =
_mm256_and_si256(
_mm256_add_epi16(
r_.n,
_mm256_srli_epi16(r_.n, 4)
),
_mm256_set1_epi16(0x0f0f)
);
r_.n =
_mm256_srli_epi16(
_mm256_mullo_epi16(
r_.n,
_mm256_set1_epi16(0x0101)
),
(sizeof(uint16_t) - 1) * CHAR_BIT
);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
a_.u16 -= ((a_.u16 >> 1) & UINT16_C(0x5555));
a_.u16 = ((a_.u16 & UINT16_C(0x3333)) + ((a_.u16 >> 2) & UINT16_C(0x3333)));
a_.u16 = (a_.u16 + (a_.u16 >> 4)) & UINT16_C(0x0f0f);
r_.u16 = (a_.u16 * UINT16_C(0x0101)) >> ((sizeof(uint16_t) - 1) * CHAR_BIT);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
uint16_t v = HEDLEY_STATIC_CAST(uint16_t, a_.u16[i]);
v -= ((v >> 1) & UINT16_C(0x5555));
v = ((v & UINT16_C(0x3333)) + ((v >> 2) & UINT16_C(0x3333)));
v = (v + (v >> 4)) & UINT16_C(0x0f0f);
r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, (v * UINT16_C(0x0101))) >> ((sizeof(uint16_t) - 1) * CHAR_BIT);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_popcnt_epi16
#define _mm256_popcnt_epi16(a) simde_mm256_popcnt_epi16(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_popcnt_epi16 (simde__m256i src, simde__mmask16 k, simde__m256i a) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_popcnt_epi16(src, k, a);
#else
return simde_mm256_mask_mov_epi16(src, k, simde_mm256_popcnt_epi16(a));
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_popcnt_epi16
#define _mm256_mask_popcnt_epi16(src, k, a) simde_mm256_mask_popcnt_epi16(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_popcnt_epi16 (simde__mmask16 k, simde__m256i a) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_popcnt_epi16(k, a);
#else
return simde_mm256_maskz_mov_epi16(k, simde_mm256_popcnt_epi16(a));
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_popcnt_epi16
#define _mm256_maskz_popcnt_epi16(k, a) simde_mm256_maskz_popcnt_epi16(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_popcnt_epi32 (simde__m256i a) {
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_popcnt_epi32(a);
#else
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_mm_popcnt_epi32(a_.m128i[i]);
}
#elif defined(SIMDE_X86_AVX2_NATIVE)
r_.n =
_mm256_sub_epi32(
a_.n,
_mm256_and_si256(
_mm256_srli_epi32(a_.n, 1),
_mm256_set1_epi32(0x55555555)
)
);
r_.n =
_mm256_add_epi32(
_mm256_and_si256(
r_.n,
_mm256_set1_epi32(0x33333333)
),
_mm256_and_si256(
_mm256_srli_epi32(r_.n, 2),
_mm256_set1_epi32(0x33333333)
)
);
r_.n =
_mm256_and_si256(
_mm256_add_epi32(
r_.n,
_mm256_srli_epi32(r_.n, 4)
),
_mm256_set1_epi32(0x0f0f0f0f)
);
r_.n =
_mm256_srli_epi32(
_mm256_mullo_epi32(
r_.n,
_mm256_set1_epi32(0x01010101)
),
(sizeof(uint32_t) - 1) * CHAR_BIT
);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
a_.u32 -= ((a_.u32 >> 1) & UINT32_C(0x55555555));
a_.u32 = ((a_.u32 & UINT32_C(0x33333333)) + ((a_.u32 >> 2) & UINT32_C(0x33333333)));
a_.u32 = (a_.u32 + (a_.u32 >> 4)) & UINT32_C(0x0f0f0f0f);
r_.u32 = (a_.u32 * UINT32_C(0x01010101)) >> ((sizeof(uint32_t) - 1) * CHAR_BIT);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
uint32_t v = HEDLEY_STATIC_CAST(uint32_t, a_.u32[i]);
v -= ((v >> 1) & UINT32_C(0x55555555));
v = ((v & UINT32_C(0x33333333)) + ((v >> 2) & UINT32_C(0x33333333)));
v = (v + (v >> 4)) & UINT32_C(0x0f0f0f0f);
r_.u32[i] = HEDLEY_STATIC_CAST(uint32_t, (v * UINT32_C(0x01010101))) >> ((sizeof(uint32_t) - 1) * CHAR_BIT);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_popcnt_epi32
#define _mm256_popcnt_epi32(a) simde_mm256_popcnt_epi32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_popcnt_epi32 (simde__m256i src, simde__mmask8 k, simde__m256i a) {
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_popcnt_epi32(src, k, a);
#else
return simde_mm256_mask_mov_epi32(src, k, simde_mm256_popcnt_epi32(a));
#endif
}
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_popcnt_epi32
#define _mm256_mask_popcnt_epi32(src, k, a) simde_mm256_mask_popcnt_epi32(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_popcnt_epi32 (simde__mmask8 k, simde__m256i a) {
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_popcnt_epi32(k, a);
#else
return simde_mm256_maskz_mov_epi32(k, simde_mm256_popcnt_epi32(a));
#endif
}
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_popcnt_epi32
#define _mm256_maskz_popcnt_epi32(k, a) simde_mm256_maskz_popcnt_epi32(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_popcnt_epi64 (simde__m256i a) {
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_popcnt_epi64(a);
#else
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_mm_popcnt_epi64(a_.m128i[i]);
}
#elif defined(SIMDE_X86_AVX2_NATIVE)
const __m256i low_nibble_set = _mm256_set1_epi8(0x0f);
const __m256i high_nibble_of_input = _mm256_andnot_si256(low_nibble_set, a_.n);
const __m256i low_nibble_of_input = _mm256_and_si256(low_nibble_set, a_.n);
const __m256i lut =
_mm256_set_epi8(
4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0,
4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0
);
r_.n =
_mm256_sad_epu8(
_mm256_add_epi8(
_mm256_shuffle_epi8(
lut,
low_nibble_of_input
),
_mm256_shuffle_epi8(
lut,
_mm256_srli_epi16(high_nibble_of_input, 4)
)
),
_mm256_setzero_si256()
);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
a_.u64 -= ((a_.u64 >> 1) & UINT64_C(0x5555555555555555));
a_.u64 = ((a_.u64 & UINT64_C(0x3333333333333333)) + ((a_.u64 >> 2) & UINT64_C(0x3333333333333333)));
a_.u64 = (a_.u64 + (a_.u64 >> 4)) & UINT64_C(0x0f0f0f0f0f0f0f0f);
r_.u64 = (a_.u64 * UINT64_C(0x0101010101010101)) >> ((sizeof(uint64_t) - 1) * CHAR_BIT);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
uint64_t v = HEDLEY_STATIC_CAST(uint64_t, a_.u64[i]);
v -= ((v >> 1) & UINT64_C(0x5555555555555555));
v = ((v & UINT64_C(0x3333333333333333)) + ((v >> 2) & UINT64_C(0x3333333333333333)));
v = (v + (v >> 4)) & UINT64_C(0x0f0f0f0f0f0f0f0f);
r_.u64[i] = HEDLEY_STATIC_CAST(uint64_t, (v * UINT64_C(0x0101010101010101))) >> ((sizeof(uint64_t) - 1) * CHAR_BIT);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_popcnt_epi64
#define _mm256_popcnt_epi64(a) simde_mm256_popcnt_epi64(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_popcnt_epi64 (simde__m256i src, simde__mmask8 k, simde__m256i a) {
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_popcnt_epi64(src, k, a);
#else
return simde_mm256_mask_mov_epi64(src, k, simde_mm256_popcnt_epi64(a));
#endif
}
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_popcnt_epi64
#define _mm256_mask_popcnt_epi64(src, k, a) simde_mm256_mask_popcnt_epi64(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_popcnt_epi64 (simde__mmask8 k, simde__m256i a) {
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_popcnt_epi64(k, a);
#else
return simde_mm256_maskz_mov_epi64(k, simde_mm256_popcnt_epi64(a));
#endif
}
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_popcnt_epi64
#define _mm256_maskz_popcnt_epi64(k, a) simde_mm256_maskz_popcnt_epi64(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_popcnt_epi8 (simde__m512i a) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE)
return _mm512_popcnt_epi8(a);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_mm_popcnt_epi8(a_.m128i[i]);
}
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_popcnt_epi8(a_.m256i[i]);
}
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
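/* AVX-512BW: nibble-LUT popcount. _mm512_shuffle_epi8 looks up within
 * each 128-bit lane independently, so the 16-entry table is replicated
 * four times. */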
const __m512i low_nibble_set = _mm512_set1_epi8(0x0f);
const __m512i high_nibble_of_input = _mm512_andnot_si512(low_nibble_set, a_.n);
const __m512i low_nibble_of_input = _mm512_and_si512(low_nibble_set, a_.n);
const __m512i lut =
simde_mm512_set_epi8(
4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0,
4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0,
4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0,
4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0
);
r_.n =
_mm512_add_epi8(
_mm512_shuffle_epi8(
lut,
low_nibble_of_input
),
_mm512_shuffle_epi8(
lut,
_mm512_srli_epi16(
high_nibble_of_input,
4
)
)
);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
a_.u8 -= ((a_.u8 >> 1) & 0x55);
a_.u8 = ((a_.u8 & 0x33) + ((a_.u8 >> 2) & 0x33));
a_.u8 = (a_.u8 + (a_.u8 >> 4)) & 15;
r_.u8 = a_.u8 >> ((sizeof(uint8_t) - 1) * CHAR_BIT);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
uint8_t v = HEDLEY_STATIC_CAST(uint8_t, a_.u8[i]);
v -= ((v >> 1) & 0x55);
v = (v & 0x33) + ((v >> 2) & 0x33);
v = (v + (v >> 4)) & 0xf;
r_.u8[i] = v >> ((sizeof(uint8_t) - 1) * CHAR_BIT);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES)
#undef _mm512_popcnt_epi8
#define _mm512_popcnt_epi8(a) simde_mm512_popcnt_epi8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_popcnt_epi8 (simde__m512i src, simde__mmask64 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE)
return _mm512_mask_popcnt_epi8(src, k, a);
#else
return simde_mm512_mask_mov_epi8(src, k, simde_mm512_popcnt_epi8(a));
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_popcnt_epi8
#define _mm512_mask_popcnt_epi8(src, k, a) simde_mm512_mask_popcnt_epi8(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_popcnt_epi8 (simde__mmask64 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE)
return _mm512_maskz_popcnt_epi8(k, a);
#else
return simde_mm512_maskz_mov_epi8(k, simde_mm512_popcnt_epi8(a));
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_popcnt_epi8
#define _mm512_maskz_popcnt_epi8(k, a) simde_mm512_maskz_popcnt_epi8(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_popcnt_epi16 (simde__m512i a) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE)
return _mm512_popcnt_epi16(a);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_mm_popcnt_epi16(a_.m128i[i]);
}
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_popcnt_epi16(a_.m256i[i]);
}
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
r_.n =
_mm512_sub_epi16(
a_.n,
_mm512_and_si512(
_mm512_srli_epi16(a_.n, 1),
_mm512_set1_epi16(0x5555)
)
);
r_.n =
_mm512_add_epi16(
_mm512_and_si512(
r_.n,
_mm512_set1_epi16(0x3333)
),
_mm512_and_si512(
_mm512_srli_epi16(r_.n, 2),
_mm512_set1_epi16(0x3333)
)
);
r_.n =
_mm512_and_si512(
_mm512_add_epi16(
r_.n,
_mm512_srli_epi16(r_.n, 4)
),
_mm512_set1_epi16(0x0f0f)
);
r_.n =
_mm512_srli_epi16(
_mm512_mullo_epi16(
r_.n,
_mm512_set1_epi16(0x0101)
),
(sizeof(uint16_t) - 1) * CHAR_BIT
);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
a_.u16 -= ((a_.u16 >> 1) & UINT16_C(0x5555));
a_.u16 = ((a_.u16 & UINT16_C(0x3333)) + ((a_.u16 >> 2) & UINT16_C(0x3333)));
a_.u16 = (a_.u16 + (a_.u16 >> 4)) & UINT16_C(0x0f0f);
r_.u16 = (a_.u16 * UINT16_C(0x0101)) >> ((sizeof(uint16_t) - 1) * CHAR_BIT);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
uint16_t v = HEDLEY_STATIC_CAST(uint16_t, a_.u16[i]);
v -= ((v >> 1) & UINT16_C(0x5555));
v = ((v & UINT16_C(0x3333)) + ((v >> 2) & UINT16_C(0x3333)));
v = (v + (v >> 4)) & UINT16_C(0x0f0f);
r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, (v * UINT16_C(0x0101))) >> ((sizeof(uint16_t) - 1) * CHAR_BIT);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES)
#undef _mm512_popcnt_epi16
#define _mm512_popcnt_epi16(a) simde_mm512_popcnt_epi16(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_popcnt_epi16 (simde__m512i src, simde__mmask32 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE)
return _mm512_mask_popcnt_epi16(src, k, a);
#else
return simde_mm512_mask_mov_epi16(src, k, simde_mm512_popcnt_epi16(a));
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_popcnt_epi16
#define _mm512_mask_popcnt_epi16(src, k, a) simde_mm512_mask_popcnt_epi16(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_popcnt_epi16 (simde__mmask32 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE)
return _mm512_maskz_popcnt_epi16(k, a);
#else
return simde_mm512_maskz_mov_epi16(k, simde_mm512_popcnt_epi16(a));
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_popcnt_epi16
#define _mm512_maskz_popcnt_epi16(k, a) simde_mm512_maskz_popcnt_epi16(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_popcnt_epi32 (simde__m512i a) {
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE)
return _mm512_popcnt_epi32(a);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_mm_popcnt_epi32(a_.m128i[i]);
}
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_popcnt_epi32(a_.m256i[i]);
}
#elif defined(SIMDE_X86_AVX512F_NATIVE)
r_.n =
_mm512_sub_epi32(
a_.n,
_mm512_and_si512(
_mm512_srli_epi32(a_.n, 1),
_mm512_set1_epi32(0x55555555)
)
);
r_.n =
_mm512_add_epi32(
_mm512_and_si512(
r_.n,
_mm512_set1_epi32(0x33333333)
),
_mm512_and_si512(
_mm512_srli_epi32(r_.n, 2),
_mm512_set1_epi32(0x33333333)
)
);
r_.n =
_mm512_and_si512(
_mm512_add_epi32(
r_.n,
_mm512_srli_epi32(r_.n, 4)
),
_mm512_set1_epi32(0x0f0f0f0f)
);
r_.n =
_mm512_srli_epi32(
_mm512_mullo_epi32(
r_.n,
_mm512_set1_epi32(0x01010101)
),
(sizeof(uint32_t) - 1) * CHAR_BIT
);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
a_.u32 -= ((a_.u32 >> 1) & UINT32_C(0x55555555));
a_.u32 = ((a_.u32 & UINT32_C(0x33333333)) + ((a_.u32 >> 2) & UINT32_C(0x33333333)));
a_.u32 = (a_.u32 + (a_.u32 >> 4)) & UINT32_C(0x0f0f0f0f);
r_.u32 = (a_.u32 * UINT32_C(0x01010101)) >> ((sizeof(uint32_t) - 1) * CHAR_BIT);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
uint32_t v = HEDLEY_STATIC_CAST(uint32_t, a_.u32[i]);
v -= ((v >> 1) & UINT32_C(0x55555555));
v = ((v & UINT32_C(0x33333333)) + ((v >> 2) & UINT32_C(0x33333333)));
v = (v + (v >> 4)) & UINT32_C(0x0f0f0f0f);
r_.u32[i] = HEDLEY_STATIC_CAST(uint32_t, (v * UINT32_C(0x01010101))) >> ((sizeof(uint32_t) - 1) * CHAR_BIT);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_popcnt_epi32
#define _mm512_popcnt_epi32(a) simde_mm512_popcnt_epi32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_popcnt_epi32 (simde__m512i src, simde__mmask16 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE)
return _mm512_mask_popcnt_epi32(src, k, a);
#else
return simde_mm512_mask_mov_epi32(src, k, simde_mm512_popcnt_epi32(a));
#endif
}
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_popcnt_epi32
#define _mm512_mask_popcnt_epi32(src, k, a) simde_mm512_mask_popcnt_epi32(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_popcnt_epi32 (simde__mmask16 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE)
return _mm512_maskz_popcnt_epi32(k, a);
#else
return simde_mm512_maskz_mov_epi32(k, simde_mm512_popcnt_epi32(a));
#endif
}
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_popcnt_epi32
#define _mm512_maskz_popcnt_epi32(k, a) simde_mm512_maskz_popcnt_epi32(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_popcnt_epi64 (simde__m512i a) {
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE)
return _mm512_popcnt_epi64(a);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_mm_popcnt_epi64(a_.m128i[i]);
}
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_popcnt_epi64(a_.m256i[i]);
}
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
const __m512i low_nibble_set = _mm512_set1_epi8(0x0f);
const __m512i high_nibble_of_input = _mm512_andnot_si512(low_nibble_set, a_.n);
const __m512i low_nibble_of_input = _mm512_and_si512(low_nibble_set, a_.n);
const __m512i lut =
simde_mm512_set_epi8(
4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0,
4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0,
4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0,
4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0
);
r_.n =
_mm512_sad_epu8(
_mm512_add_epi8(
_mm512_shuffle_epi8(
lut,
low_nibble_of_input
),
_mm512_shuffle_epi8(
lut,
_mm512_srli_epi16(high_nibble_of_input, 4)
)
),
_mm512_setzero_si512()
);
#elif defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE)
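/* AVX-512DQ provides _mm512_mullo_epi64, so the SWAR reduction plus the
 * 0x0101010101010101 multiply can sum all eight byte counts of a 64-bit
 * lane directly. */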
r_.n =
_mm512_sub_epi64(
a_.n,
_mm512_and_si512(
_mm512_srli_epi64(a_.n, 1),
_mm512_set1_epi64(0x5555555555555555)
)
);
r_.n =
_mm512_add_epi64(
_mm512_and_si512(
r_.n,
_mm512_set1_epi64(0x3333333333333333)
),
_mm512_and_si512(
_mm512_srli_epi64(r_.n, 2),
_mm512_set1_epi64(0x3333333333333333)
)
);
r_.n =
_mm512_and_si512(
_mm512_add_epi64(
r_.n,
_mm512_srli_epi64(r_.n, 4)
),
_mm512_set1_epi64(0x0f0f0f0f0f0f0f0f)
);
r_.n =
_mm512_srli_epi64(
_mm512_mullo_epi64(
r_.n,
_mm512_set1_epi64(0x0101010101010101)
),
(sizeof(uint64_t) - 1) * CHAR_BIT
);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
a_.u64 -= ((a_.u64 >> 1) & UINT64_C(0x5555555555555555));
a_.u64 = ((a_.u64 & UINT64_C(0x3333333333333333)) + ((a_.u64 >> 2) & UINT64_C(0x3333333333333333)));
a_.u64 = (a_.u64 + (a_.u64 >> 4)) & UINT64_C(0x0f0f0f0f0f0f0f0f);
r_.u64 = (a_.u64 * UINT64_C(0x0101010101010101)) >> ((sizeof(uint64_t) - 1) * CHAR_BIT);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
uint64_t v = HEDLEY_STATIC_CAST(uint64_t, a_.u64[i]);
v -= ((v >> 1) & UINT64_C(0x5555555555555555));
v = ((v & UINT64_C(0x3333333333333333)) + ((v >> 2) & UINT64_C(0x3333333333333333)));
v = (v + (v >> 4)) & UINT64_C(0x0f0f0f0f0f0f0f0f);
r_.u64[i] = HEDLEY_STATIC_CAST(uint64_t, (v * UINT64_C(0x0101010101010101))) >> ((sizeof(uint64_t) - 1) * CHAR_BIT);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_popcnt_epi64
#define _mm512_popcnt_epi64(a) simde_mm512_popcnt_epi64(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_popcnt_epi64 (simde__m512i src, simde__mmask8 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE)
return _mm512_mask_popcnt_epi64(src, k, a);
#else
return simde_mm512_mask_mov_epi64(src, k, simde_mm512_popcnt_epi64(a));
#endif
}
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_popcnt_epi64
#define _mm512_mask_popcnt_epi64(src, k, a) simde_mm512_mask_popcnt_epi64(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_popcnt_epi64 (simde__mmask8 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE)
return _mm512_maskz_popcnt_epi64(k, a);
#else
return simde_mm512_maskz_mov_epi64(k, simde_mm512_popcnt_epi64(a));
#endif
}
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_popcnt_epi64
#define _mm512_maskz_popcnt_epi64(k, a) simde_mm512_maskz_popcnt_epi64(k, a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_POPCNT_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Hidayat Khan <huk2209@gmail.com>
*/
#if !defined(SIMDE_X86_AVX512_UNPACKHI_H)
#define SIMDE_X86_AVX512_UNPACKHI_H
#include "types.h"
#include "../avx2.h"
#include "mov.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_unpackhi_epi8 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_unpackhi_epi8(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_)
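/* Like the native instruction, unpackhi interleaves the high half of each
 * 128-bit lane of a and b: shuffle indices 0-63 select from a, 64-127
 * from b. */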
r_.i8 = SIMDE_SHUFFLE_VECTOR_(8, 64, a_.i8, b_.i8,
8, 72, 9, 73, 10, 74, 11, 75,
12, 76, 13, 77, 14, 78, 15, 79,
24, 88, 25, 89, 26, 90, 27, 91,
28, 92, 29, 93, 30, 94, 31, 95,
40, 104, 41, 105, 42, 106, 43, 107,
44, 108, 45, 109, 46, 110, 47, 111,
56, 120, 57, 121, 58, 122, 59, 123,
60, 124, 61, 125, 62, 126, 63, 127);
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
r_.m256i[0] = simde_mm256_unpackhi_epi8(a_.m256i[0], b_.m256i[0]);
r_.m256i[1] = simde_mm256_unpackhi_epi8(a_.m256i[1], b_.m256i[1]);
#else
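/* Scalar fallback: ~(~i | 7) equals i & ~7, so the source index is
 * 16*(i/8) + 8 + (i%8) -- the high eight bytes of each 16-byte lane.
 * The epi16 and epi32 loops below use the same trick with masks 3
 * and 1. */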
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0]) / 2) ; i++) {
r_.i8[2 * i] = a_.i8[i + 8 + ~(~i | 7)];
r_.i8[2 * i + 1] = b_.i8[i + 8 + ~(~i | 7)];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_unpackhi_epi8
#define _mm512_unpackhi_epi8(a, b) simde_mm512_unpackhi_epi8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_unpackhi_epi8(simde__m512i src, simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_unpackhi_epi8(src, k, a, b);
#else
return simde_mm512_mask_mov_epi8(src, k, simde_mm512_unpackhi_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_unpackhi_epi8
#define _mm512_mask_unpackhi_epi8(src, k, a, b) simde_mm512_mask_unpackhi_epi8(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_unpackhi_epi8(simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_unpackhi_epi8(k, a, b);
#else
return simde_mm512_maskz_mov_epi8(k, simde_mm512_unpackhi_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_unpackhi_epi8
#define _mm512_maskz_unpackhi_epi8(k, a, b) simde_mm512_maskz_unpackhi_epi8(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_unpackhi_epi8(simde__m256i src, simde__mmask32 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_unpackhi_epi8(src, k, a, b);
#else
return simde_mm256_mask_mov_epi8(src, k, simde_mm256_unpackhi_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_unpackhi_epi8
#define _mm256_mask_unpackhi_epi8(src, k, a, b) simde_mm256_mask_unpackhi_epi8(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_unpackhi_epi8(simde__mmask32 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_unpackhi_epi8(k, a, b);
#else
return simde_mm256_maskz_mov_epi8(k, simde_mm256_unpackhi_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_unpackhi_epi8
#define _mm256_maskz_unpackhi_epi8(k, a, b) simde_mm256_maskz_unpackhi_epi8(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_unpackhi_epi8(simde__m128i src, simde__mmask16 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_unpackhi_epi8(src, k, a, b);
#else
return simde_mm_mask_mov_epi8(src, k, simde_mm_unpackhi_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_unpackhi_epi8
#define _mm_mask_unpackhi_epi8(src, k, a, b) simde_mm_mask_unpackhi_epi8(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_unpackhi_epi8(simde__mmask16 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_unpackhi_epi8(k, a, b);
#else
return simde_mm_maskz_mov_epi8(k, simde_mm_unpackhi_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_unpackhi_epi8
#define _mm_maskz_unpackhi_epi8(k, a, b) simde_mm_maskz_unpackhi_epi8(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_unpackhi_epi16 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_unpackhi_epi16(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_)
r_.i16 = SIMDE_SHUFFLE_VECTOR_(16, 64, a_.i16, b_.i16,
4, 36, 5, 37, 6, 38, 7, 39, 12, 44, 13, 45, 14, 46, 15, 47,
20, 52, 21, 53, 22, 54, 23, 55, 28, 60, 29, 61, 30, 62, 31, 63);
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
r_.m256i[0] = simde_mm256_unpackhi_epi16(a_.m256i[0], b_.m256i[0]);
r_.m256i[1] = simde_mm256_unpackhi_epi16(a_.m256i[1], b_.m256i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0]) / 2) ; i++) {
r_.i16[2 * i] = a_.i16[i + 4 + ~(~i | 3)];
r_.i16[2 * i + 1] = b_.i16[i + 4 + ~(~i | 3)];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_unpackhi_epi16
#define _mm512_unpackhi_epi16(a, b) simde_mm512_unpackhi_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_unpackhi_epi16(simde__m512i src, simde__mmask32 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_unpackhi_epi16(src, k, a, b);
#else
return simde_mm512_mask_mov_epi16(src, k, simde_mm512_unpackhi_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_unpackhi_epi16
#define _mm512_mask_unpackhi_epi16(src, k, a, b) simde_mm512_mask_unpackhi_epi16(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_unpackhi_epi16(simde__mmask32 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_unpackhi_epi16(k, a, b);
#else
return simde_mm512_maskz_mov_epi16(k, simde_mm512_unpackhi_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_unpackhi_epi16
#define _mm512_maskz_unpackhi_epi16(k, a, b) simde_mm512_maskz_unpackhi_epi16(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_unpackhi_epi16(simde__m256i src, simde__mmask16 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_unpackhi_epi16(src, k, a, b);
#else
return simde_mm256_mask_mov_epi16(src, k, simde_mm256_unpackhi_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_unpackhi_epi16
#define _mm256_mask_unpackhi_epi16(src, k, a, b) simde_mm256_mask_unpackhi_epi16(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_unpackhi_epi16(simde__mmask16 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_unpackhi_epi16(k, a, b);
#else
return simde_mm256_maskz_mov_epi16(k, simde_mm256_unpackhi_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_unpackhi_epi16
#define _mm256_maskz_unpackhi_epi16(k, a, b) simde_mm256_maskz_unpackhi_epi16(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_unpackhi_epi16(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_unpackhi_epi16(src, k, a, b);
#else
return simde_mm_mask_mov_epi16(src, k, simde_mm_unpackhi_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_unpackhi_epi16
#define _mm_mask_unpackhi_epi16(src, k, a, b) simde_mm_mask_unpackhi_epi16(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_unpackhi_epi16(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_unpackhi_epi16(k, a, b);
#else
return simde_mm_maskz_mov_epi16(k, simde_mm_unpackhi_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_unpackhi_epi16
#define _mm_maskz_unpackhi_epi16(k, a, b) simde_mm_maskz_unpackhi_epi16(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_unpackhi_epi32 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_unpackhi_epi32(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_)
r_.i32 = SIMDE_SHUFFLE_VECTOR_(32, 64, a_.i32, b_.i32,
2, 18, 3, 19, 6, 22, 7, 23,
10, 26, 11, 27, 14, 30, 15, 31);
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
r_.m256i[0] = simde_mm256_unpackhi_epi32(a_.m256i[0], b_.m256i[0]);
r_.m256i[1] = simde_mm256_unpackhi_epi32(a_.m256i[1], b_.m256i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0]) / 2) ; i++) {
r_.i32[2 * i] = a_.i32[i + 2 + ~(~i | 1)];
r_.i32[2 * i + 1] = b_.i32[i + 2 + ~(~i | 1)];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_unpackhi_epi32
#define _mm512_unpackhi_epi32(a, b) simde_mm512_unpackhi_epi32(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_unpackhi_epi32(simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_unpackhi_epi32(src, k, a, b);
#else
return simde_mm512_mask_mov_epi32(src, k, simde_mm512_unpackhi_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_unpackhi_epi32
#define _mm512_mask_unpackhi_epi32(src, k, a, b) simde_mm512_mask_unpackhi_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_unpackhi_epi32(simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_unpackhi_epi32(k, a, b);
#else
return simde_mm512_maskz_mov_epi32(k, simde_mm512_unpackhi_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_unpackhi_epi32
#define _mm512_maskz_unpackhi_epi32(k, a, b) simde_mm512_maskz_unpackhi_epi32(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_unpackhi_epi32(simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_unpackhi_epi32(src, k, a, b);
#else
return simde_mm256_mask_mov_epi32(src, k, simde_mm256_unpackhi_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_unpackhi_epi32
#define _mm256_mask_unpackhi_epi32(src, k, a, b) simde_mm256_mask_unpackhi_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_unpackhi_epi32(simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_unpackhi_epi32(k, a, b);
#else
return simde_mm256_maskz_mov_epi32(k, simde_mm256_unpackhi_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_unpackhi_epi32
#define _mm256_maskz_unpackhi_epi32(k, a, b) simde_mm256_maskz_unpackhi_epi32(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_unpackhi_epi32(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_unpackhi_epi32(src, k, a, b);
#else
return simde_mm_mask_mov_epi32(src, k, simde_mm_unpackhi_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_unpackhi_epi32
#define _mm_mask_unpackhi_epi32(src, k, a, b) simde_mm_mask_unpackhi_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_unpackhi_epi32(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_unpackhi_epi32(k, a, b);
#else
return simde_mm_maskz_mov_epi32(k, simde_mm_unpackhi_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_unpackhi_epi32
#define _mm_maskz_unpackhi_epi32(k, a, b) simde_mm_maskz_unpackhi_epi32(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_unpackhi_epi64 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_unpackhi_epi64(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_)
r_.i64 = SIMDE_SHUFFLE_VECTOR_(64, 64, a_.i64, b_.i64, 1, 9, 3, 11, 5, 13, 7, 15);
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
r_.m256i[0] = simde_mm256_unpackhi_epi64(a_.m256i[0], b_.m256i[0]);
r_.m256i[1] = simde_mm256_unpackhi_epi64(a_.m256i[1], b_.m256i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0]) / 2) ; i++) {
r_.i64[2 * i] = a_.i64[2 * i + 1];
r_.i64[2 * i + 1] = b_.i64[2 * i + 1];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_unpackhi_epi64
#define _mm512_unpackhi_epi64(a, b) simde_mm512_unpackhi_epi64(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_unpackhi_epi64(simde__m512i src, simde__mmask8 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_unpackhi_epi64(src, k, a, b);
#else
return simde_mm512_mask_mov_epi64(src, k, simde_mm512_unpackhi_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_unpackhi_epi64
#define _mm512_mask_unpackhi_epi64(src, k, a, b) simde_mm512_mask_unpackhi_epi64(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_unpackhi_epi64(simde__mmask8 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_unpackhi_epi64(k, a, b);
#else
return simde_mm512_maskz_mov_epi64(k, simde_mm512_unpackhi_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_unpackhi_epi64
#define _mm512_maskz_unpackhi_epi64(k, a, b) simde_mm512_maskz_unpackhi_epi64(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_unpackhi_epi64(simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_unpackhi_epi64(src, k, a, b);
#else
return simde_mm256_mask_mov_epi64(src, k, simde_mm256_unpackhi_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_unpackhi_epi64
#define _mm256_mask_unpackhi_epi64(src, k, a, b) simde_mm256_mask_unpackhi_epi64(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_unpackhi_epi64(simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_unpackhi_epi64(k, a, b);
#else
return simde_mm256_maskz_mov_epi64(k, simde_mm256_unpackhi_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_unpackhi_epi64
#define _mm256_maskz_unpackhi_epi64(k, a, b) simde_mm256_maskz_unpackhi_epi64(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_unpackhi_epi64(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_unpackhi_epi64(src, k, a, b);
#else
return simde_mm_mask_mov_epi64(src, k, simde_mm_unpackhi_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_unpackhi_epi64
#define _mm_mask_unpackhi_epi64(src, k, a, b) simde_mm_mask_unpackhi_epi64(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_unpackhi_epi64(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_unpackhi_epi64(k, a, b);
#else
return simde_mm_maskz_mov_epi64(k, simde_mm_unpackhi_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_unpackhi_epi64
#define _mm_maskz_unpackhi_epi64(k, a, b) simde_mm_maskz_unpackhi_epi64(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_unpackhi_ps (simde__m512 a, simde__m512 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_unpackhi_ps(a, b);
#else
simde__m512_private
r_,
a_ = simde__m512_to_private(a),
b_ = simde__m512_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 64, a_.f32, b_.f32,
2, 18, 3, 19, 6, 22, 7, 23,
10, 26, 11, 27, 14, 30, 15, 31);
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
r_.m256[0] = simde_mm256_unpackhi_ps(a_.m256[0], b_.m256[0]);
r_.m256[1] = simde_mm256_unpackhi_ps(a_.m256[1], b_.m256[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0]) / 2) ; i++) {
r_.f32[2 * i] = a_.f32[i + 2 + ~(~i | 1)];
r_.f32[2 * i + 1] = b_.f32[i + 2 + ~(~i | 1)];
}
#endif
return simde__m512_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_unpackhi_ps
#define _mm512_unpackhi_ps(a, b) simde_mm512_unpackhi_ps(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_unpackhi_ps(simde__m512 src, simde__mmask16 k, simde__m512 a, simde__m512 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_unpackhi_ps(src, k, a, b);
#else
return simde_mm512_mask_mov_ps(src, k, simde_mm512_unpackhi_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_unpackhi_ps
#define _mm512_mask_unpackhi_ps(src, k, a, b) simde_mm512_mask_unpackhi_ps(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_maskz_unpackhi_ps(simde__mmask16 k, simde__m512 a, simde__m512 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_unpackhi_ps(k, a, b);
#else
return simde_mm512_maskz_mov_ps(k, simde_mm512_unpackhi_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_unpackhi_ps
#define _mm512_maskz_unpackhi_ps(k, a, b) simde_mm512_maskz_unpackhi_ps(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_mask_unpackhi_ps(simde__m256 src, simde__mmask8 k, simde__m256 a, simde__m256 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_unpackhi_ps(src, k, a, b);
#else
return simde_mm256_mask_mov_ps(src, k, simde_mm256_unpackhi_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_unpackhi_ps
#define _mm256_mask_unpackhi_ps(src, k, a, b) simde_mm256_mask_unpackhi_ps(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_maskz_unpackhi_ps(simde__mmask8 k, simde__m256 a, simde__m256 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_unpackhi_ps(k, a, b);
#else
return simde_mm256_maskz_mov_ps(k, simde_mm256_unpackhi_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_unpackhi_ps
#define _mm256_maskz_unpackhi_ps(k, a, b) simde_mm256_maskz_unpackhi_ps(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mask_unpackhi_ps(simde__m128 src, simde__mmask8 k, simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_unpackhi_ps(src, k, a, b);
#else
return simde_mm_mask_mov_ps(src, k, simde_mm_unpackhi_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_unpackhi_ps
#define _mm_mask_unpackhi_ps(src, k, a, b) simde_mm_mask_unpackhi_ps(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_maskz_unpackhi_ps(simde__mmask8 k, simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_unpackhi_ps(k, a, b);
#else
return simde_mm_maskz_mov_ps(k, simde_mm_unpackhi_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_unpackhi_ps
#define _mm_maskz_unpackhi_ps(k, a, b) simde_mm_maskz_unpackhi_ps(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_unpackhi_pd (simde__m512d a, simde__m512d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_unpackhi_pd(a, b);
#else
simde__m512d_private
r_,
a_ = simde__m512d_to_private(a),
b_ = simde__m512d_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_)
r_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 64, a_.f64, b_.f64, 1, 9, 3, 11, 5, 13, 7, 15);
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
r_.m256d[0] = simde_mm256_unpackhi_pd(a_.m256d[0], b_.m256d[0]);
r_.m256d[1] = simde_mm256_unpackhi_pd(a_.m256d[1], b_.m256d[1]);
#else
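/* Portable fallback: for each 128-bit lane, interleave the high
 * (odd-indexed) doubles of a and b. */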
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0]) / 2) ; i++) {
r_.f64[2 * i] = a_.f64[2 * i + 1];
r_.f64[2 * i + 1] = b_.f64[2 * i + 1];
}
#endif
return simde__m512d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_unpackhi_pd
#define _mm512_unpackhi_pd(a, b) simde_mm512_unpackhi_pd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_unpackhi_pd(simde__m512d src, simde__mmask8 k, simde__m512d a, simde__m512d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_unpackhi_pd(src, k, a, b);
#else
return simde_mm512_mask_mov_pd(src, k, simde_mm512_unpackhi_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_unpackhi_pd
#define _mm512_mask_unpackhi_pd(src, k, a, b) simde_mm512_mask_unpackhi_pd(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_maskz_unpackhi_pd(simde__mmask8 k, simde__m512d a, simde__m512d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_unpackhi_pd(k, a, b);
#else
return simde_mm512_maskz_mov_pd(k, simde_mm512_unpackhi_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_unpackhi_pd
#define _mm512_maskz_unpackhi_pd(k, a, b) simde_mm512_maskz_unpackhi_pd(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_mask_unpackhi_pd(simde__m256d src, simde__mmask8 k, simde__m256d a, simde__m256d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_unpackhi_pd(src, k, a, b);
#else
return simde_mm256_mask_mov_pd(src, k, simde_mm256_unpackhi_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_unpackhi_pd
#define _mm256_mask_unpackhi_pd(src, k, a, b) simde_mm256_mask_unpackhi_pd(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_maskz_unpackhi_pd(simde__mmask8 k, simde__m256d a, simde__m256d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_unpackhi_pd(k, a, b);
#else
return simde_mm256_maskz_mov_pd(k, simde_mm256_unpackhi_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_unpackhi_pd
#define _mm256_maskz_unpackhi_pd(k, a, b) simde_mm256_maskz_unpackhi_pd(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_mask_unpackhi_pd(simde__m128d src, simde__mmask8 k, simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_unpackhi_pd(src, k, a, b);
#else
return simde_mm_mask_mov_pd(src, k, simde_mm_unpackhi_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_unpackhi_pd
#define _mm_mask_unpackhi_pd(src, k, a, b) simde_mm_mask_unpackhi_pd(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_maskz_unpackhi_pd(simde__mmask8 k, simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_unpackhi_pd(k, a, b);
#else
return simde_mm_maskz_mov_pd(k, simde_mm_unpackhi_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_unpackhi_pd
#define _mm_maskz_unpackhi_pd(k, a, b) simde_mm_maskz_unpackhi_pd(k, a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_UNPACKHI_H) */
/* File: src/openms/extern/simde/simde/x86/avx512/test.h
 * SPDX-License-Identifier: MIT
 *
 * Copyright:
 * 2020 Evan Nemerson <evan@nemerson.com>
 * 2020 Hidayat Khan <huk2209@gmail.com>
 * 2020 Christopher Moore <moore@free.fr>
 * 2021 Andrew Rodriguez <anrodriguez@linkedin.com>
 */
#if !defined(SIMDE_X86_AVX512_TEST_H)
#define SIMDE_X86_AVX512_TEST_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_test_epi32_mask (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_test_epi32_mask(a, b);
#else
simde__m256i_private
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
simde__mmask8 r = 0;
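/* Bit i of the result is set iff (a[i] & b[i]) is nonzero. */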
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) {
r |= HEDLEY_STATIC_CAST(simde__mmask8, !!(a_.i32[i] & b_.i32[i]) << i);
}
return r;
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_test_epi32_mask
#define _mm256_test_epi32_mask(a, b) simde_mm256_test_epi32_mask(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_mask_test_epi32_mask (simde__mmask8 k1, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_test_epi32_mask(k1, a, b);
#else
return simde_mm256_test_epi32_mask(a, b) & k1;
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_test_epi32_mask
#define _mm256_mask_test_epi32_mask(k1, a, b) simde_mm256_mask_test_epi32_mask(k1, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm512_test_epi16_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_test_epi16_mask(a, b);
#else
simde__m512i_private
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
simde__mmask32 r = 0;
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) {
r |= HEDLEY_STATIC_CAST(simde__mmask32, !!(a_.i16[i] & b_.i16[i]) << i);
}
return r;
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_test_epi16_mask
#define _mm512_test_epi16_mask(a, b) simde_mm512_test_epi16_mask(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm512_test_epi32_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_test_epi32_mask(a, b);
#else
simde__m512i_private
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
simde__mmask16 r = 0;
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) {
r |= HEDLEY_STATIC_CAST(simde__mmask16, !!(a_.i32[i] & b_.i32[i]) << i);
}
return r;
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_test_epi32_mask
#define _mm512_test_epi32_mask(a, b) simde_mm512_test_epi32_mask(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm512_test_epi64_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_test_epi64_mask(a, b);
#else
simde__m512i_private
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
simde__mmask8 r = 0;
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) {
r |= HEDLEY_STATIC_CAST(simde__mmask8, !!(a_.i64[i] & b_.i64[i]) << i);
}
return r;
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_test_epi64_mask
#define _mm512_test_epi64_mask(a, b) simde_mm512_test_epi64_mask(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask64
simde_mm512_test_epi8_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_test_epi8_mask(a, b);
#else
simde__m512i_private
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
simde__mmask64 r = 0;
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) {
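/* Widen to 64 bits before shifting: !!(...) is an int, and shifting an
 * int by i >= 32 would be undefined behavior. */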
r |= HEDLEY_STATIC_CAST(simde__mmask64, HEDLEY_STATIC_CAST(uint64_t, !!(a_.i8[i] & b_.i8[i])) << i);
}
return r;
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_test_epi8_mask
#define _mm512_test_epi8_mask(a, b) simde_mm512_test_epi8_mask(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm512_mask_test_epi16_mask (simde__mmask32 k1, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_test_epi16_mask(k1, a, b);
#else
return simde_mm512_test_epi16_mask(a, b) & k1;
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_test_epi16_mask
#define _mm512_mask_test_epi16_mask(k1, a, b) simde_mm512_mask_test_epi16_mask(k1, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm512_mask_test_epi32_mask (simde__mmask16 k1, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_test_epi32_mask(k1, a, b);
#else
return simde_mm512_test_epi32_mask(a, b) & k1;
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_test_epi32_mask
#define _mm512_mask_test_epi32_mask(k1, a, b) simde_mm512_mask_test_epi32_mask(k1, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm512_mask_test_epi64_mask (simde__mmask8 k1, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_test_epi64_mask(k1, a, b);
#else
return simde_mm512_test_epi64_mask(a, b) & k1;
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_test_epi64_mask
#define _mm512_mask_test_epi64_mask(k1, a, b) simde_mm512_mask_test_epi64_mask(k1, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask64
simde_mm512_mask_test_epi8_mask (simde__mmask64 k1, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_test_epi8_mask(k1, a, b);
#else
return simde_mm512_test_epi8_mask(a, b) & k1;
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_test_epi8_mask
#define _mm512_mask_test_epi8_mask(k1, a, b) simde_mm512_mask_test_epi8_mask(k1, a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_TEST_H) */
/* File: src/openms/extern/simde/simde/x86/avx512/fmsub.h
 * SPDX-License-Identifier: MIT
 *
 * Copyright:
 * 2020 Evan Nemerson <evan@nemerson.com>
 * 2020 kitegi <kitegi@users.noreply.github.com>
 */
#if !defined(SIMDE_X86_AVX512_FMSUB_H)
#define SIMDE_X86_AVX512_FMSUB_H
#include "types.h"
#include "mov.h"
#include "../fma.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
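/* Masking conventions for the fused-multiply-subtract family: the mask3
 * variants blend inactive lanes from c, the mask variants from a, and the
 * maskz variants zero them. */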
SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_mask3_fmsub_pd (simde__m256d a, simde__m256d b, simde__m256d c, simde__mmask8 k) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask3_fmsub_pd(a, b, c, k);
#else
return simde_mm256_mask_mov_pd(c, k, simde_mm256_fmsub_pd(a, b, c));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask3_fmsub_pd
#define _mm256_mask3_fmsub_pd(a, b, c, k) simde_mm256_mask3_fmsub_pd(a, b, c, k)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_mask_fmsub_pd (simde__m256d a, simde__mmask8 k, simde__m256d b, simde__m256d c) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_fmsub_pd(a, k, b, c);
#else
return simde_mm256_mask_mov_pd(a, k, simde_mm256_fmsub_pd(a, b, c));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_fmsub_pd
#define _mm256_mask_fmsub_pd(a, k, b, c) simde_mm256_mask_fmsub_pd(a, k, b, c)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_maskz_fmsub_pd (simde__mmask8 k, simde__m256d a, simde__m256d b, simde__m256d c) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_fmsub_pd(k, a, b, c);
#else
return simde_mm256_maskz_mov_pd(k, simde_mm256_fmsub_pd(a, b, c));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_fmsub_pd
#define _mm256_maskz_fmsub_pd(k, a, b, c) simde_mm256_maskz_fmsub_pd(k, a, b, c)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_mask3_fmsub_pd (simde__m128d a, simde__m128d b, simde__m128d c, simde__mmask8 k) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask3_fmsub_pd(a, b, c, k);
#else
return simde_mm_mask_mov_pd(c, k, simde_mm_fmsub_pd(a, b, c));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask3_fmsub_pd
#define _mm_mask3_fmsub_pd(a, b, c, k) simde_mm_mask3_fmsub_pd(a, b, c, k)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_mask_fmsub_pd (simde__m128d a, simde__mmask8 k, simde__m128d b, simde__m128d c) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_fmsub_pd(a, k, b, c);
#else
return simde_mm_mask_mov_pd(a, k, simde_mm_fmsub_pd(a, b, c));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_fmsub_pd
#define _mm_mask_fmsub_pd(a, k, b, c) simde_mm_mask_fmsub_pd(a, k, b, c)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_maskz_fmsub_pd (simde__mmask8 k, simde__m128d a, simde__m128d b, simde__m128d c) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_fmsub_pd(k, a, b, c);
#else
return simde_mm_maskz_mov_pd(k, simde_mm_fmsub_pd(a, b, c));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_fmsub_pd
#define _mm_maskz_fmsub_pd(k, a, b, c) simde_mm_maskz_fmsub_pd(k, a, b, c)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_mask3_fmsub_ps (simde__m256 a, simde__m256 b, simde__m256 c, simde__mmask8 k) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask3_fmsub_ps(a, b, c, k);
#else
return simde_mm256_mask_mov_ps(c, k, simde_mm256_fmsub_ps(a, b, c));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask3_fmsub_ps
#define _mm256_mask3_fmsub_ps(a, b, c, k) simde_mm256_mask3_fmsub_ps(a, b, c, k)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_mask_fmsub_ps (simde__m256 a, simde__mmask8 k, simde__m256 b, simde__m256 c) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_fmsub_ps(a, k, b, c);
#else
return simde_mm256_mask_mov_ps(a, k, simde_mm256_fmsub_ps(a, b, c));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_fmsub_ps
#define _mm256_mask_fmsub_ps(a, k, b, c) simde_mm256_mask_fmsub_ps(a, k, b, c)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_maskz_fmsub_ps (simde__mmask8 k, simde__m256 a, simde__m256 b, simde__m256 c) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_fmsub_ps(k, a, b, c);
#else
return simde_mm256_maskz_mov_ps(k, simde_mm256_fmsub_ps(a, b, c));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_fmsub_ps
#define _mm256_maskz_fmsub_ps(k, a, b, c) simde_mm256_maskz_fmsub_ps(k, a, b, c)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mask3_fmsub_ps (simde__m128 a, simde__m128 b, simde__m128 c, simde__mmask8 k) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask3_fmsub_ps(a, b, c, k);
#else
return simde_mm_mask_mov_ps(c, k, simde_mm_fmsub_ps(a, b, c));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask3_fmsub_ps
#define _mm_mask3_fmsub_ps(a, b, c, k) simde_mm_mask3_fmsub_ps(a, b, c, k)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mask_fmsub_ps (simde__m128 a, simde__mmask8 k, simde__m128 b, simde__m128 c) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_fmsub_ps(a, k, b, c);
#else
return simde_mm_mask_mov_ps(a, k, simde_mm_fmsub_ps(a, b, c));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_fmsub_ps
#define _mm_mask_fmsub_ps(a, k, b, c) simde_mm_mask_fmsub_ps(a, k, b, c)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_maskz_fmsub_ps (simde__mmask8 k, simde__m128 a, simde__m128 b, simde__m128 c) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_fmsub_ps(k, a, b, c);
#else
return simde_mm_maskz_mov_ps(k, simde_mm_fmsub_ps(a, b, c));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_fmsub_ps
#define _mm_maskz_fmsub_ps(k, a, b, c) simde_mm_maskz_fmsub_ps(k, a, b, c)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_fmsub_ps (simde__m512 a, simde__m512 b, simde__m512 c) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_fmsub_ps(a, b, c);
#else
simde__m512_private
r_,
a_ = simde__m512_to_private(a),
b_ = simde__m512_to_private(b),
c_ = simde__m512_to_private(c);
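/* N.B.: the portable fallbacks below perform the multiply and the subtract
 * as two separately rounded operations; unlike a hardware FMA they are not
 * fused (though a compiler may still contract them). */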
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256) / sizeof(r_.m256[0])) ; i++) {
r_.m256[i] = simde_mm256_fmsub_ps(a_.m256[i], b_.m256[i], c_.m256[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = (a_.f32 * b_.f32) - c_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] * b_.f32[i]) - c_.f32[i];
}
#endif
return simde__m512_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_fmsub_ps
#define _mm512_fmsub_ps(a, b, c) simde_mm512_fmsub_ps(a, b, c)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_fmsub_pd (simde__m512d a, simde__m512d b, simde__m512d c) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_fmsub_pd(a, b, c);
#else
simde__m512d_private
r_,
a_ = simde__m512d_to_private(a),
b_ = simde__m512d_to_private(b),
c_ = simde__m512d_to_private(c);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256d) / sizeof(r_.m256d[0])) ; i++) {
r_.m256d[i] = simde_mm256_fmsub_pd(a_.m256d[i], b_.m256d[i], c_.m256d[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f64 = (a_.f64 * b_.f64) - c_.f64;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = (a_.f64[i] * b_.f64[i]) - c_.f64[i];
}
#endif
return simde__m512d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_fmsub_pd
#define _mm512_fmsub_pd(a, b, c) simde_mm512_fmsub_pd(a, b, c)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_FMSUB_H) */
/* File: src/openms/extern/simde/simde/x86/avx512/mov.h
 * SPDX-License-Identifier: MIT
 *
 * Copyright:
 * 2020 Evan Nemerson <evan@nemerson.com>
 * 2020 Christopher Moore <moore@free.fr>
 */
#if !defined(SIMDE_X86_AVX512_MOV_H)
#define SIMDE_X86_AVX512_MOV_H
#include "types.h"
#include "cast.h"
#include "set.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_mov_epi8 (simde__m128i src, simde__mmask16 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_mov_epi8(src, k, a);
#else
simde__m128i_private
src_ = simde__m128i_to_private(src),
a_ = simde__m128i_to_private(a),
r_;
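/* Blend: bit i of k selects a[i] when set, src[i] when clear. */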
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = ((k >> i) & 1) ? a_.i8[i] : src_.i8[i];
}
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_mov_epi8
#define _mm_mask_mov_epi8(src, k, a) simde_mm_mask_mov_epi8(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_mov_epi16 (simde__m128i src, simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_mov_epi16(src, k, a);
#else
simde__m128i_private
src_ = simde__m128i_to_private(src),
a_ = simde__m128i_to_private(a),
r_;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = ((k >> i) & 1) ? a_.i16[i] : src_.i16[i];
}
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_mov_epi16
#define _mm_mask_mov_epi16(src, k, a) simde_mm_mask_mov_epi16(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_mov_epi32 (simde__m128i src, simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_mov_epi32(src, k, a);
#else
simde__m128i_private
src_ = simde__m128i_to_private(src),
a_ = simde__m128i_to_private(a),
r_;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ((k >> i) & 1) ? a_.i32[i] : src_.i32[i];
}
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_mov_epi32
#define _mm_mask_mov_epi32(src, k, a) simde_mm_mask_mov_epi32(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_mov_epi64 (simde__m128i src, simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_mov_epi64(src, k, a);
#else
simde__m128i_private
src_ = simde__m128i_to_private(src),
a_ = simde__m128i_to_private(a),
r_;
/* N.B. CM: No fallbacks as there are only two elements */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
r_.i64[i] = ((k >> i) & 1) ? a_.i64[i] : src_.i64[i];
}
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_mov_epi64
#define _mm_mask_mov_epi64(src, k, a) simde_mm_mask_mov_epi64(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_mask_mov_pd(simde__m128d src, simde__mmask8 k, simde__m128d a) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_mov_pd(src, k, a);
#else
return simde_mm_castsi128_pd(simde_mm_mask_mov_epi64(simde_mm_castpd_si128(src), k, simde_mm_castpd_si128(a)));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_mov_pd
#define _mm_mask_mov_pd(src, k, a) simde_mm_mask_mov_pd(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mask_mov_ps (simde__m128 src, simde__mmask8 k, simde__m128 a) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_mov_ps(src, k, a);
#else
return simde_mm_castsi128_ps(simde_mm_mask_mov_epi32(simde_mm_castps_si128(src), k, simde_mm_castps_si128(a)));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_mov_ps
#define _mm_mask_mov_ps(src, k, a) simde_mm_mask_mov_ps(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_mov_epi8 (simde__m256i src, simde__mmask32 k, simde__m256i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_mov_epi8(src, k, a);
#else
simde__m256i_private
r_,
src_ = simde__m256i_to_private(src),
a_ = simde__m256i_to_private(a);
#if defined(SIMDE_X86_SSSE3_NATIVE)
r_.m128i[0] = simde_mm_mask_mov_epi8(src_.m128i[0], HEDLEY_STATIC_CAST(simde__mmask16, k ), a_.m128i[0]);
r_.m128i[1] = simde_mm_mask_mov_epi8(src_.m128i[1], HEDLEY_STATIC_CAST(simde__mmask16, k >> 16), a_.m128i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = ((k >> i) & 1) ? a_.i8[i] : src_.i8[i];
}
#endif
return simde__m256i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_mov_epi8
#define _mm256_mask_mov_epi8(src, k, a) simde_mm256_mask_mov_epi8(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_mov_epi16 (simde__m256i src, simde__mmask16 k, simde__m256i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_mov_epi16(src, k, a);
#else
simde__m256i_private
src_ = simde__m256i_to_private(src),
a_ = simde__m256i_to_private(a),
r_;
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i[0] = simde_mm_mask_mov_epi16(src_.m128i[0], HEDLEY_STATIC_CAST(simde__mmask8, k ), a_.m128i[0]);
r_.m128i[1] = simde_mm_mask_mov_epi16(src_.m128i[1], HEDLEY_STATIC_CAST(simde__mmask8, k >> 8), a_.m128i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = ((k >> i) & 1) ? a_.i16[i] : src_.i16[i];
}
#endif
return simde__m256i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_mov_epi16
#define _mm256_mask_mov_epi16(src, k, a) simde_mm256_mask_mov_epi16(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_mov_epi32 (simde__m256i src, simde__mmask8 k, simde__m256i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_mov_epi32(src, k, a);
#else
simde__m256i_private
src_ = simde__m256i_to_private(src),
a_ = simde__m256i_to_private(a),
r_;
#if defined(SIMDE_X86_SSE2_NATIVE)
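/* Bits 0-3 of k drive the low 128-bit half, bits 4-7 the high half. */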
r_.m128i[0] = simde_mm_mask_mov_epi32(src_.m128i[0], k , a_.m128i[0]);
r_.m128i[1] = simde_mm_mask_mov_epi32(src_.m128i[1], k >> 4, a_.m128i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ((k >> i) & 1) ? a_.i32[i] : src_.i32[i];
}
#endif
return simde__m256i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_mov_epi32
#define _mm256_mask_mov_epi32(src, k, a) simde_mm256_mask_mov_epi32(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_mov_epi64 (simde__m256i src, simde__mmask8 k, simde__m256i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_mov_epi64(src, k, a);
#else
simde__m256i_private
src_ = simde__m256i_to_private(src),
a_ = simde__m256i_to_private(a),
r_;
/* N.B. CM: This fallback may not be faster as there are only four elements */
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i[0] = simde_mm_mask_mov_epi64(src_.m128i[0], k , a_.m128i[0]);
r_.m128i[1] = simde_mm_mask_mov_epi64(src_.m128i[1], k >> 2, a_.m128i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
r_.i64[i] = ((k >> i) & 1) ? a_.i64[i] : src_.i64[i];
}
#endif
return simde__m256i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_mov_epi64
#define _mm256_mask_mov_epi64(src, k, a) simde_mm256_mask_mov_epi64(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_mask_mov_pd (simde__m256d src, simde__mmask8 k, simde__m256d a) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_mov_pd(src, k, a);
#else
return simde_mm256_castsi256_pd(simde_mm256_mask_mov_epi64(simde_mm256_castpd_si256(src), k, simde_mm256_castpd_si256(a)));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_mov_pd
#define _mm256_mask_mov_pd(src, k, a) simde_mm256_mask_mov_pd(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_mask_mov_ps (simde__m256 src, simde__mmask8 k, simde__m256 a) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_mov_ps(src, k, a);
#else
return simde_mm256_castsi256_ps(simde_mm256_mask_mov_epi32(simde_mm256_castps_si256(src), k, simde_mm256_castps_si256(a)));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_mov_ps
#define _mm256_mask_mov_ps(src, k, a) simde_mm256_mask_mov_ps(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_mov_epi8 (simde__m512i src, simde__mmask64 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_mov_epi8(src, k, a);
#else
simde__m512i_private
src_ = simde__m512i_to_private(src),
a_ = simde__m512i_to_private(a),
r_;
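/* Split the 64-bit mask into two 32-bit halves, one per 256-bit half. */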
#if defined(SIMDE_X86_SSSE3_NATIVE)
r_.m256i[0] = simde_mm256_mask_mov_epi8(src_.m256i[0], HEDLEY_STATIC_CAST(simde__mmask32, k ), a_.m256i[0]);
r_.m256i[1] = simde_mm256_mask_mov_epi8(src_.m256i[1], HEDLEY_STATIC_CAST(simde__mmask32, k >> 32), a_.m256i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = ((k >> i) & 1) ? a_.i8[i] : src_.i8[i];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_mov_epi8
#define _mm512_mask_mov_epi8(src, k, a) simde_mm512_mask_mov_epi8(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_mov_epi16 (simde__m512i src, simde__mmask32 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_mov_epi16(src, k, a);
#else
simde__m512i_private
src_ = simde__m512i_to_private(src),
a_ = simde__m512i_to_private(a),
r_;
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m256i[0] = simde_mm256_mask_mov_epi16(src_.m256i[0], HEDLEY_STATIC_CAST(simde__mmask16, k ), a_.m256i[0]);
r_.m256i[1] = simde_mm256_mask_mov_epi16(src_.m256i[1], HEDLEY_STATIC_CAST(simde__mmask16, k >> 16), a_.m256i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = ((k >> i) & 1) ? a_.i16[i] : src_.i16[i];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_mov_epi16
#define _mm512_mask_mov_epi16(src, k, a) simde_mm512_mask_mov_epi16(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_mov_epi32 (simde__m512i src, simde__mmask16 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_mov_epi32(src, k, a);
#else
simde__m512i_private
src_ = simde__m512i_to_private(src),
a_ = simde__m512i_to_private(a),
r_;
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m256i[0] = simde_mm256_mask_mov_epi32(src_.m256i[0], HEDLEY_STATIC_CAST(simde__mmask8, k ), a_.m256i[0]);
r_.m256i[1] = simde_mm256_mask_mov_epi32(src_.m256i[1], HEDLEY_STATIC_CAST(simde__mmask8, k >> 8), a_.m256i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ((k >> i) & 1) ? a_.i32[i] : src_.i32[i];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_mov_epi32
#define _mm512_mask_mov_epi32(src, k, a) simde_mm512_mask_mov_epi32(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_mov_epi64 (simde__m512i src, simde__mmask8 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_mov_epi64(src, k, a);
#else
simde__m512i_private
src_ = simde__m512i_to_private(src),
a_ = simde__m512i_to_private(a),
r_;
/* N.B. CM: Without AVX2 this fallback may not be faster as there are only eight elements */
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m256i[0] = simde_mm256_mask_mov_epi64(src_.m256i[0], k , a_.m256i[0]);
r_.m256i[1] = simde_mm256_mask_mov_epi64(src_.m256i[1], k >> 4, a_.m256i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
r_.i64[i] = ((k >> i) & 1) ? a_.i64[i] : src_.i64[i];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_mov_epi64
#define _mm512_mask_mov_epi64(src, k, a) simde_mm512_mask_mov_epi64(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_mov_pd (simde__m512d src, simde__mmask8 k, simde__m512d a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_mov_pd(src, k, a);
#else
return simde_mm512_castsi512_pd(simde_mm512_mask_mov_epi64(simde_mm512_castpd_si512(src), k, simde_mm512_castpd_si512(a)));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_mov_pd
#define _mm512_mask_mov_pd(src, k, a) simde_mm512_mask_mov_pd(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_mov_ps (simde__m512 src, simde__mmask16 k, simde__m512 a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_mov_ps(src, k, a);
#else
return simde_mm512_castsi512_ps(simde_mm512_mask_mov_epi32(simde_mm512_castps_si512(src), k, simde_mm512_castps_si512(a)));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_mov_ps
#define _mm512_mask_mov_ps(src, k, a) simde_mm512_mask_mov_ps(src, k, a)
#endif
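/* N.B.: the simde_x_ prefix marks a SIMDe-internal helper with no native
 * counterpart, so there is no native branch and no alias macro. */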
SIMDE_FUNCTION_ATTRIBUTES
simde__m512h
simde_x_mm512_mask_mov_ph (simde__m512h src, simde__mmask32 k, simde__m512h a) {
return simde_mm512_castsi512_ph(simde_mm512_mask_mov_epi16(simde_mm512_castph_si512(src), k, simde_mm512_castph_si512(a)));
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_mov_epi8 (simde__mmask16 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_mov_epi8(k, a);
#else
simde__m128i_private
a_ = simde__m128i_to_private(a),
r_;
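/* Zero-masking: bit i of k keeps a[i] when set; clear bits zero the lane. */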
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = ((k >> i) & 1) ? a_.i8[i] : INT8_C(0);
}
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_mov_epi8
#define _mm_maskz_mov_epi8(k, a) simde_mm_maskz_mov_epi8(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_mov_epi16 (simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_mov_epi16(k, a);
#else
simde__m128i_private
a_ = simde__m128i_to_private(a),
r_;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = ((k >> i) & 1) ? a_.i16[i] : INT16_C(0);
}
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_mov_epi16
#define _mm_maskz_mov_epi16(k, a) simde_mm_maskz_mov_epi16(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_mov_epi32 (simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_mov_epi32(k, a);
#else
simde__m128i_private
a_ = simde__m128i_to_private(a),
r_;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ((k >> i) & 1) ? a_.i32[i] : INT32_C(0);
}
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_mov_epi32
#define _mm_maskz_mov_epi32(k, a) simde_mm_maskz_mov_epi32(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_mov_epi64 (simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_mov_epi64(k, a);
#else
simde__m128i_private
a_ = simde__m128i_to_private(a),
r_;
/* N.B. CM: No fallbacks as there are only two elements */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
r_.i64[i] = ((k >> i) & 1) ? a_.i64[i] : INT64_C(0);
}
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_mov_epi64
#define _mm_maskz_mov_epi64(k, a) simde_mm_maskz_mov_epi64(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_maskz_mov_pd (simde__mmask8 k, simde__m128d a) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_mov_pd(k, a);
#else
return simde_mm_castsi128_pd(simde_mm_maskz_mov_epi64(k, simde_mm_castpd_si128(a)));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_mov_pd
#define _mm_maskz_mov_pd(k, a) simde_mm_maskz_mov_pd(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_maskz_mov_ps (simde__mmask8 k, simde__m128 a) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_mov_ps(k, a);
#else
return simde_mm_castsi128_ps(simde_mm_maskz_mov_epi32(k, simde_mm_castps_si128(a)));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_mov_ps
#define _mm_maskz_mov_ps(k, a) simde_mm_maskz_mov_ps(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_mov_epi8 (simde__mmask32 k, simde__m256i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_mov_epi8(k, a);
#else
simde__m256i_private
a_ = simde__m256i_to_private(a),
r_;
#if defined(SIMDE_X86_SSSE3_NATIVE)
r_.m128i[0] = simde_mm_maskz_mov_epi8(HEDLEY_STATIC_CAST(simde__mmask16, k ), a_.m128i[0]);
r_.m128i[1] = simde_mm_maskz_mov_epi8(HEDLEY_STATIC_CAST(simde__mmask16, k >> 16), a_.m128i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = ((k >> i) & 1) ? a_.i8[i] : INT8_C(0);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_mov_epi8
#define _mm256_maskz_mov_epi8(k, a) simde_mm256_maskz_mov_epi8(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_mov_epi16 (simde__mmask16 k, simde__m256i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_mov_epi16(k, a);
#else
simde__m256i_private
a_ = simde__m256i_to_private(a),
r_;
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i[0] = simde_mm_maskz_mov_epi16(HEDLEY_STATIC_CAST(simde__mmask8, k ), a_.m128i[0]);
r_.m128i[1] = simde_mm_maskz_mov_epi16(HEDLEY_STATIC_CAST(simde__mmask8, k >> 8), a_.m128i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = ((k >> i) & 1) ? a_.i16[i] : INT16_C(0);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_mov_epi16
#define _mm256_maskz_mov_epi16(k, a) simde_mm256_maskz_mov_epi16(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_mov_epi32 (simde__mmask8 k, simde__m256i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_mov_epi32(k, a);
#else
simde__m256i_private
a_ = simde__m256i_to_private(a),
r_;
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i[0] = simde_mm_maskz_mov_epi32(k , a_.m128i[0]);
r_.m128i[1] = simde_mm_maskz_mov_epi32(k >> 4, a_.m128i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ((k >> i) & 1) ? a_.i32[i] : INT32_C(0);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_mov_epi32
#define _mm256_maskz_mov_epi32(k, a) simde_mm256_maskz_mov_epi32(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_mov_epi64 (simde__mmask8 k, simde__m256i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_mov_epi64(k, a);
#else
simde__m256i_private
a_ = simde__m256i_to_private(a),
r_;
/* N.B. CM: This fallback may not be faster as there are only four elements */
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i[0] = simde_mm_maskz_mov_epi64(k , a_.m128i[0]);
r_.m128i[1] = simde_mm_maskz_mov_epi64(k >> 2, a_.m128i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
r_.i64[i] = ((k >> i) & 1) ? a_.i64[i] : INT64_C(0);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_mov_epi64
#define _mm256_maskz_mov_epi64(k, a) simde_mm256_maskz_mov_epi64(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_maskz_mov_pd (simde__mmask8 k, simde__m256d a) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_mov_pd(k, a);
#else
return simde_mm256_castsi256_pd(simde_mm256_maskz_mov_epi64(k, simde_mm256_castpd_si256(a)));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_mov_pd
#define _mm256_maskz_mov_pd(k, a) simde_mm256_maskz_mov_pd(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_maskz_mov_ps (simde__mmask8 k, simde__m256 a) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_mov_ps(k, a);
#else
return simde_mm256_castsi256_ps(simde_mm256_maskz_mov_epi32(k, simde_mm256_castps_si256(a)));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_mov_ps
#define _mm256_maskz_mov_ps(k, a) simde_mm256_maskz_mov_ps(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_mov_epi8 (simde__mmask64 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_mov_epi8(k, a);
#else
simde__m512i_private
a_ = simde__m512i_to_private(a),
r_;
#if defined(SIMDE_X86_SSSE3_NATIVE)
r_.m256i[0] = simde_mm256_maskz_mov_epi8(HEDLEY_STATIC_CAST(simde__mmask32, k ), a_.m256i[0]);
r_.m256i[1] = simde_mm256_maskz_mov_epi8(HEDLEY_STATIC_CAST(simde__mmask32, k >> 32), a_.m256i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = ((k >> i) & 1) ? a_.i8[i] : INT8_C(0);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_mov_epi8
#define _mm512_maskz_mov_epi8(k, a) simde_mm512_maskz_mov_epi8(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_mov_epi16 (simde__mmask32 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_mov_epi16(k, a);
#else
simde__m512i_private
a_ = simde__m512i_to_private(a),
r_;
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m256i[0] = simde_mm256_maskz_mov_epi16(HEDLEY_STATIC_CAST(simde__mmask16, k ), a_.m256i[0]);
r_.m256i[1] = simde_mm256_maskz_mov_epi16(HEDLEY_STATIC_CAST(simde__mmask16, k >> 16), a_.m256i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = ((k >> i) & 1) ? a_.i16[i] : INT16_C(0);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_mov_epi16
#define _mm512_maskz_mov_epi16(k, a) simde_mm512_maskz_mov_epi16(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_mov_epi32 (simde__mmask16 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_mov_epi32(k, a);
#else
simde__m512i_private
a_ = simde__m512i_to_private(a),
r_;
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m256i[0] = simde_mm256_maskz_mov_epi32(HEDLEY_STATIC_CAST(simde__mmask8, k ), a_.m256i[0]);
r_.m256i[1] = simde_mm256_maskz_mov_epi32(HEDLEY_STATIC_CAST(simde__mmask8, k >> 8), a_.m256i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ((k >> i) & 1) ? a_.i32[i] : INT32_C(0);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_mov_epi32
#define _mm512_maskz_mov_epi32(k, a) simde_mm512_maskz_mov_epi32(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_mov_epi64 (simde__mmask8 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_mov_epi64(k, a);
#else
simde__m512i_private
a_ = simde__m512i_to_private(a),
r_;
/* N.B. CM: Without AVX2 this fallback may not be faster as there are only eight elements */
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m256i[0] = simde_mm256_maskz_mov_epi64(k , a_.m256i[0]);
r_.m256i[1] = simde_mm256_maskz_mov_epi64(k >> 4, a_.m256i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
r_.i64[i] = ((k >> i) & 1) ? a_.i64[i] : INT64_C(0);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_mov_epi64
#define _mm512_maskz_mov_epi64(k, a) simde_mm512_maskz_mov_epi64(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_maskz_mov_pd (simde__mmask8 k, simde__m512d a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_mov_pd(k, a);
#else
return simde_mm512_castsi512_pd(simde_mm512_maskz_mov_epi64(k, simde_mm512_castpd_si512(a)));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_mov_pd
#define _mm512_maskz_mov_pd(k, a) simde_mm512_maskz_mov_pd(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_maskz_mov_ps (simde__mmask16 k, simde__m512 a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_mov_ps(k, a);
#else
return simde_mm512_castsi512_ps(simde_mm512_maskz_mov_epi32(k, simde_mm512_castps_si512(a)));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_mov_ps
#define _mm512_maskz_mov_ps(k, a) simde_mm512_maskz_mov_ps(k, a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_MOV_H) */
/* File: src/openms/extern/simde/simde/x86/avx512/max.h
 * SPDX-License-Identifier: MIT
 *
 * Copyright:
 * 2020 Evan Nemerson <evan@nemerson.com>
 * 2020 Hidayat Khan <huk2209@gmail.com>
 */
#if !defined(SIMDE_X86_AVX512_MAX_H)
#define SIMDE_X86_AVX512_MAX_H
#include "types.h"
#include "../avx2.h"
#include "mov.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_max_epi8 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_max_epi8(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = (a_.i8[i] > b_.i8[i]) ? a_.i8[i] : b_.i8[i];
}
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_max_epi8
#define _mm512_max_epi8(a, b) simde_mm512_max_epi8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_max_epi8 (simde__m512i src, simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_max_epi8(src, k, a, b);
#else
return simde_mm512_mask_mov_epi8(src, k, simde_mm512_max_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_max_epi8
#define _mm512_mask_max_epi8(src, k, a, b) simde_mm512_mask_max_epi8(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_max_epi8 (simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_max_epi8(k, a, b);
#else
return simde_mm512_maskz_mov_epi8(k, simde_mm512_max_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_max_epi8
#define _mm512_maskz_max_epi8(k, a, b) simde_mm512_maskz_max_epi8(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_max_epu8 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_max_epu8(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
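/* On targets whose natural vector width is at most 256 bits, process the
 * vector as two 256-bit halves via the AVX2 helper. */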
for (size_t i = 0 ; i < (sizeof(a_.m256i) / sizeof(a_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_max_epu8(a_.m256i[i], b_.m256i[i]);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_max_epu8
#define _mm512_max_epu8(a, b) simde_mm512_max_epu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_max_epu8 (simde__m512i src, simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_max_epu8(src, k, a, b);
#else
return simde_mm512_mask_mov_epi8(src, k, simde_mm512_max_epu8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_max_epu8
#define _mm512_mask_max_epu8(src, k, a, b) simde_mm512_mask_max_epu8(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_max_epu8 (simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_max_epu8(k, a, b);
#else
return simde_mm512_maskz_mov_epi8(k, simde_mm512_max_epu8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_max_epu8
#define _mm512_maskz_max_epu8(k, a, b) simde_mm512_maskz_max_epu8(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_max_epi16 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_max_epi16(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_max_epi16
#define _mm512_max_epi16(a, b) simde_mm512_max_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_max_epi16 (simde__m512i src, simde__mmask32 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_max_epi16(src, k, a, b);
#else
return simde_mm512_mask_mov_epi16(src, k, simde_mm512_max_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_max_epi16
#define _mm512_mask_max_epi16(src, k, a, b) simde_mm512_mask_max_epi16(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_max_epi16 (simde__mmask32 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_max_epi16(k, a, b);
#else
return simde_mm512_maskz_mov_epi16(k, simde_mm512_max_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_max_epi16
#define _mm512_maskz_max_epi16(k, a, b) simde_mm512_maskz_max_epi16(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_max_epu16 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_max_epu16(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(a_.m256i) / sizeof(a_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_max_epu16(a_.m256i[i], b_.m256i[i]);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = (a_.u16[i] > b_.u16[i]) ? a_.u16[i] : b_.u16[i];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_max_epu16
#define _mm512_max_epu16(a, b) simde_mm512_max_epu16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_max_epu16 (simde__m512i src, simde__mmask32 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_max_epu16(src, k, a, b);
#else
return simde_mm512_mask_mov_epi16(src, k, simde_mm512_max_epu16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_max_epu16
#define _mm512_mask_max_epu16(src, k, a, b) simde_mm512_mask_max_epu16(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_max_epu16 (simde__mmask32 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_max_epu16(k, a, b);
#else
return simde_mm512_maskz_mov_epi16(k, simde_mm512_max_epu16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_max_epu16
#define _mm512_maskz_max_epu16(k, a, b) simde_mm512_maskz_max_epu16(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_max_epi32 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_max_epi32(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if defined(SIMDE_X86_AVX2_NATIVE)
r_.m256i[0] = simde_mm256_max_epi32(a_.m256i[0], b_.m256i[0]);
r_.m256i[1] = simde_mm256_max_epi32(a_.m256i[1], b_.m256i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] > b_.i32[i] ? a_.i32[i] : b_.i32[i];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_max_epi32
#define _mm512_max_epi32(a, b) simde_mm512_max_epi32(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_max_epi32(simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_max_epi32(src, k, a, b);
#else
return simde_mm512_mask_mov_epi32(src, k, simde_mm512_max_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_max_epi32
#define _mm512_mask_max_epi32(src, k, a, b) simde_mm512_mask_max_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_max_epi32(simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_max_epi32(k, a, b);
#else
return simde_mm512_maskz_mov_epi32(k, simde_mm512_max_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_max_epi32
#define _mm512_maskz_max_epi32(k, a, b) simde_mm512_maskz_max_epi32(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_max_epu32 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_max_epu32(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if defined(SIMDE_X86_AVX2_NATIVE)
r_.m256i[0] = simde_mm256_max_epu32(a_.m256i[0], b_.m256i[0]);
r_.m256i[1] = simde_mm256_max_epu32(a_.m256i[1], b_.m256i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = (a_.u32[i] > b_.u32[i]) ? a_.u32[i] : b_.u32[i];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_max_epu32
#define _mm512_max_epu32(a, b) simde_mm512_max_epu32(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_max_epu32(simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_max_epu32(src, k, a, b);
#else
return simde_mm512_mask_mov_epi32(src, k, simde_mm512_max_epu32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_max_epu32
#define _mm512_mask_max_epu32(src, k, a, b) simde_mm512_mask_max_epu32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_max_epu32(simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_max_epu32(k, a, b);
#else
return simde_mm512_maskz_mov_epi32(k, simde_mm512_max_epu32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_max_epu32
#define _mm512_maskz_max_epu32(k, a, b) simde_mm512_maskz_max_epu32(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_max_epi64 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_max_epi64(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
r_.i64[i] = a_.i64[i] > b_.i64[i] ? a_.i64[i] : b_.i64[i];
}
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_max_epi64
#define _mm512_max_epi64(a, b) simde_mm512_max_epi64(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_max_epi64(simde__m512i src, simde__mmask8 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_max_epi64(src, k, a, b);
#else
return simde_mm512_mask_mov_epi64(src, k, simde_mm512_max_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_max_epi64
#define _mm512_mask_max_epi64(src, k, a, b) simde_mm512_mask_max_epi64(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_max_epi64(simde__mmask8 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_max_epi64(k, a, b);
#else
return simde_mm512_maskz_mov_epi64(k, simde_mm512_max_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_max_epi64
#define _mm512_maskz_max_epi64(k, a, b) simde_mm512_maskz_max_epi64(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_max_epu64 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_max_epu64(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
r_.u64[i] = (a_.u64[i] > b_.u64[i]) ? a_.u64[i] : b_.u64[i];
}
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_max_epu64
#define _mm512_max_epu64(a, b) simde_mm512_max_epu64(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_max_epu64(simde__m512i src, simde__mmask8 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_max_epu64(src, k, a, b);
#else
return simde_mm512_mask_mov_epi64(src, k, simde_mm512_max_epu64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_max_epu64
#define _mm512_mask_max_epu64(src, k, a, b) simde_mm512_mask_max_epu64(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_max_epu64(simde__mmask8 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_max_epu64(k, a, b);
#else
return simde_mm512_maskz_mov_epi64(k, simde_mm512_max_epu64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_max_epu64
#define _mm512_maskz_max_epu64(k, a, b) simde_mm512_maskz_max_epu64(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_max_ps (simde__m512 a, simde__m512 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_max_ps(a, b);
#else
simde__m512_private
r_,
a_ = simde__m512_to_private(a),
b_ = simde__m512_to_private(b);
#if defined(SIMDE_X86_AVX2_NATIVE)
r_.m256[0] = simde_mm256_max_ps(a_.m256[0], b_.m256[0]);
r_.m256[1] = simde_mm256_max_ps(a_.m256[1], b_.m256[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] > b_.f32[i] ? a_.f32[i] : b_.f32[i];
}
#endif
return simde__m512_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_max_ps
#define _mm512_max_ps(a, b) simde_mm512_max_ps(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_max_ps(simde__m512 src, simde__mmask16 k, simde__m512 a, simde__m512 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_max_ps(src, k, a, b);
#else
return simde_mm512_mask_mov_ps(src, k, simde_mm512_max_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_max_ps
#define _mm512_mask_max_ps(src, k, a, b) simde_mm512_mask_max_ps(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_maskz_max_ps(simde__mmask16 k, simde__m512 a, simde__m512 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_max_ps(k, a, b);
#else
return simde_mm512_maskz_mov_ps(k, simde_mm512_max_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_max_ps
#define _mm512_maskz_max_ps(k, a, b) simde_mm512_maskz_max_ps(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_max_pd (simde__m512d a, simde__m512d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_max_pd(a, b);
#else
simde__m512d_private
r_,
a_ = simde__m512d_to_private(a),
b_ = simde__m512d_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = a_.f64[i] > b_.f64[i] ? a_.f64[i] : b_.f64[i];
}
return simde__m512d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_max_pd
#define _mm512_max_pd(a, b) simde_mm512_max_pd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512h
simde_mm512_max_ph (simde__m512h a, simde__m512h b) {
#if defined(SIMDE_X86_AVX512FP16_NATIVE)
return _mm512_max_ph(a, b);
#else
simde__m512h_private
r_,
a_ = simde__m512h_to_private(a),
b_ = simde__m512h_to_private(b);
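    /* simde_float16 can be a storage-only type without native operators,
     * so lanes are widened to float32 for the comparison while the result
     * keeps the original half-precision bits. */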
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f16) / sizeof(r_.f16[0])) ; i++) {
r_.f16[i] = simde_float16_to_float32(a_.f16[i]) > simde_float16_to_float32(b_.f16[i]) ? a_.f16[i] : b_.f16[i];
}
return simde__m512h_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512FP16_ENABLE_NATIVE_ALIASES)
#undef _mm512_max_ph
#define _mm512_max_ph(a, b) simde_mm512_max_ph(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_max_pd(simde__m512d src, simde__mmask8 k, simde__m512d a, simde__m512d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_max_pd(src, k, a, b);
#else
return simde_mm512_mask_mov_pd(src, k, simde_mm512_max_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_max_pd
#define _mm512_mask_max_pd(src, k, a, b) simde_mm512_mask_max_pd(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_maskz_max_pd(simde__mmask8 k, simde__m512d a, simde__m512d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_max_pd(k, a, b);
#else
return simde_mm512_maskz_mov_pd(k, simde_mm512_max_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_max_pd
#define _mm512_maskz_max_pd(k, a, b) simde_mm512_maskz_max_pd(k, a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_MAX_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/testn.h | .h | 2,130 | 64 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Andrew Rodriguez <anrodriguez@linkedin.com>
*/
#if !defined(SIMDE_X86_AVX512_TESTN_H)
#define SIMDE_X86_AVX512_TESTN_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm512_testn_epi64_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_testn_epi64_mask(a, b);
#else
simde__m512i_private
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
simde__mmask8 r = 0;
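    /* TESTN: bit i of the mask is set when (a AND b) is all zeros in
     * 64-bit lane i; the OR-reduction accumulates one predicate per lane. */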
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) {
r |= (!(a_.i64[i] & b_.i64[i])) << i;
}
return r;
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_testn_epi64_mask
#define _mm512_testn_epi64_mask(a, b) simde_mm512_testn_epi64_mask(a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_TESTN_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/kshift.h | .h | 6,599 | 153 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Christopher Moore <moore@free.fr>
*/
#if !defined(SIMDE_X86_AVX512_KSHIFT_H)
#define SIMDE_X86_AVX512_KSHIFT_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
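/* These emulate the KSHIFT* mask instructions: the count is an 8-bit
 * immediate, and any count at or beyond the mask width yields 0, matching
 * the hardware. For example, simde_kshiftli_mask16(0x0001, 4) == 0x0010,
 * while simde_kshiftli_mask16(0x0001, 16) == 0. */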
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_kshiftli_mask16 (simde__mmask16 a, unsigned int count)
SIMDE_REQUIRE_CONSTANT_RANGE(count, 0, 255) {
return HEDLEY_STATIC_CAST(simde__mmask16, (count <= 15) ? (a << count) : 0);
}
#if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0)) && (!defined(SIMDE_DETECT_CLANG_VERSION) || SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0))
#define simde_kshiftli_mask16(a, count) _kshiftli_mask16(a, count)
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _kshiftli_mask16
#define _kshiftli_mask16(a, count) simde_kshiftli_mask16(a, count)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_kshiftli_mask32 (simde__mmask32 a, unsigned int count)
SIMDE_REQUIRE_CONSTANT_RANGE(count, 0, 255) {
return (count <= 31) ? (a << count) : 0;
}
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0)) && (!defined(SIMDE_DETECT_CLANG_VERSION) || SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0))
#define simde_kshiftli_mask32(a, count) _kshiftli_mask32(a, count)
#endif
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _kshiftli_mask32
#define _kshiftli_mask32(a, count) simde_kshiftli_mask32(a, count)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask64
simde_kshiftli_mask64 (simde__mmask64 a, unsigned int count)
SIMDE_REQUIRE_CONSTANT_RANGE(count, 0, 255) {
return (count <= 63) ? (a << count) : 0;
}
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0)) && (!defined(SIMDE_DETECT_CLANG_VERSION) || SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0))
#define simde_kshiftli_mask64(a, count) _kshiftli_mask64(a, count)
#endif
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _kshiftli_mask64
#define _kshiftli_mask64(a, count) simde_kshiftli_mask64(a, count)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_kshiftli_mask8 (simde__mmask8 a, unsigned int count)
SIMDE_REQUIRE_CONSTANT_RANGE(count, 0, 255) {
return HEDLEY_STATIC_CAST(simde__mmask8, (count <= 7) ? (a << count) : 0);
}
#if defined(SIMDE_X86_AVX512DQ_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0)) && (!defined(SIMDE_DETECT_CLANG_VERSION) || SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0))
#define simde_kshiftli_mask8(a, count) _kshiftli_mask8(a, count)
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _kshiftli_mask8
#define _kshiftli_mask8(a, count) simde_kshiftli_mask8(a, count)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_kshiftri_mask16 (simde__mmask16 a, unsigned int count)
SIMDE_REQUIRE_CONSTANT_RANGE(count, 0, 255) {
return HEDLEY_STATIC_CAST(simde__mmask16, (count <= 15) ? (a >> count) : 0);
}
#if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0)) && (!defined(SIMDE_DETECT_CLANG_VERSION) || SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0))
#define simde_kshiftri_mask16(a, count) _kshiftri_mask16(a, count)
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _kshiftri_mask16
#define _kshiftri_mask16(a, count) simde_kshiftri_mask16(a, count)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_kshiftri_mask32 (simde__mmask32 a, unsigned int count)
SIMDE_REQUIRE_CONSTANT_RANGE(count, 0, 255) {
return (count <= 31) ? (a >> count) : 0;
}
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0)) && (!defined(SIMDE_DETECT_CLANG_VERSION) || SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0))
#define simde_kshiftri_mask32(a, count) _kshiftri_mask32(a, count)
#endif
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _kshiftri_mask32
#define _kshiftri_mask32(a, count) simde_kshiftri_mask32(a, count)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask64
simde_kshiftri_mask64 (simde__mmask64 a, unsigned int count)
SIMDE_REQUIRE_CONSTANT_RANGE(count, 0, 255) {
return (count <= 63) ? (a >> count) : 0;
}
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0)) && (!defined(SIMDE_DETECT_CLANG_VERSION) || SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0))
#define simde_kshiftri_mask64(a, count) _kshiftri_mask64(a, count)
#endif
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _kshiftri_mask64
#define _kshiftri_mask64(a, count) simde_kshiftri_mask64(a, count)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_kshiftri_mask8 (simde__mmask8 a, unsigned int count)
SIMDE_REQUIRE_CONSTANT_RANGE(count, 0, 255) {
return HEDLEY_STATIC_CAST(simde__mmask8, (count <= 7) ? (a >> count) : 0);
}
#if defined(SIMDE_X86_AVX512DQ_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0)) && (!defined(SIMDE_DETECT_CLANG_VERSION) || SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0))
#define simde_kshiftri_mask8(a, count) _kshiftri_mask8(a, count)
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _kshiftri_mask8
#define _kshiftri_mask8(a, count) simde_kshiftri_mask8(a, count)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_KSHIFT_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/store.h | .h | 3,507 | 94 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_X86_AVX512_STORE_H)
#define SIMDE_X86_AVX512_STORE_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
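/* These are the aligned 512-bit stores: mem_addr must be 64-byte aligned.
 * The portable path is a memcpy, with SIMDE_ALIGN_ASSUME_LIKE passing the
 * alignment assumption on to the compiler so it can emit aligned stores. */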
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm512_store_ps (void * mem_addr, simde__m512 a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
_mm512_store_ps(mem_addr, a);
#else
simde_memcpy(SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m512), &a, sizeof(a));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_store_ps
#define _mm512_store_ps(mem_addr, a) simde_mm512_store_ps(mem_addr, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm512_store_pd (void * mem_addr, simde__m512d a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
_mm512_store_pd(mem_addr, a);
#else
simde_memcpy(SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m512d), &a, sizeof(a));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_store_pd
#define _mm512_store_pd(mem_addr, a) simde_mm512_store_pd(mem_addr, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm512_store_si512 (void * mem_addr, simde__m512i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
_mm512_store_si512(HEDLEY_REINTERPRET_CAST(void*, mem_addr), a);
#else
simde_memcpy(SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m512i), &a, sizeof(a));
#endif
}
#define simde_mm512_store_epi8(mem_addr, a) simde_mm512_store_si512(mem_addr, a)
#define simde_mm512_store_epi16(mem_addr, a) simde_mm512_store_si512(mem_addr, a)
#define simde_mm512_store_epi32(mem_addr, a) simde_mm512_store_si512(mem_addr, a)
#define simde_mm512_store_epi64(mem_addr, a) simde_mm512_store_si512(mem_addr, a)
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_store_epi8
#undef _mm512_store_epi16
#undef _mm512_store_epi32
#undef _mm512_store_epi64
#undef _mm512_store_si512
#define _mm512_store_si512(mem_addr, a) simde_mm512_store_si512(mem_addr, a)
#define _mm512_store_epi8(mem_addr, a) simde_mm512_store_si512(mem_addr, a)
#define _mm512_store_epi16(mem_addr, a) simde_mm512_store_si512(mem_addr, a)
#define _mm512_store_epi32(mem_addr, a) simde_mm512_store_si512(mem_addr, a)
#define _mm512_store_epi64(mem_addr, a) simde_mm512_store_si512(mem_addr, a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_STORE_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/scalef.h | .h | 14,399 | 390 | #if !defined(SIMDE_X86_AVX512_SCALEF_H)
#define SIMDE_X86_AVX512_SCALEF_H
#include "types.h"
#include "flushsubnormal.h"
#include "../svml.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_scalef_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_scalef_ps(a, b);
#else
return simde_mm_mul_ps(simde_x_mm_flushsubnormal_ps(a), simde_mm_exp2_ps(simde_mm_floor_ps(simde_x_mm_flushsubnormal_ps(b))));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_scalef_ps
#define _mm_scalef_ps(a, b) simde_mm_scalef_ps(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mask_scalef_ps (simde__m128 src, simde__mmask8 k, simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_scalef_ps(src, k, a, b);
#else
return simde_mm_mask_mov_ps(src, k, simde_mm_scalef_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_scalef_ps
#define _mm_mask_scalef_ps(src, k, a, b) simde_mm_mask_scalef_ps(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_maskz_scalef_ps (simde__mmask8 k, simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_scalef_ps(k, a, b);
#else
return simde_mm_maskz_mov_ps(k, simde_mm_scalef_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_scalef_ps
#define _mm_maskz_scalef_ps(k, a, b) simde_mm_maskz_scalef_ps(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_scalef_ps (simde__m256 a, simde__m256 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_scalef_ps(a, b);
#else
return simde_mm256_mul_ps(simde_x_mm256_flushsubnormal_ps(a), simde_mm256_exp2_ps(simde_mm256_floor_ps(simde_x_mm256_flushsubnormal_ps(b))));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_scalef_ps
#define _mm256_scalef_ps(a, b) simde_mm256_scalef_ps(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_mask_scalef_ps (simde__m256 src, simde__mmask8 k, simde__m256 a, simde__m256 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_scalef_ps(src, k, a, b);
#else
return simde_mm256_mask_mov_ps(src, k, simde_mm256_scalef_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_scalef_ps
#define _mm256_mask_scalef_ps(src, k, a, b) simde_mm256_mask_scalef_ps(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_maskz_scalef_ps (simde__mmask8 k, simde__m256 a, simde__m256 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_scalef_ps(k, a, b);
#else
return simde_mm256_maskz_mov_ps(k, simde_mm256_scalef_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_scalef_ps
#define _mm256_maskz_scalef_ps(k, a, b) simde_mm256_maskz_scalef_ps(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_scalef_ps (simde__m512 a, simde__m512 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_scalef_ps(a, b);
#else
return simde_mm512_mul_ps(simde_x_mm512_flushsubnormal_ps(a), simde_mm512_exp2_ps(simde_mm512_floor_ps(simde_x_mm512_flushsubnormal_ps(b))));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_scalef_ps
#define _mm512_scalef_ps(a, b) simde_mm512_scalef_ps(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_scalef_ps (simde__m512 src, simde__mmask16 k, simde__m512 a, simde__m512 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_scalef_ps(src, k, a, b);
#else
return simde_mm512_mask_mov_ps(src, k, simde_mm512_scalef_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_scalef_ps
#define _mm512_mask_scalef_ps(src, k, a, b) simde_mm512_mask_scalef_ps(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_maskz_scalef_ps (simde__mmask16 k, simde__m512 a, simde__m512 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_scalef_ps(k, a, b);
#else
return simde_mm512_maskz_mov_ps(k, simde_mm512_scalef_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_scalef_ps
#define _mm512_maskz_scalef_ps(k, a, b) simde_mm512_maskz_scalef_ps(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_scalef_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_scalef_pd(a, b);
#else
return simde_mm_mul_pd(simde_x_mm_flushsubnormal_pd(a), simde_mm_exp2_pd(simde_mm_floor_pd(simde_x_mm_flushsubnormal_pd(b))));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_scalef_pd
#define _mm_scalef_pd(a, b) simde_mm_scalef_pd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_mask_scalef_pd (simde__m128d src, simde__mmask8 k, simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_scalef_pd(src, k, a, b);
#else
return simde_mm_mask_mov_pd(src, k, simde_mm_scalef_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_scalef_pd
#define _mm_mask_scalef_pd(src, k, a, b) simde_mm_mask_scalef_pd(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_maskz_scalef_pd (simde__mmask8 k, simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_scalef_pd(k, a, b);
#else
return simde_mm_maskz_mov_pd(k, simde_mm_scalef_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_scalef_pd
#define _mm_maskz_scalef_pd(k, a, b) simde_mm_maskz_scalef_pd(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_scalef_pd (simde__m256d a, simde__m256d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_scalef_pd(a, b);
#else
return simde_mm256_mul_pd(simde_x_mm256_flushsubnormal_pd(a), simde_mm256_exp2_pd(simde_mm256_floor_pd(simde_x_mm256_flushsubnormal_pd(b))));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_scalef_pd
#define _mm256_scalef_pd(a, b) simde_mm256_scalef_pd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_mask_scalef_pd (simde__m256d src, simde__mmask8 k, simde__m256d a, simde__m256d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_scalef_pd(src, k, a, b);
#else
return simde_mm256_mask_mov_pd(src, k, simde_mm256_scalef_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_scalef_pd
#define _mm256_mask_scalef_pd(src, k, a, b) simde_mm256_mask_scalef_pd(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_maskz_scalef_pd (simde__mmask8 k, simde__m256d a, simde__m256d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_scalef_pd(k, a, b);
#else
return simde_mm256_maskz_mov_pd(k, simde_mm256_scalef_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_scalef_pd
#define _mm256_maskz_scalef_pd(k, a, b) simde_mm256_maskz_scalef_pd(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_scalef_pd (simde__m512d a, simde__m512d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_scalef_pd(a, b);
#else
return simde_mm512_mul_pd(simde_x_mm512_flushsubnormal_pd(a), simde_mm512_exp2_pd(simde_mm512_floor_pd(simde_x_mm512_flushsubnormal_pd(b))));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_scalef_pd
#define _mm512_scalef_pd(a, b) simde_mm512_scalef_pd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_scalef_pd (simde__m512d src, simde__mmask8 k, simde__m512d a, simde__m512d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_scalef_pd(src, k, a, b);
#else
return simde_mm512_mask_mov_pd(src, k, simde_mm512_scalef_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_scalef_pd
#define _mm512_mask_scalef_pd(src, k, a, b) simde_mm512_mask_scalef_pd(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_maskz_scalef_pd (simde__mmask8 k, simde__m512d a, simde__m512d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_scalef_pd(k, a, b);
#else
return simde_mm512_maskz_mov_pd(k, simde_mm512_scalef_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_scalef_pd
#define _mm512_maskz_scalef_pd(k, a, b) simde_mm512_maskz_scalef_pd(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_scalef_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm_scalef_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
a_.f32[0] = (simde_math_issubnormalf(a_.f32[0]) ? 0 : a_.f32[0]) * simde_math_exp2f(simde_math_floorf((simde_math_issubnormalf(b_.f32[0]) ? 0 : b_.f32[0])));
return simde__m128_from_private(a_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_scalef_ss
#define _mm_scalef_ss(a, b) simde_mm_scalef_ss(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mask_scalef_ss (simde__m128 src, simde__mmask8 k, simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(HEDLEY_GCC_VERSION)
return _mm_mask_scalef_round_ss(src, k, a, b, _MM_FROUND_CUR_DIRECTION);
#else
simde__m128_private
src_ = simde__m128_to_private(src),
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
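    /* Scalar-lane merge: bit 0 of k chooses between the computed scalef
     * result and src for lane 0; the upper lanes pass through from a. */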
a_.f32[0] = ((k & 1) ? ((simde_math_issubnormalf(a_.f32[0]) ? 0 : a_.f32[0]) * simde_math_exp2f(simde_math_floorf((simde_math_issubnormalf(b_.f32[0]) ? 0 : b_.f32[0])))) : src_.f32[0]);
return simde__m128_from_private(a_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_scalef_ss
#define _mm_mask_scalef_ss(src, k, a, b) simde_mm_mask_scalef_ss(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_maskz_scalef_ss (simde__mmask8 k, simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_95483) && !defined(SIMDE_BUG_GCC_105339)
return _mm_maskz_scalef_ss(k, a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
a_.f32[0] = ((k & 1) ? ((simde_math_issubnormalf(a_.f32[0]) ? 0 : a_.f32[0]) * simde_math_exp2f(simde_math_floorf((simde_math_issubnormalf(b_.f32[0]) ? 0 : b_.f32[0])))) : SIMDE_FLOAT32_C(0.0));
return simde__m128_from_private(a_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_scalef_ss
#define _mm_maskz_scalef_ss(k, a, b) simde_mm_maskz_scalef_ss(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_scalef_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm_scalef_sd(a, b);
#else
simde__m128d_private
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
a_.f64[0] = (simde_math_issubnormal(a_.f64[0]) ? 0 : a_.f64[0]) * simde_math_exp2(simde_math_floor((simde_math_issubnormal(b_.f64[0]) ? 0 : b_.f64[0])));
return simde__m128d_from_private(a_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_scalef_sd
#define _mm_scalef_sd(a, b) simde_mm_scalef_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_mask_scalef_sd (simde__m128d src, simde__mmask8 k, simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_95483) && !defined(SIMDE_BUG_GCC_105339)
return _mm_mask_scalef_sd(src, k, a, b);
#else
simde__m128d_private
src_ = simde__m128d_to_private(src),
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
a_.f64[0] = ((k & 1) ? ((simde_math_issubnormal(a_.f64[0]) ? 0 : a_.f64[0]) * simde_math_exp2(simde_math_floor((simde_math_issubnormal(b_.f64[0]) ? 0 : b_.f64[0])))) : src_.f64[0]);
return simde__m128d_from_private(a_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_scalef_sd
#define _mm_mask_scalef_sd(src, k, a, b) simde_mm_mask_scalef_sd(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_maskz_scalef_sd (simde__mmask8 k, simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_95483) && !defined(SIMDE_BUG_GCC_105339)
return _mm_maskz_scalef_sd(k, a, b);
#else
simde__m128d_private
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
a_.f64[0] = ((k & 1) ? ((simde_math_issubnormal(a_.f64[0]) ? 0 : a_.f64[0]) * simde_math_exp2(simde_math_floor(simde_math_issubnormal(b_.f64[0]) ? 0 : b_.f64[0]))) : SIMDE_FLOAT64_C(0.0));
return simde__m128d_from_private(a_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_scalef_sd
#define _mm_maskz_scalef_sd(k, a, b) simde_mm_maskz_scalef_sd(k, a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_SCALEF_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/4dpwssds.h | .h | 2,672 | 68 | #if !defined(SIMDE_X86_AVX512_4DPWSSDS_H)
#define SIMDE_X86_AVX512_4DPWSSDS_H
#include "types.h"
#include "dpwssds.h"
#include "set1.h"
#include "mov.h"
#include "adds.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
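/* VP4DPWSSDS fallback: each 32-bit element of the 128-bit memory operand b
 * is broadcast and fed through a saturating signed-word dot product against
 * one of a0..a3; the partial results are then combined with saturating
 * adds. */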
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_4dpwssds_epi32 (simde__m512i src, simde__m512i a0, simde__m512i a1, simde__m512i a2, simde__m512i a3, simde__m128i* b) {
#if defined(SIMDE_X86_AVX5124VNNIW_NATIVE)
return _mm512_4dpwssds_epi32(src, a0, a1, a2, a3, b);
#else
simde__m128i_private bv = simde__m128i_to_private(simde_mm_loadu_epi32(b));
simde__m512i r;
r = simde_mm512_dpwssds_epi32(src, a0, simde_mm512_set1_epi32(bv.i32[0]));
r = simde_x_mm512_adds_epi32(simde_mm512_dpwssds_epi32(src, a1, simde_mm512_set1_epi32(bv.i32[1])), r);
r = simde_x_mm512_adds_epi32(simde_mm512_dpwssds_epi32(src, a2, simde_mm512_set1_epi32(bv.i32[2])), r);
r = simde_x_mm512_adds_epi32(simde_mm512_dpwssds_epi32(src, a3, simde_mm512_set1_epi32(bv.i32[3])), r);
return r;
#endif
}
#if defined(SIMDE_X86_AVX5124VNNIW_ENABLE_NATIVE_ALIASES)
#undef _mm512_4dpwssds_epi32
#define _mm512_4dpwssds_epi32(src, a0, a1, a2, a3, b) simde_mm512_4dpwssds_epi32(src, a0, a1, a2, a3, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_4dpwssds_epi32 (simde__m512i src, simde__mmask16 k, simde__m512i a0, simde__m512i a1, simde__m512i a2, simde__m512i a3, simde__m128i* b) {
#if defined(SIMDE_X86_AVX5124VNNIW_NATIVE)
return _mm512_mask_4dpwssds_epi32(src, k, a0, a1, a2, a3, b);
#else
return simde_mm512_mask_mov_epi32(src, k, simde_mm512_4dpwssds_epi32(src, a0, a1, a2, a3, b));
#endif
}
#if defined(SIMDE_X86_AVX5124VNNIW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_4dpwssds_epi32
#define _mm512_mask_4dpwssds_epi32(src, k, a0, a1, a2, a3, b) simde_mm512_mask_4dpwssds_epi32(src, k, a0, a1, a2, a3, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_4dpwssds_epi32 (simde__mmask16 k, simde__m512i src, simde__m512i a0, simde__m512i a1, simde__m512i a2, simde__m512i a3, simde__m128i* b) {
#if defined(SIMDE_X86_AVX5124VNNIW_NATIVE)
return _mm512_maskz_4dpwssds_epi32(k, src, a0, a1, a2, a3, b);
#else
return simde_mm512_maskz_mov_epi32(k, simde_mm512_4dpwssds_epi32(src, a0, a1, a2, a3, b));
#endif
}
#if defined(SIMDE_X86_AVX5124VNNIW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_4dpwssds_epi32
#define _mm512_maskz_4dpwssds_epi32(k, src, a0, a1, a2, a3, b) simde_mm512_maskz_4dpwssds_epi32(k, src, a0, a1, a2, a3, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_4DPWSSDS_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/setr4.h | .h | 3,735 | 141 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Himanshi Mathur <himanshi18037@iiitd.ac.in>
*/
#if !defined(SIMDE_X86_AVX512_SETR4_H)
#define SIMDE_X86_AVX512_SETR4_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
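/* setr4 stores its four arguments in memory order and repeats them across
 * the whole vector, so simde_mm512_setr4_epi32(1, 2, 3, 4) produces lanes
 * 1,2,3,4,1,2,3,4,... with the first argument in the lowest lane. */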
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_setr4_epi32 (int32_t d, int32_t c, int32_t b, int32_t a) {
simde__m512i_private r_;
r_.i32[ 0] = d;
r_.i32[ 1] = c;
r_.i32[ 2] = b;
r_.i32[ 3] = a;
r_.i32[ 4] = d;
r_.i32[ 5] = c;
r_.i32[ 6] = b;
r_.i32[ 7] = a;
r_.i32[ 8] = d;
r_.i32[ 9] = c;
r_.i32[10] = b;
r_.i32[11] = a;
r_.i32[12] = d;
r_.i32[13] = c;
r_.i32[14] = b;
r_.i32[15] = a;
return simde__m512i_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_setr4_epi32
#define _mm512_setr4_epi32(d,c,b,a) simde_mm512_setr4_epi32(d,c,b,a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_setr4_epi64 (int64_t d, int64_t c, int64_t b, int64_t a) {
simde__m512i_private r_;
r_.i64[0] = d;
r_.i64[1] = c;
r_.i64[2] = b;
r_.i64[3] = a;
r_.i64[4] = d;
r_.i64[5] = c;
r_.i64[6] = b;
r_.i64[7] = a;
return simde__m512i_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_setr4_epi64
#define _mm512_setr4_epi64(d,c,b,a) simde_mm512_setr4_epi64(d,c,b,a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_setr4_ps (simde_float32 d, simde_float32 c, simde_float32 b, simde_float32 a) {
simde__m512_private r_;
r_.f32[ 0] = d;
r_.f32[ 1] = c;
r_.f32[ 2] = b;
r_.f32[ 3] = a;
r_.f32[ 4] = d;
r_.f32[ 5] = c;
r_.f32[ 6] = b;
r_.f32[ 7] = a;
r_.f32[ 8] = d;
r_.f32[ 9] = c;
r_.f32[10] = b;
r_.f32[11] = a;
r_.f32[12] = d;
r_.f32[13] = c;
r_.f32[14] = b;
r_.f32[15] = a;
return simde__m512_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_setr4_ps
#define _mm512_setr4_ps(d,c,b,a) simde_mm512_setr4_ps(d,c,b,a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_setr4_pd (simde_float64 d, simde_float64 c, simde_float64 b, simde_float64 a) {
simde__m512d_private r_;
r_.f64[0] = d;
r_.f64[1] = c;
r_.f64[2] = b;
r_.f64[3] = a;
r_.f64[4] = d;
r_.f64[5] = c;
r_.f64[6] = b;
r_.f64[7] = a;
return simde__m512d_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_setr4_pd
#define _mm512_setr4_pd(d,c,b,a) simde_mm512_setr4_pd(d,c,b,a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_SETR4_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/rolv.h | .h | 14,812 | 416 | #if !defined(SIMDE_X86_AVX512_ROLV_H)
#define SIMDE_X86_AVX512_ROLV_H
#include "types.h"
#include "../avx2.h"
#include "mov.h"
#include "srlv.h"
#include "sllv.h"
#include "or.h"
#include "and.h"
#include "sub.h"
#include "set1.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
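/* Variable rotate-left is emulated as (a << n) | (a >> (width - n)) with
 * n masked to the lane width. The n == 0 edge case is safe because x86
 * variable shifts (SLLV/SRLV) are defined to return 0 for out-of-range
 * counts, so the (a >> width) term contributes nothing. */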
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_rolv_epi32 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_rolv_epi32(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_u32 = vec_rl(a_.altivec_u32, b_.altivec_u32);
return simde__m128i_from_private(r_);
#else
HEDLEY_STATIC_CAST(void, r_);
HEDLEY_STATIC_CAST(void, a_);
HEDLEY_STATIC_CAST(void, b_);
simde__m128i
count1 = simde_mm_and_si128(b, simde_mm_set1_epi32(31)),
count2 = simde_mm_sub_epi32(simde_mm_set1_epi32(32), count1);
return simde_mm_or_si128(simde_mm_sllv_epi32(a, count1), simde_mm_srlv_epi32(a, count2));
#endif
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_rolv_epi32
#define _mm_rolv_epi32(a, b) simde_mm_rolv_epi32(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_rolv_epi32 (simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_rolv_epi32(src, k, a, b);
#else
return simde_mm_mask_mov_epi32(src, k, simde_mm_rolv_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_rolv_epi32
#define _mm_mask_rolv_epi32(src, k, a, b) simde_mm_mask_rolv_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_rolv_epi32 (simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_rolv_epi32(k, a, b);
#else
return simde_mm_maskz_mov_epi32(k, simde_mm_rolv_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_rolv_epi32
#define _mm_maskz_rolv_epi32(k, a, b) simde_mm_maskz_rolv_epi32(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_rolv_epi32 (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_rolv_epi32(a, b);
#else
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) {
r_.m128i_private[i].altivec_u32 = vec_rl(a_.m128i_private[i].altivec_u32, b_.m128i_private[i].altivec_u32);
}
return simde__m256i_from_private(r_);
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128)
r_.m128i[0] = simde_mm_rolv_epi32(a_.m128i[0], b_.m128i[0]);
r_.m128i[1] = simde_mm_rolv_epi32(a_.m128i[1], b_.m128i[1]);
return simde__m256i_from_private(r_);
#else
HEDLEY_STATIC_CAST(void, r_);
HEDLEY_STATIC_CAST(void, a_);
HEDLEY_STATIC_CAST(void, b_);
simde__m256i
count1 = simde_mm256_and_si256(b, simde_mm256_set1_epi32(31)),
count2 = simde_mm256_sub_epi32(simde_mm256_set1_epi32(32), count1);
return simde_mm256_or_si256(simde_mm256_sllv_epi32(a, count1), simde_mm256_srlv_epi32(a, count2));
#endif
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_rolv_epi32
#define _mm256_rolv_epi32(a, b) simde_mm256_rolv_epi32(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_rolv_epi32 (simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_rolv_epi32(src, k, a, b);
#else
return simde_mm256_mask_mov_epi32(src, k, simde_mm256_rolv_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_rolv_epi32
#define _mm256_mask_rolv_epi32(src, k, a, b) simde_mm256_mask_rolv_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_rolv_epi32 (simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_rolv_epi32(k, a, b);
#else
return simde_mm256_maskz_mov_epi32(k, simde_mm256_rolv_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_rolv_epi32
#define _mm256_maskz_rolv_epi32(k, a, b) simde_mm256_maskz_rolv_epi32(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_rolv_epi32 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_rolv_epi32(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) {
r_.m128i_private[i].altivec_u32 = vec_rl(a_.m128i_private[i].altivec_u32, b_.m128i_private[i].altivec_u32);
}
return simde__m512i_from_private(r_);
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
r_.m256i[0] = simde_mm256_rolv_epi32(a_.m256i[0], b_.m256i[0]);
r_.m256i[1] = simde_mm256_rolv_epi32(a_.m256i[1], b_.m256i[1]);
return simde__m512i_from_private(r_);
#else
HEDLEY_STATIC_CAST(void, r_);
HEDLEY_STATIC_CAST(void, a_);
HEDLEY_STATIC_CAST(void, b_);
simde__m512i
count1 = simde_mm512_and_si512(b, simde_mm512_set1_epi32(31)),
count2 = simde_mm512_sub_epi32(simde_mm512_set1_epi32(32), count1);
return simde_mm512_or_si512(simde_mm512_sllv_epi32(a, count1), simde_mm512_srlv_epi32(a, count2));
#endif
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_rolv_epi32
#define _mm512_rolv_epi32(a, b) simde_mm512_rolv_epi32(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_rolv_epi32 (simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_rolv_epi32(src, k, a, b);
#else
return simde_mm512_mask_mov_epi32(src, k, simde_mm512_rolv_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_rolv_epi32
#define _mm512_mask_rolv_epi32(src, k, a, b) simde_mm512_mask_rolv_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_rolv_epi32 (simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_rolv_epi32(k, a, b);
#else
return simde_mm512_maskz_mov_epi32(k, simde_mm512_rolv_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_rolv_epi32
#define _mm512_maskz_rolv_epi32(k, a, b) simde_mm512_maskz_rolv_epi32(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_rolv_epi64 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_rolv_epi64(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_u64 = vec_rl(a_.altivec_u64, b_.altivec_u64);
return simde__m128i_from_private(r_);
#else
HEDLEY_STATIC_CAST(void, r_);
HEDLEY_STATIC_CAST(void, a_);
HEDLEY_STATIC_CAST(void, b_);
simde__m128i
count1 = simde_mm_and_si128(b, simde_mm_set1_epi64x(63)),
count2 = simde_mm_sub_epi64(simde_mm_set1_epi64x(64), count1);
return simde_mm_or_si128(simde_mm_sllv_epi64(a, count1), simde_mm_srlv_epi64(a, count2));
#endif
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_rolv_epi64
#define _mm_rolv_epi64(a, b) simde_mm_rolv_epi64(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_rolv_epi64 (simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_rolv_epi64(src, k, a, b);
#else
return simde_mm_mask_mov_epi64(src, k, simde_mm_rolv_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_rolv_epi64
#define _mm_mask_rolv_epi64(src, k, a, b) simde_mm_mask_rolv_epi64(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_rolv_epi64 (simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_rolv_epi64(k, a, b);
#else
return simde_mm_maskz_mov_epi64(k, simde_mm_rolv_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_rolv_epi64
#define _mm_maskz_rolv_epi64(k, a, b) simde_mm_maskz_rolv_epi64(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_rolv_epi64 (simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_rolv_epi64(a, b);
#else
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) {
r_.m128i_private[i].altivec_u64 = vec_rl(a_.m128i_private[i].altivec_u64, b_.m128i_private[i].altivec_u64);
}
return simde__m256i_from_private(r_);
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128)
r_.m128i[0] = simde_mm_rolv_epi64(a_.m128i[0], b_.m128i[0]);
r_.m128i[1] = simde_mm_rolv_epi64(a_.m128i[1], b_.m128i[1]);
return simde__m256i_from_private(r_);
#else
HEDLEY_STATIC_CAST(void, r_);
HEDLEY_STATIC_CAST(void, a_);
HEDLEY_STATIC_CAST(void, b_);
simde__m256i
count1 = simde_mm256_and_si256(b, simde_mm256_set1_epi64x(63)),
count2 = simde_mm256_sub_epi64(simde_mm256_set1_epi64x(64), count1);
return simde_mm256_or_si256(simde_mm256_sllv_epi64(a, count1), simde_mm256_srlv_epi64(a, count2));
#endif
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_rolv_epi64
#define _mm256_rolv_epi64(a, b) simde_mm256_rolv_epi64(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_rolv_epi64 (simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_rolv_epi64(src, k, a, b);
#else
return simde_mm256_mask_mov_epi64(src, k, simde_mm256_rolv_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_rolv_epi64
#define _mm256_mask_rolv_epi64(src, k, a, b) simde_mm256_mask_rolv_epi64(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_rolv_epi64 (simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_rolv_epi64(k, a, b);
#else
return simde_mm256_maskz_mov_epi64(k, simde_mm256_rolv_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_rolv_epi64
#define _mm256_maskz_rolv_epi64(k, a, b) simde_mm256_maskz_rolv_epi64(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_rolv_epi64 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_rolv_epi64(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) {
r_.m128i_private[i].altivec_u64 = vec_rl(a_.m128i_private[i].altivec_u64, b_.m128i_private[i].altivec_u64);
}
return simde__m512i_from_private(r_);
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
r_.m256i[0] = simde_mm256_rolv_epi64(a_.m256i[0], b_.m256i[0]);
r_.m256i[1] = simde_mm256_rolv_epi64(a_.m256i[1], b_.m256i[1]);
return simde__m512i_from_private(r_);
#else
HEDLEY_STATIC_CAST(void, r_);
HEDLEY_STATIC_CAST(void, a_);
HEDLEY_STATIC_CAST(void, b_);
simde__m512i
count1 = simde_mm512_and_si512(b, simde_mm512_set1_epi64(63)),
count2 = simde_mm512_sub_epi64(simde_mm512_set1_epi64(64), count1);
return simde_mm512_or_si512(simde_mm512_sllv_epi64(a, count1), simde_mm512_srlv_epi64(a, count2));
#endif
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_rolv_epi64
#define _mm512_rolv_epi64(a, b) simde_mm512_rolv_epi64(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_rolv_epi64 (simde__m512i src, simde__mmask8 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_rolv_epi64(src, k, a, b);
#else
return simde_mm512_mask_mov_epi64(src, k, simde_mm512_rolv_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_rolv_epi64
#define _mm512_mask_rolv_epi64(src, k, a, b) simde_mm512_mask_rolv_epi64(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_rolv_epi64 (simde__mmask8 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_rolv_epi64(k, a, b);
#else
return simde_mm512_maskz_mov_epi64(k, simde_mm512_rolv_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_rolv_epi64
#define _mm512_maskz_rolv_epi64(k, a, b) simde_mm512_maskz_rolv_epi64(k, a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_ROLV_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/packs.h | .h | 6,819 | 123 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Hidayat Khan <huk2209@gmail.com>
*/
#if !defined(SIMDE_X86_AVX512_PACKS_H)
#define SIMDE_X86_AVX512_PACKS_H
#include "types.h"
#include "../avx2.h"
#include "mov.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_packs_epi16 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_packs_epi16(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
r_.m256i[0] = simde_mm256_packs_epi16(a_.m256i[0], b_.m256i[0]);
r_.m256i[1] = simde_mm256_packs_epi16(a_.m256i[1], b_.m256i[1]);
#else
const size_t halfway_point = (sizeof(r_.i8) / sizeof(r_.i8[0])) / 2;
const size_t quarter_point = (sizeof(r_.i8) / sizeof(r_.i8[0])) / 4;
const size_t octet_point = (sizeof(r_.i8) / sizeof(r_.i8[0])) / 8;
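/* Matches the lane-wise pack: each 128-bit lane of the result holds 8
* saturated values from the corresponding lane of a followed by 8 from b. */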
SIMDE_VECTORIZE
for (size_t i = 0 ; i < octet_point ; i++) {
r_.i8[i] = (a_.i16[i] > INT8_MAX) ? INT8_MAX : ((a_.i16[i] < INT8_MIN) ? INT8_MIN : HEDLEY_STATIC_CAST(int8_t, a_.i16[i]));
r_.i8[i + octet_point] = (b_.i16[i] > INT8_MAX) ? INT8_MAX : ((b_.i16[i] < INT8_MIN) ? INT8_MIN : HEDLEY_STATIC_CAST(int8_t, b_.i16[i]));
r_.i8[quarter_point + i] = (a_.i16[octet_point + i] > INT8_MAX) ? INT8_MAX : ((a_.i16[octet_point + i] < INT8_MIN) ? INT8_MIN : HEDLEY_STATIC_CAST(int8_t, a_.i16[octet_point + i]));
r_.i8[quarter_point + i + octet_point] = (b_.i16[octet_point + i] > INT8_MAX) ? INT8_MAX : ((b_.i16[octet_point + i] < INT8_MIN) ? INT8_MIN : HEDLEY_STATIC_CAST(int8_t, b_.i16[octet_point + i]));
r_.i8[halfway_point + i] = (a_.i16[quarter_point + i] > INT8_MAX) ? INT8_MAX : ((a_.i16[quarter_point + i] < INT8_MIN) ? INT8_MIN : HEDLEY_STATIC_CAST(int8_t, a_.i16[quarter_point + i]));
r_.i8[halfway_point + i + octet_point] = (b_.i16[quarter_point + i] > INT8_MAX) ? INT8_MAX : ((b_.i16[quarter_point + i] < INT8_MIN) ? INT8_MIN : HEDLEY_STATIC_CAST(int8_t, b_.i16[quarter_point + i]));
r_.i8[halfway_point + quarter_point + i] = (a_.i16[quarter_point + octet_point + i] > INT8_MAX) ? INT8_MAX : ((a_.i16[quarter_point + octet_point + i] < INT8_MIN) ? INT8_MIN : HEDLEY_STATIC_CAST(int8_t, a_.i16[quarter_point + octet_point + i]));
r_.i8[halfway_point + quarter_point + i + octet_point] = (b_.i16[quarter_point + octet_point + i] > INT8_MAX) ? INT8_MAX : ((b_.i16[quarter_point + octet_point + i] < INT8_MIN) ? INT8_MIN : HEDLEY_STATIC_CAST(int8_t, b_.i16[quarter_point + octet_point + i]));
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_packs_epi16
#define _mm512_packs_epi16(a, b) simde_mm512_packs_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_packs_epi32 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_packs_epi32(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
r_.m256i[0] = simde_mm256_packs_epi32(a_.m256i[0], b_.m256i[0]);
r_.m256i[1] = simde_mm256_packs_epi32(a_.m256i[1], b_.m256i[1]);
#else
const size_t halfway_point = (sizeof(r_.i16) / sizeof(r_.i16[0])) / 2;
const size_t quarter_point = (sizeof(r_.i16) / sizeof(r_.i16[0])) / 4;
const size_t octet_point = (sizeof(r_.i16) / sizeof(r_.i16[0])) / 8;
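/* Same lane-interleaved layout as the epi16 pack above: per 128-bit lane,
* 4 saturated 16-bit values from a followed by 4 from b. */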
SIMDE_VECTORIZE
for (size_t i = 0 ; i < octet_point ; i++) {
r_.i16[i] = (a_.i32[i] > INT16_MAX) ? INT16_MAX : ((a_.i32[i] < INT16_MIN) ? INT16_MIN : HEDLEY_STATIC_CAST(int16_t, a_.i32[i]));
r_.i16[i + octet_point] = (b_.i32[i] > INT16_MAX) ? INT16_MAX : ((b_.i32[i] < INT16_MIN) ? INT16_MIN : HEDLEY_STATIC_CAST(int16_t, b_.i32[i]));
r_.i16[quarter_point + i] = (a_.i32[octet_point + i] > INT16_MAX) ? INT16_MAX : ((a_.i32[octet_point + i] < INT16_MIN) ? INT16_MIN : HEDLEY_STATIC_CAST(int16_t, a_.i32[octet_point + i]));
r_.i16[quarter_point + i + octet_point] = (b_.i32[octet_point + i] > INT16_MAX) ? INT16_MAX : ((b_.i32[octet_point + i] < INT16_MIN) ? INT16_MIN : HEDLEY_STATIC_CAST(int16_t, b_.i32[octet_point + i]));
r_.i16[halfway_point + i] = (a_.i32[quarter_point + i] > INT16_MAX) ? INT16_MAX : ((a_.i32[quarter_point + i] < INT16_MIN) ? INT16_MIN : HEDLEY_STATIC_CAST(int16_t, a_.i32[quarter_point + i]));
r_.i16[halfway_point + i + octet_point] = (b_.i32[quarter_point + i] > INT16_MAX) ? INT16_MAX : ((b_.i32[quarter_point + i] < INT16_MIN) ? INT16_MIN : HEDLEY_STATIC_CAST(int16_t, b_.i32[quarter_point + i]));
r_.i16[halfway_point + quarter_point + i] = (a_.i32[quarter_point + octet_point + i] > INT16_MAX) ? INT16_MAX : ((a_.i32[quarter_point + octet_point + i] < INT16_MIN) ? INT16_MIN : HEDLEY_STATIC_CAST(int16_t, a_.i32[quarter_point + octet_point + i]));
r_.i16[halfway_point + quarter_point + i + octet_point] = (b_.i32[quarter_point + octet_point + i] > INT16_MAX) ? INT16_MAX : ((b_.i32[quarter_point + octet_point + i] < INT16_MIN) ? INT16_MIN : HEDLEY_STATIC_CAST(int16_t, b_.i32[quarter_point + octet_point + i]));
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_packs_epi32
#define _mm512_packs_epi32(a, b) simde_mm512_packs_epi32(a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_PACKS_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/setr.h | .h | 4,723 | 145 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Himanshi Mathur <himanshi18037@iiitd.ac.in>
*/
#if !defined(SIMDE_X86_AVX512_SETR_H)
#define SIMDE_X86_AVX512_SETR_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_setr_epi32 (int32_t e15, int32_t e14, int32_t e13, int32_t e12, int32_t e11, int32_t e10, int32_t e9, int32_t e8,
int32_t e7, int32_t e6, int32_t e5, int32_t e4, int32_t e3, int32_t e2, int32_t e1, int32_t e0) {
simde__m512i_private r_;
r_.i32[ 0] = e15;
r_.i32[ 1] = e14;
r_.i32[ 2] = e13;
r_.i32[ 3] = e12;
r_.i32[ 4] = e11;
r_.i32[ 5] = e10;
r_.i32[ 6] = e9;
r_.i32[ 7] = e8;
r_.i32[ 8] = e7;
r_.i32[ 9] = e6;
r_.i32[10] = e5;
r_.i32[11] = e4;
r_.i32[12] = e3;
r_.i32[13] = e2;
r_.i32[14] = e1;
r_.i32[15] = e0;
return simde__m512i_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_setr_epi32
#define _mm512_setr_epi32(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0) simde_mm512_setr_epi32(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_setr_epi64 (int64_t e7, int64_t e6, int64_t e5, int64_t e4, int64_t e3, int64_t e2, int64_t e1, int64_t e0) {
simde__m512i_private r_;
r_.i64[0] = e7;
r_.i64[1] = e6;
r_.i64[2] = e5;
r_.i64[3] = e4;
r_.i64[4] = e3;
r_.i64[5] = e2;
r_.i64[6] = e1;
r_.i64[7] = e0;
return simde__m512i_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_setr_epi64
#define _mm512_setr_epi64(e7, e6, e5, e4, e3, e2, e1, e0) simde_mm512_setr_epi64(e7, e6, e5, e4, e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_setr_ps (simde_float32 e15, simde_float32 e14, simde_float32 e13, simde_float32 e12,
simde_float32 e11, simde_float32 e10, simde_float32 e9, simde_float32 e8,
simde_float32 e7, simde_float32 e6, simde_float32 e5, simde_float32 e4,
simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
simde__m512_private r_;
r_.f32[ 0] = e15;
r_.f32[ 1] = e14;
r_.f32[ 2] = e13;
r_.f32[ 3] = e12;
r_.f32[ 4] = e11;
r_.f32[ 5] = e10;
r_.f32[ 6] = e9;
r_.f32[ 7] = e8;
r_.f32[ 8] = e7;
r_.f32[ 9] = e6;
r_.f32[10] = e5;
r_.f32[11] = e4;
r_.f32[12] = e3;
r_.f32[13] = e2;
r_.f32[14] = e1;
r_.f32[15] = e0;
return simde__m512_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_setr_ps
#define _mm512_setr_ps(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0) simde_mm512_setr_ps(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_setr_pd (simde_float64 e7, simde_float64 e6, simde_float64 e5, simde_float64 e4, simde_float64 e3, simde_float64 e2, simde_float64 e1, simde_float64 e0) {
simde__m512d_private r_;
r_.f64[0] = e7;
r_.f64[1] = e6;
r_.f64[2] = e5;
r_.f64[3] = e4;
r_.f64[4] = e3;
r_.f64[5] = e2;
r_.f64[6] = e1;
r_.f64[7] = e0;
return simde__m512d_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_setr_pd
#define _mm512_setr_pd(e7, e6, e5, e4, e3, e2, e1, e0) simde_mm512_setr_pd(e7, e6, e5, e4, e3, e2, e1, e0)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_SETR_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/dpbusd.h | .h | 10,824 | 293 | #if !defined(SIMDE_X86_AVX512_DPBUSD_H)
#define SIMDE_X86_AVX512_DPBUSD_H
#include "types.h"
#include "mov.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_dpbusd_epi32(simde__m128i src, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm_dpbusd_epi32(src, a, b);
#else
simde__m128i_private
src_ = simde__m128i_to_private(src),
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_CONVERT_VECTOR_)
uint32_t x1_ SIMDE_VECTOR(64);
int32_t x2_ SIMDE_VECTOR(64);
simde__m128i_private
r1_[4],
r2_[4];
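/* Group the bytes of each 32-bit element by position (all byte 0s, then
* all byte 1s, ...), widen each group to 32 bits, and accumulate the four
* partial products per element in one vector expression. */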
a_.u8 =
SIMDE_SHUFFLE_VECTOR_(
8, 16,
a_.u8, a_.u8,
0, 4, 8, 12,
1, 5, 9, 13,
2, 6, 10, 14,
3, 7, 11, 15
);
b_.i8 =
SIMDE_SHUFFLE_VECTOR_(
8, 16,
b_.i8, b_.i8,
0, 4, 8, 12,
1, 5, 9, 13,
2, 6, 10, 14,
3, 7, 11, 15
);
SIMDE_CONVERT_VECTOR_(x1_, a_.u8);
SIMDE_CONVERT_VECTOR_(x2_, b_.i8);
simde_memcpy(&r1_, &x1_, sizeof(x1_));
simde_memcpy(&r2_, &x2_, sizeof(x2_));
src_.i32 +=
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[0].u32) * r2_[0].i32) +
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[1].u32) * r2_[1].i32) +
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[2].u32) * r2_[2].i32) +
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[3].u32) * r2_[3].i32);
#else
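/* Portable scalar path: each i32 lane accumulates four products of an
* unsigned byte from a and a signed byte from b; the 16-bit casts
* zero-extend a and sign-extend b before the usual promotion to int,
* so each product is exact. */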
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) {
src_.i32[i / 4] += HEDLEY_STATIC_CAST(uint16_t, a_.u8[i]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[i]);
}
#endif
return simde__m128i_from_private(src_);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm_dpbusd_epi32
#define _mm_dpbusd_epi32(src, a, b) simde_mm_dpbusd_epi32(src, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_dpbusd_epi32(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm_mask_dpbusd_epi32(src, k, a, b);
#else
return simde_mm_mask_mov_epi32(src, k, simde_mm_dpbusd_epi32(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_dpbusd_epi32
#define _mm_mask_dpbusd_epi32(src, k, a, b) simde_mm_mask_dpbusd_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_dpbusd_epi32(simde__mmask8 k, simde__m128i src, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm_maskz_dpbusd_epi32(k, src, a, b);
#else
return simde_mm_maskz_mov_epi32(k, simde_mm_dpbusd_epi32(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_dpbusd_epi32
#define _mm_maskz_dpbusd_epi32(k, src, a, b) simde_mm_maskz_dpbusd_epi32(k, src, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_dpbusd_epi32(simde__m256i src, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm256_dpbusd_epi32(src, a, b);
#else
simde__m256i_private
src_ = simde__m256i_to_private(src),
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
src_.m128i[0] = simde_mm_dpbusd_epi32(src_.m128i[0], a_.m128i[0], b_.m128i[0]);
src_.m128i[1] = simde_mm_dpbusd_epi32(src_.m128i[1], a_.m128i[1], b_.m128i[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_CONVERT_VECTOR_)
uint32_t x1_ SIMDE_VECTOR(128);
int32_t x2_ SIMDE_VECTOR(128);
simde__m256i_private
r1_[4],
r2_[4];
a_.u8 =
SIMDE_SHUFFLE_VECTOR_(
8, 32,
a_.u8, a_.u8,
0, 4, 8, 12, 16, 20, 24, 28,
1, 5, 9, 13, 17, 21, 25, 29,
2, 6, 10, 14, 18, 22, 26, 30,
3, 7, 11, 15, 19, 23, 27, 31
);
b_.i8 =
SIMDE_SHUFFLE_VECTOR_(
8, 32,
b_.i8, b_.i8,
0, 4, 8, 12, 16, 20, 24, 28,
1, 5, 9, 13, 17, 21, 25, 29,
2, 6, 10, 14, 18, 22, 26, 30,
3, 7, 11, 15, 19, 23, 27, 31
);
SIMDE_CONVERT_VECTOR_(x1_, a_.u8);
SIMDE_CONVERT_VECTOR_(x2_, b_.i8);
simde_memcpy(&r1_, &x1_, sizeof(x1_));
simde_memcpy(&r2_, &x2_, sizeof(x2_));
src_.i32 +=
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[0].u32) * r2_[0].i32) +
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[1].u32) * r2_[1].i32) +
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[2].u32) * r2_[2].i32) +
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[3].u32) * r2_[3].i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) {
src_.i32[i / 4] += HEDLEY_STATIC_CAST(uint16_t, a_.u8[i]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[i]);
}
#endif
return simde__m256i_from_private(src_);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm256_dpbusd_epi32
#define _mm256_dpbusd_epi32(src, a, b) simde_mm256_dpbusd_epi32(src, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_dpbusd_epi32(simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm256_mask_dpbusd_epi32(src, k, a, b);
#else
return simde_mm256_mask_mov_epi32(src, k, simde_mm256_dpbusd_epi32(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_dpbusd_epi32
#define _mm256_mask_dpbusd_epi32(src, k, a, b) simde_mm256_mask_dpbusd_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_dpbusd_epi32(simde__mmask8 k, simde__m256i src, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm256_maskz_dpbusd_epi32(k, src, a, b);
#else
return simde_mm256_maskz_mov_epi32(k, simde_mm256_dpbusd_epi32(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_dpbusd_epi32
#define _mm256_maskz_dpbusd_epi32(k, src, a, b) simde_mm256_maskz_dpbusd_epi32(k, src, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_dpbusd_epi32(simde__m512i src, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm512_dpbusd_epi32(src, a, b);
#else
simde__m512i_private
src_ = simde__m512i_to_private(src),
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
src_.m256i[0] = simde_mm256_dpbusd_epi32(src_.m256i[0], a_.m256i[0], b_.m256i[0]);
src_.m256i[1] = simde_mm256_dpbusd_epi32(src_.m256i[1], a_.m256i[1], b_.m256i[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_CONVERT_VECTOR_)
uint32_t x1_ SIMDE_VECTOR(256);
int32_t x2_ SIMDE_VECTOR(256);
simde__m512i_private
r1_[4],
r2_[4];
a_.u8 =
SIMDE_SHUFFLE_VECTOR_(
8, 64,
a_.u8, a_.u8,
0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60,
1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61,
2, 6, 10, 14, 18, 22, 26, 30, 34, 38, 42, 46, 50, 54, 58, 62,
3, 7, 11, 15, 19, 23, 27, 31, 35, 39, 43, 47, 51, 55, 59, 63
);
b_.i8 =
SIMDE_SHUFFLE_VECTOR_(
8, 64,
b_.i8, b_.i8,
0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60,
1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61,
2, 6, 10, 14, 18, 22, 26, 30, 34, 38, 42, 46, 50, 54, 58, 62,
3, 7, 11, 15, 19, 23, 27, 31, 35, 39, 43, 47, 51, 55, 59, 63
);
SIMDE_CONVERT_VECTOR_(x1_, a_.u8);
SIMDE_CONVERT_VECTOR_(x2_, b_.i8);
simde_memcpy(&r1_, &x1_, sizeof(x1_));
simde_memcpy(&r2_, &x2_, sizeof(x2_));
src_.i32 +=
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[0].u32) * r2_[0].i32) +
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[1].u32) * r2_[1].i32) +
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[2].u32) * r2_[2].i32) +
(HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[3].u32) * r2_[3].i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) {
src_.i32[i / 4] += HEDLEY_STATIC_CAST(uint16_t, a_.u8[i]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[i]);
}
#endif
return simde__m512i_from_private(src_);
#endif
}
#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm512_dpbusd_epi32
#define _mm512_dpbusd_epi32(src, a, b) simde_mm512_dpbusd_epi32(src, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_dpbusd_epi32(simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm512_mask_dpbusd_epi32(src, k, a, b);
#else
return simde_mm512_mask_mov_epi32(src, k, simde_mm512_dpbusd_epi32(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_dpbusd_epi32
#define _mm512_mask_dpbusd_epi32(src, k, a, b) simde_mm512_mask_dpbusd_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_dpbusd_epi32(simde__mmask16 k, simde__m512i src, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm512_maskz_dpbusd_epi32(k, src, a, b);
#else
return simde_mm512_maskz_mov_epi32(k, simde_mm512_dpbusd_epi32(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_dpbusd_epi32
#define _mm512_maskz_dpbusd_epi32(k, src, a, b) simde_mm512_maskz_dpbusd_epi32(k, src, a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_DPBUSD_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/adds.h | .h | 18,352 | 530 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Himanshi Mathur <himanshi18037@iiitd.ac.in>
*/
#if !defined(SIMDE_X86_AVX512_ADDS_H)
#define SIMDE_X86_AVX512_ADDS_H
#include "types.h"
#include "../avx2.h"
#include "mov.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_adds_epi8(simde__m128i src, simde__mmask16 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_mask_adds_epi8(src, k, a, b);
#else
return simde_mm_mask_mov_epi8(src, k, simde_mm_adds_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_adds_epi8
#define _mm_mask_adds_epi8(src, k, a, b) simde_mm_mask_adds_epi8(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_adds_epi8(simde__mmask16 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_maskz_adds_epi8(k, a, b);
#else
return simde_mm_maskz_mov_epi8(k, simde_mm_adds_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_adds_epi8
#define _mm_maskz_adds_epi8(k, a, b) simde_mm_maskz_adds_epi8(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_adds_epi16(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_mask_adds_epi16(src, k, a, b);
#else
return simde_mm_mask_mov_epi16(src, k, simde_mm_adds_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_adds_epi16
#define _mm_mask_adds_epi16(src, k, a, b) simde_mm_mask_adds_epi16(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_adds_epi16(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_maskz_adds_epi16(k, a, b);
#else
return simde_mm_maskz_mov_epi16(k, simde_mm_adds_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_adds_epi16
#define _mm_maskz_adds_epi16(k, a, b) simde_mm_maskz_adds_epi16(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_adds_epi8(simde__m256i src, simde__mmask32 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_mask_adds_epi8(src, k, a, b);
#else
return simde_mm256_mask_mov_epi8(src, k, simde_mm256_adds_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_adds_epi8
#define _mm256_mask_adds_epi8(src, k, a, b) simde_mm256_mask_adds_epi8(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_adds_epi8(simde__mmask32 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_maskz_adds_epi8(k, a, b);
#else
return simde_mm256_maskz_mov_epi8(k, simde_mm256_adds_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_adds_epi8
#define _mm256_maskz_adds_epi8(k, a, b) simde_mm256_maskz_adds_epi8(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_adds_epi16(simde__m256i src, simde__mmask16 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_mask_adds_epi16(src, k, a, b);
#else
return simde_mm256_mask_mov_epi16(src, k, simde_mm256_adds_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_adds_epi16
#define _mm256_mask_adds_epi16(src, k, a, b) simde_mm256_mask_adds_epi16(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_adds_epi16(simde__mmask16 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_maskz_adds_epi16(k, a, b);
#else
return simde_mm256_maskz_mov_epi16(k, simde_mm256_adds_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_adds_epi16
#define _mm256_maskz_adds_epi16(k, a, b) simde_mm256_maskz_adds_epi16(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_adds_epi8 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_adds_epi8(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if !defined(HEDLEY_INTEL_VERSION)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_adds_epi8(a_.m256i[i], b_.m256i[i]);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = simde_math_adds_i8(a_.i8[i], b_.i8[i]);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_adds_epi8
#define _mm512_adds_epi8(a, b) simde_mm512_adds_epi8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_adds_epi8 (simde__m512i src, simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_adds_epi8(src, k, a, b);
#else
return simde_mm512_mask_mov_epi8(src, k, simde_mm512_adds_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_adds_epi8
#define _mm512_mask_adds_epi8(src, k, a, b) simde_mm512_mask_adds_epi8(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_adds_epi8 (simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_adds_epi8(k, a, b);
#else
return simde_mm512_maskz_mov_epi8(k, simde_mm512_adds_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_adds_epi8
#define _mm512_maskz_adds_epi8(k, a, b) simde_mm512_maskz_adds_epi8(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_adds_epi16 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_adds_epi16(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if !defined(HEDLEY_INTEL_VERSION)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_adds_epi16(a_.m256i[i], b_.m256i[i]);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = simde_math_adds_i16(a_.i16[i], b_.i16[i]);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_adds_epi16
#define _mm512_adds_epi16(a, b) simde_mm512_adds_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_adds_epi16 (simde__m512i src, simde__mmask32 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_adds_epi16(src, k, a, b);
#else
return simde_mm512_mask_mov_epi16(src, k, simde_mm512_adds_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_adds_epi16
#define _mm512_mask_adds_epi16(src, k, a, b) simde_mm512_mask_adds_epi16(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_adds_epi16 (simde__mmask32 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_adds_epi16(k, a, b);
#else
return simde_mm512_maskz_mov_epi16(k, simde_mm512_adds_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_adds_epi16
#define _mm512_maskz_adds_epi16(k, a, b) simde_mm512_maskz_adds_epi16(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_adds_epu8 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_adds_epu8(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if !defined(HEDLEY_INTEL_VERSION)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_mm_adds_epu8(a_.m128i[i], b_.m128i[i]);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = simde_math_adds_u8(a_.u8[i], b_.u8[i]);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_adds_epu8
#define _mm512_adds_epu8(a, b) simde_mm512_adds_epu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_adds_epu8 (simde__m512i src, simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_adds_epu8(src, k, a, b);
#else
return simde_mm512_mask_mov_epi8(src, k, simde_mm512_adds_epu8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_adds_epu8
#define _mm512_mask_adds_epu8(src, k, a, b) simde_mm512_mask_adds_epu8(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_adds_epu8 (simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_adds_epu8(k, a, b);
#else
return simde_mm512_maskz_mov_epi8(k, simde_mm512_adds_epu8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_adds_epu8
#define _mm512_maskz_adds_epu8(k, a, b) simde_mm512_maskz_adds_epu8(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_adds_epu16 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_adds_epu16(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if !defined(HEDLEY_INTEL_VERSION)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_adds_epu16(a_.m256i[i], b_.m256i[i]);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = simde_math_adds_u16(a_.u16[i], b_.u16[i]);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_adds_epu16
#define _mm512_adds_epu16(a, b) simde_mm512_adds_epu16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_adds_epu16 (simde__m512i src, simde__mmask32 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_adds_epu16(src, k, a, b);
#else
return simde_mm512_mask_mov_epi16(src, k, simde_mm512_adds_epu16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_adds_epu16
#define _mm512_mask_adds_epu16(src, k, a, b) simde_mm512_mask_adds_epu16(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_adds_epu16 (simde__mmask32 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_adds_epu16(k, a, b);
#else
return simde_mm512_maskz_mov_epi16(k, simde_mm512_adds_epu16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_adds_epu16
#define _mm512_maskz_adds_epu16(k, a, b) simde_mm512_maskz_adds_epu16(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_adds_epi32(simde__m128i a, simde__m128i b) {
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vqaddq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6)
r_.altivec_i32 = vec_adds(a_.altivec_i32, b_.altivec_i32);
#else
#if defined(SIMDE_X86_SSE2_NATIVE)
/* https://stackoverflow.com/a/56544654/501126 */
const __m128i int_max = _mm_set1_epi32(INT32_MAX);
/* normal result (possibly wraps around) */
const __m128i sum = _mm_add_epi32(a_.n, b_.n);
/* If result saturates, it has the same sign as both a and b */
const __m128i sign_bit = _mm_srli_epi32(a_.n, 31); /* shift sign to lowest bit */
#if defined(SIMDE_X86_AVX512VL_NATIVE)
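/* imm8 0x42 selects lanes where a and b share a sign bit that differs
* from the sum's: exactly the signed-overflow condition. */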
const __m128i overflow = _mm_ternarylogic_epi32(a_.n, b_.n, sum, 0x42);
#else
const __m128i sign_xor = _mm_xor_si128(a_.n, b_.n);
const __m128i overflow = _mm_andnot_si128(sign_xor, _mm_xor_si128(a_.n, sum));
#endif
#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r_.n = _mm_mask_add_epi32(sum, _mm_movepi32_mask(overflow), int_max, sign_bit);
#else
const __m128i saturated = _mm_add_epi32(int_max, sign_bit);
#if defined(SIMDE_X86_SSE4_1_NATIVE)
r_.n =
_mm_castps_si128(
_mm_blendv_ps(
_mm_castsi128_ps(sum),
_mm_castsi128_ps(saturated),
_mm_castsi128_ps(overflow)
)
);
#else
const __m128i overflow_mask = _mm_srai_epi32(overflow, 31);
r_.n =
_mm_or_si128(
_mm_and_si128(overflow_mask, saturated),
_mm_andnot_si128(overflow_mask, sum)
);
#endif
#endif
#elif defined(SIMDE_VECTOR_SCALAR)
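/* Branchless saturation: ru is the wrapped sum, au is rewritten to the
* saturation value (INT32_MAX for non-negative a, INT32_MIN otherwise),
* and m is all-ones in lanes that did not overflow, selecting between
* the two. */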
uint32_t au SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(au), a_.i32);
uint32_t bu SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), b_.i32);
uint32_t ru SIMDE_VECTOR(16) = au + bu;
au = (au >> 31) + INT32_MAX;
uint32_t m SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (au ^ bu) | ~(bu ^ ru)) < 0);
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (au & ~m) | (ru & m));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = simde_math_adds_i32(a_.i32[i], b_.i32[i]);
}
#endif
#endif
return simde__m128i_from_private(r_);
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_x_mm256_adds_epi32(simde__m256i a, simde__m256i b) {
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_adds_epi32(a_.m128i[i], b_.m128i[i]);
}
#elif defined(SIMDE_VECTOR_SCALAR)
uint32_t au SIMDE_VECTOR(32) = HEDLEY_REINTERPRET_CAST(__typeof__(au), a_.i32);
uint32_t bu SIMDE_VECTOR(32) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), b_.i32);
uint32_t ru SIMDE_VECTOR(32) = au + bu;
au = (au >> 31) + INT32_MAX;
uint32_t m SIMDE_VECTOR(32) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (au ^ bu) | ~(bu ^ ru)) < 0);
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (au & ~m) | (ru & m));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = simde_math_adds_i32(a_.i32[i], b_.i32[i]);
}
#endif
return simde__m256i_from_private(r_);
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_x_mm512_adds_epi32(simde__m512i a, simde__m512i b) {
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_x_mm_adds_epi32(a_.m128i[i], b_.m128i[i]);
}
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_x_mm256_adds_epi32(a_.m256i[i], b_.m256i[i]);
}
#elif defined(SIMDE_VECTOR_SCALAR)
uint32_t au SIMDE_VECTOR(64) = HEDLEY_REINTERPRET_CAST(__typeof__(au), a_.i32);
uint32_t bu SIMDE_VECTOR(64) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), b_.i32);
uint32_t ru SIMDE_VECTOR(64) = au + bu;
au = (au >> 31) + INT32_MAX;
uint32_t m SIMDE_VECTOR(64) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (au ^ bu) | ~(bu ^ ru)) < 0);
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (au & ~m) | (ru & m));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = simde_math_adds_i32(a_.i32[i], b_.i32[i]);
}
#endif
return simde__m512i_from_private(r_);
}
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_ADDS_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/dpwssd.h | .h | 8,962 | 270 | #if !defined(SIMDE_X86_AVX512_DPWSSD_H)
#define SIMDE_X86_AVX512_DPWSSD_H
#include "types.h"
#include "mov.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_dpwssd_epi32(simde__m128i src, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm_dpwssd_epi32(src, a, b);
#else
simde__m128i_private
src_ = simde__m128i_to_private(src),
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_CONVERT_VECTOR_)
int32_t x1_ SIMDE_VECTOR(32);
int32_t x2_ SIMDE_VECTOR(32);
simde__m128i_private
r1_[2],
r2_[2];
a_.i16 =
SIMDE_SHUFFLE_VECTOR_(
16, 16,
a_.i16, a_.i16,
0, 2, 4, 6,
1, 3, 5, 7
);
b_.i16 =
SIMDE_SHUFFLE_VECTOR_(
16, 16,
b_.i16, b_.i16,
0, 2, 4, 6,
1, 3, 5, 7
);
SIMDE_CONVERT_VECTOR_(x1_, a_.i16);
SIMDE_CONVERT_VECTOR_(x2_, b_.i16);
simde_memcpy(&r1_, &x1_, sizeof(x1_));
simde_memcpy(&r2_, &x2_, sizeof(x2_));
src_.i32 +=
(r1_[0].i32 * r2_[0].i32) +
(r1_[1].i32 * r2_[1].i32);
#else
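/* Each i32 lane accumulates the products of a pair of adjacent signed
* 16-bit elements; this is the non-saturating form (VPDPWSSD), unlike
* dpwssds. */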
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) {
src_.i32[i / 2] += HEDLEY_STATIC_CAST(int32_t, a_.i16[i]) * HEDLEY_STATIC_CAST(int32_t, b_.i16[i]);
}
#endif
return simde__m128i_from_private(src_);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm_dpwssd_epi32
#define _mm_dpwssd_epi32(src, a, b) simde_mm_dpwssd_epi32(src, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_dpwssd_epi32(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm_mask_dpwssd_epi32(src, k, a, b);
#else
return simde_mm_mask_mov_epi32(src, k, simde_mm_dpwssd_epi32(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_dpwssd_epi32
#define _mm_mask_dpwssd_epi32(src, k, a, b) simde_mm_mask_dpwssd_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_dpwssd_epi32(simde__mmask8 k, simde__m128i src, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm_maskz_dpwssd_epi32(k, src, a, b);
#else
return simde_mm_maskz_mov_epi32(k, simde_mm_dpwssd_epi32(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_dpwssd_epi32
#define _mm_maskz_dpwssd_epi32(k, src, a, b) simde_mm_maskz_dpwssd_epi32(k, src, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_dpwssd_epi32(simde__m256i src, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm256_dpwssd_epi32(src, a, b);
#else
simde__m256i_private
src_ = simde__m256i_to_private(src),
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_CONVERT_VECTOR_)
int32_t x1_ SIMDE_VECTOR(64);
int32_t x2_ SIMDE_VECTOR(64);
simde__m256i_private
r1_[2],
r2_[2];
a_.i16 =
SIMDE_SHUFFLE_VECTOR_(
16, 32,
a_.i16, a_.i16,
0, 2, 4, 6, 8, 10, 12, 14,
1, 3, 5, 7, 9, 11, 13, 15
);
b_.i16 =
SIMDE_SHUFFLE_VECTOR_(
16, 32,
b_.i16, b_.i16,
0, 2, 4, 6, 8, 10, 12, 14,
1, 3, 5, 7, 9, 11, 13, 15
);
SIMDE_CONVERT_VECTOR_(x1_, a_.i16);
SIMDE_CONVERT_VECTOR_(x2_, b_.i16);
simde_memcpy(&r1_, &x1_, sizeof(x1_));
simde_memcpy(&r2_, &x2_, sizeof(x2_));
src_.i32 +=
(r1_[0].i32 * r2_[0].i32) +
(r1_[1].i32 * r2_[1].i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) {
src_.i32[i / 2] += HEDLEY_STATIC_CAST(int32_t, a_.i16[i]) * HEDLEY_STATIC_CAST(int32_t, b_.i16[i]);
}
#endif
return simde__m256i_from_private(src_);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm256_dpwssd_epi32
#define _mm256_dpwssd_epi32(src, a, b) simde_mm256_dpwssd_epi32(src, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_dpwssd_epi32(simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm256_mask_dpwssd_epi32(src, k, a, b);
#else
return simde_mm256_mask_mov_epi32(src, k, simde_mm256_dpwssd_epi32(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_dpwssd_epi32
#define _mm256_mask_dpwssd_epi32(src, k, a, b) simde_mm256_mask_dpwssd_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_dpwssd_epi32(simde__mmask8 k, simde__m256i src, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm256_maskz_dpwssd_epi32(k, src, a, b);
#else
return simde_mm256_maskz_mov_epi32(k, simde_mm256_dpwssd_epi32(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_dpwssd_epi32
#define _mm256_maskz_dpwssd_epi32(k, src, a, b) simde_mm256_maskz_dpwssd_epi32(k, src, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_dpwssd_epi32(simde__m512i src, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm512_dpwssd_epi32(src, a, b);
#else
simde__m512i_private
src_ = simde__m512i_to_private(src),
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_CONVERT_VECTOR_)
int32_t x1_ SIMDE_VECTOR(128);
int32_t x2_ SIMDE_VECTOR(128);
simde__m512i_private
r1_[2],
r2_[2];
a_.i16 =
SIMDE_SHUFFLE_VECTOR_(
16, 64,
a_.i16, a_.i16,
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30,
1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
);
b_.i16 =
SIMDE_SHUFFLE_VECTOR_(
16, 64,
b_.i16, b_.i16,
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30,
1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
);
SIMDE_CONVERT_VECTOR_(x1_, a_.i16);
SIMDE_CONVERT_VECTOR_(x2_, b_.i16);
simde_memcpy(&r1_, &x1_, sizeof(x1_));
simde_memcpy(&r2_, &x2_, sizeof(x2_));
src_.i32 +=
(r1_[0].i32 * r2_[0].i32) +
(r1_[1].i32 * r2_[1].i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) {
src_.i32[i / 2] += HEDLEY_STATIC_CAST(int32_t, a_.i16[i]) * HEDLEY_STATIC_CAST(int32_t, b_.i16[i]);
}
#endif
return simde__m512i_from_private(src_);
#endif
}
#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm512_dpwssd_epi32
#define _mm512_dpwssd_epi32(src, a, b) simde_mm512_dpwssd_epi32(src, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_dpwssd_epi32(simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm512_mask_dpwssd_epi32(src, k, a, b);
#else
return simde_mm512_mask_mov_epi32(src, k, simde_mm512_dpwssd_epi32(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_dpwssd_epi32
#define _mm512_mask_dpwssd_epi32(src, k, a, b) simde_mm512_mask_dpwssd_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_dpwssd_epi32(simde__mmask16 k, simde__m512i src, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512VNNI_NATIVE)
return _mm512_maskz_dpwssd_epi32(k, src, a, b);
#else
return simde_mm512_maskz_mov_epi32(k, simde_mm512_dpwssd_epi32(src, a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_dpwssd_epi32
#define _mm512_maskz_dpwssd_epi32(k, src, a, b) simde_mm512_maskz_dpwssd_epi32(k, src, a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_DPWSSD_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/loadu.h | .h | 11,094 | 298 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_X86_AVX512_LOADU_H)
#define SIMDE_X86_AVX512_LOADU_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_loadu_ps (void const * mem_addr) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
#if defined(SIMDE_BUG_CLANG_REV_298042)
return _mm512_loadu_ps(SIMDE_ALIGN_CAST(const float *, mem_addr));
#else
return _mm512_loadu_ps(mem_addr);
#endif
#else
simde__m512 r;
simde_memcpy(&r, mem_addr, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_loadu_ps
#define _mm512_loadu_ps(a) simde_mm512_loadu_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_loadu_pd (void const * mem_addr) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
#if defined(SIMDE_BUG_CLANG_REV_298042)
return _mm512_loadu_pd(SIMDE_ALIGN_CAST(const double *, mem_addr));
#else
return _mm512_loadu_pd(mem_addr);
#endif
#else
simde__m512d r;
simde_memcpy(&r, mem_addr, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_loadu_pd
#define _mm512_loadu_pd(a) simde_mm512_loadu_pd(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512h
simde_mm512_loadu_ph (void const * mem_addr) {
#if defined(SIMDE_X86_AVX512FP16_NATIVE)
return _mm512_loadu_ph(mem_addr);
#else
simde__m512h r;
simde_memcpy(&r, mem_addr, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_X86_AVX512FP16_ENABLE_NATIVE_ALIASES)
#undef _mm512_loadu_ph
#define _mm512_loadu_ph(a) simde_mm512_loadu_ph(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_loadu_si512 (void const * mem_addr) {
simde__m512i r;
#if HEDLEY_GNUC_HAS_ATTRIBUTE(may_alias,3,3,0)
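/* A packed, may_alias struct lets the compiler emit an unaligned load
* directly instead of going through memcpy. */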
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_PACKED_
struct simde_mm512_loadu_si512_s {
__typeof__(r) v;
} __attribute__((__packed__, __may_alias__));
r = HEDLEY_REINTERPRET_CAST(const struct simde_mm512_loadu_si512_s *, mem_addr)->v;
HEDLEY_DIAGNOSTIC_POP
#else
simde_memcpy(&r, mem_addr, sizeof(r));
#endif
return r;
}
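/* The typed integer loads (_mm512_loadu_epi8/16/32/64) only exist on
* newer GCC and clang, per the version checks below; elsewhere any
* unaligned 512-bit integer load falls back to loadu_si512, which has
* identical semantics. */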
#if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(10,0,0)) && (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0))
#define simde_mm512_loadu_si512(mem_addr) _mm512_loadu_si512(mem_addr)
#define simde_mm512_loadu_epi32(mem_addr) _mm512_loadu_epi32(mem_addr)
#define simde_mm512_loadu_epi64(mem_addr) _mm512_loadu_epi64(mem_addr)
#else
#define simde_mm512_loadu_epi32(mem_addr) simde_mm512_loadu_si512(mem_addr)
#define simde_mm512_loadu_epi64(mem_addr) simde_mm512_loadu_si512(mem_addr)
#endif
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(11,0,0)) && (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0))
#define simde_mm512_loadu_epi8(mem_addr) _mm512_loadu_epi8(mem_addr)
#define simde_mm512_loadu_epi16(mem_addr) _mm512_loadu_epi16(mem_addr)
#else
#define simde_mm512_loadu_epi8(mem_addr) simde_mm512_loadu_si512(mem_addr)
#define simde_mm512_loadu_epi16(mem_addr) simde_mm512_loadu_si512(mem_addr)
#endif
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_loadu_epi8
#undef _mm512_loadu_epi16
#define _mm512_loadu_epi8(a) simde_mm512_loadu_epi8(a)
#define _mm512_loadu_epi16(a) simde_mm512_loadu_epi16(a)
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_loadu_epi32
#undef _mm512_loadu_epi64
#undef _mm512_loadu_si512
#define _mm512_loadu_si512(a) simde_mm512_loadu_si512(a)
#define _mm512_loadu_epi32(a) simde_mm512_loadu_epi32(a)
#define _mm512_loadu_epi64(a) simde_mm512_loadu_epi64(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_loadu_epi16 (simde__mmask16 k, void const * mem_addr) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_loadu_epi16(k, HEDLEY_REINTERPRET_CAST(void const*, mem_addr));
#else
return simde_mm256_maskz_mov_epi16(k, simde_mm256_loadu_epi16(mem_addr));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_loadu_epi16
#define _mm256_maskz_loadu_epi16(k, mem_addr) simde_mm256_maskz_loadu_epi16(k, mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_maskz_loadu_ps (simde__mmask8 k, void const * mem_addr) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_loadu_ps(k, HEDLEY_REINTERPRET_CAST(void const*, mem_addr));
#else
return simde_mm256_maskz_mov_ps(k, simde_mm256_loadu_ps(HEDLEY_REINTERPRET_CAST(const float*, mem_addr)));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_loadu_ps
#define _mm256_maskz_loadu_ps(k, mem_addr) simde_mm256_maskz_loadu_ps(k, mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_loadu_epi16 (simde__m512i src, simde__mmask32 k, void const * mem_addr) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_loadu_epi16(src, k, HEDLEY_REINTERPRET_CAST(void const*, mem_addr));
#else
return simde_mm512_mask_mov_epi16(src, k, simde_mm512_loadu_epi16(mem_addr));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_loadu_epi16
#define _mm512_mask_loadu_epi16(src, k, mem_addr) simde_mm512_mask_loadu_epi16(src, k, mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_loadu_epi16 (simde__mmask32 k, void const * mem_addr) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_loadu_epi16(k, HEDLEY_REINTERPRET_CAST(void const*, mem_addr));
#else
return simde_mm512_maskz_mov_epi16(k, simde_mm512_loadu_epi16(mem_addr));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_loadu_epi16
#define _mm512_maskz_loadu_epi16(k, mem_addr) simde_mm512_maskz_loadu_epi16(k, mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_loadu_epi32 (simde__m512i src, simde__mmask16 k, void const * mem_addr) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_loadu_epi32(src, k, HEDLEY_REINTERPRET_CAST(void const*, mem_addr));
#else
return simde_mm512_mask_mov_epi32(src, k, simde_mm512_loadu_epi32(mem_addr));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_loadu_epi32
#define _mm512_mask_loadu_epi32(src, k, mem_addr) simde_mm512_mask_loadu_epi32(src, k, mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_loadu_epi64 (simde__m512i src, simde__mmask8 k, void const * mem_addr) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_loadu_epi64(src, k, HEDLEY_REINTERPRET_CAST(void const*, mem_addr));
#else
return simde_mm512_mask_mov_epi64(src, k, simde_mm512_loadu_epi64(mem_addr));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_loadu_epi64
#define _mm512_mask_loadu_epi64(src, k, mem_addr) simde_mm512_mask_loadu_epi64(src, k, mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_loadu_epi64 (simde__mmask8 k, void const * mem_addr) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_loadu_epi64(k, HEDLEY_REINTERPRET_CAST(void const*, mem_addr));
#else
return simde_mm512_maskz_mov_epi64(k, simde_mm512_loadu_epi64(mem_addr));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_loadu_epi64
#define _mm512_maskz_loadu_epi64(k, mem_addr) simde_mm512_maskz_loadu_epi64((k), (mem_addr))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_loadu_pd (simde__m512d src, simde__mmask8 k, void const * mem_addr) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_loadu_pd(src, k, HEDLEY_REINTERPRET_CAST(void const*, mem_addr));
#else
return simde_mm512_mask_mov_pd(src, k, simde_mm512_loadu_pd(mem_addr));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_loadu_pd
#define _mm512_mask_loadu_pd(src, k, mem_addr) simde_mm512_mask_loadu_pd(src, k, mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_loadu_ps (simde__m512 src, simde__mmask16 k, void const * mem_addr) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_loadu_ps(src, k, HEDLEY_REINTERPRET_CAST(void const*, mem_addr));
#else
return simde_mm512_mask_mov_ps(src, k, simde_mm512_loadu_ps(mem_addr));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_loadu_ps
#define _mm512_mask_loadu_ps(src, k, mem_addr) simde_mm512_mask_loadu_ps(src, k, mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_maskz_loadu_ps (simde__mmask16 k, void const * mem_addr) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_loadu_ps(k, HEDLEY_REINTERPRET_CAST(void const*, mem_addr));
#else
return simde_mm512_maskz_mov_ps(k, simde_mm512_loadu_ps(mem_addr));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_loadu_ps
#define _mm512_maskz_loadu_ps(k, mem_addr) simde_mm512_maskz_loadu_ps(k, mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_maskz_loadu_pd (simde__mmask8 k, void const * mem_addr) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_loadu_pd(k, HEDLEY_REINTERPRET_CAST(void const*, mem_addr));
#else
return simde_mm512_maskz_mov_pd(k, simde_mm512_loadu_pd(mem_addr));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_loadu_pd
#define _mm512_maskz_loadu_pd(k, mem_addr) simde_mm512_maskz_loadu_pd(k, mem_addr)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_LOADU_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/bitshuffle.h | .h | 7,989 | 203 | #if !defined(SIMDE_X86_AVX512_BITSHUFFLE_H)
#define SIMDE_X86_AVX512_BITSHUFFLE_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm_bitshuffle_epi64_mask (simde__m128i b, simde__m128i c) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_bitshuffle_epi64_mask(b, c);
#else
simde__m128i_private
b_ = simde__m128i_to_private(b),
c_ = simde__m128i_to_private(c);
simde__mmask16 r = 0;
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
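/* Bit j of each result byte is the bit of the corresponding 64-bit lane
* of b selected by byte j of c (low 6 bits only); the loop walks the 8
* selector bytes, processing both lanes per iteration. */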
__typeof__(b_.u64) rv = { 0, 0 };
__typeof__(b_.u64) lshift = { 0, 8 };
for (int8_t i = 0 ; i < 8 ; i++) {
__typeof__(b_.u64) ct = (HEDLEY_REINTERPRET_CAST(__typeof__(ct), c_.u8) >> (i * 8)) & 63;
rv |= ((b_.u64 >> ct) & 1) << lshift;
lshift += 1;
}
r =
HEDLEY_STATIC_CAST(simde__mmask16, rv[0]) |
HEDLEY_STATIC_CAST(simde__mmask16, rv[1]);
#else
for (size_t i = 0 ; i < (sizeof(c_.m64_private) / sizeof(c_.m64_private[0])) ; i++) {
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t j = 0 ; j < (sizeof(c_.m64_private[i].u8) / sizeof(c_.m64_private[i].u8[0])) ; j++) {
        r |= (((b_.u64[i] >> (c_.m64_private[i].u8[j] & 63)) & 1) << ((i * 8) + j));
}
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_bitshuffle_epi64_mask
#define _mm_bitshuffle_epi64_mask(b, c) simde_mm_bitshuffle_epi64_mask(b, c)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm_mask_bitshuffle_epi64_mask (simde__mmask16 k, simde__m128i b, simde__m128i c) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_bitshuffle_epi64_mask(k, b, c);
#else
return (k & simde_mm_bitshuffle_epi64_mask(b, c));
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_bitshuffle_epi64_mask
#define _mm_mask_bitshuffle_epi64_mask(k, b, c) simde_mm_mask_bitshuffle_epi64_mask(k, b, c)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm256_bitshuffle_epi64_mask (simde__m256i b, simde__m256i c) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_bitshuffle_epi64_mask(b, c);
#else
simde__m256i_private
b_ = simde__m256i_to_private(b),
c_ = simde__m256i_to_private(c);
simde__mmask32 r = 0;
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < sizeof(b_.m128i) / sizeof(b_.m128i[0]) ; i++) {
r |= (HEDLEY_STATIC_CAST(simde__mmask32, simde_mm_bitshuffle_epi64_mask(b_.m128i[i], c_.m128i[i])) << (i * 16));
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
__typeof__(b_.u64) rv = { 0, 0, 0, 0 };
__typeof__(b_.u64) lshift = { 0, 8, 16, 24 };
for (int8_t i = 0 ; i < 8 ; i++) {
__typeof__(b_.u64) ct = (HEDLEY_REINTERPRET_CAST(__typeof__(ct), c_.u8) >> (i * 8)) & 63;
rv |= ((b_.u64 >> ct) & 1) << lshift;
lshift += 1;
}
r =
HEDLEY_STATIC_CAST(simde__mmask32, rv[0]) |
HEDLEY_STATIC_CAST(simde__mmask32, rv[1]) |
HEDLEY_STATIC_CAST(simde__mmask32, rv[2]) |
HEDLEY_STATIC_CAST(simde__mmask32, rv[3]);
#else
for (size_t i = 0 ; i < (sizeof(c_.m128i_private) / sizeof(c_.m128i_private[0])) ; i++) {
for (size_t j = 0 ; j < (sizeof(c_.m128i_private[i].m64_private) / sizeof(c_.m128i_private[i].m64_private[0])) ; j++) {
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t k = 0 ; k < (sizeof(c_.m128i_private[i].m64_private[j].u8) / sizeof(c_.m128i_private[i].m64_private[j].u8[0])) ; k++) {
          r |= (((b_.m128i_private[i].u64[j] >> (c_.m128i_private[i].m64_private[j].u8[k] & 63)) & 1) << ((i * 16) + (j * 8) + k));
}
}
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_bitshuffle_epi64_mask
#define _mm256_bitshuffle_epi64_mask(b, c) simde_mm256_bitshuffle_epi64_mask(b, c)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm256_mask_bitshuffle_epi64_mask (simde__mmask32 k, simde__m256i b, simde__m256i c) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_bitshuffle_epi64_mask(k, b, c);
#else
return (k & simde_mm256_bitshuffle_epi64_mask(b, c));
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_bitshuffle_epi64_mask
#define _mm256_mask_bitshuffle_epi64_mask(k, b, c) simde_mm256_mask_bitshuffle_epi64_mask(k, b, c)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask64
simde_mm512_bitshuffle_epi64_mask (simde__m512i b, simde__m512i c) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE)
return _mm512_bitshuffle_epi64_mask(b, c);
#else
simde__m512i_private
b_ = simde__m512i_to_private(b),
c_ = simde__m512i_to_private(c);
simde__mmask64 r = 0;
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(b_.m128i) / sizeof(b_.m128i[0])) ; i++) {
r |= (HEDLEY_STATIC_CAST(simde__mmask64, simde_mm_bitshuffle_epi64_mask(b_.m128i[i], c_.m128i[i])) << (i * 16));
}
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(b_.m256i) / sizeof(b_.m256i[0])) ; i++) {
r |= (HEDLEY_STATIC_CAST(simde__mmask64, simde_mm256_bitshuffle_epi64_mask(b_.m256i[i], c_.m256i[i])) << (i * 32));
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
__typeof__(b_.u64) rv = { 0, 0, 0, 0, 0, 0, 0, 0 };
__typeof__(b_.u64) lshift = { 0, 8, 16, 24, 32, 40, 48, 56 };
for (int8_t i = 0 ; i < 8 ; i++) {
__typeof__(b_.u64) ct = (HEDLEY_REINTERPRET_CAST(__typeof__(ct), c_.u8) >> (i * 8)) & 63;
rv |= ((b_.u64 >> ct) & 1) << lshift;
lshift += 1;
}
r =
HEDLEY_STATIC_CAST(simde__mmask64, rv[0]) |
HEDLEY_STATIC_CAST(simde__mmask64, rv[1]) |
HEDLEY_STATIC_CAST(simde__mmask64, rv[2]) |
HEDLEY_STATIC_CAST(simde__mmask64, rv[3]) |
HEDLEY_STATIC_CAST(simde__mmask64, rv[4]) |
HEDLEY_STATIC_CAST(simde__mmask64, rv[5]) |
HEDLEY_STATIC_CAST(simde__mmask64, rv[6]) |
HEDLEY_STATIC_CAST(simde__mmask64, rv[7]);
#else
for (size_t i = 0 ; i < (sizeof(c_.m128i_private) / sizeof(c_.m128i_private[0])) ; i++) {
for (size_t j = 0 ; j < (sizeof(c_.m128i_private[i].m64_private) / sizeof(c_.m128i_private[i].m64_private[0])) ; j++) {
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t k = 0 ; k < (sizeof(c_.m128i_private[i].m64_private[j].u8) / sizeof(c_.m128i_private[i].m64_private[j].u8[0])) ; k++) {
          r |= (((b_.m128i_private[i].u64[j] >> (c_.m128i_private[i].m64_private[j].u8[k] & 63)) & 1) << ((i * 16) + (j * 8) + k));
}
}
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES)
#undef _mm512_bitshuffle_epi64_mask
#define _mm512_bitshuffle_epi64_mask(b, c) simde_mm512_bitshuffle_epi64_mask(b, c)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask64
simde_mm512_mask_bitshuffle_epi64_mask (simde__mmask64 k, simde__m512i b, simde__m512i c) {
#if defined(SIMDE_X86_AVX512BITALG_NATIVE)
return _mm512_mask_bitshuffle_epi64_mask(k, b, c);
#else
return (k & simde_mm512_bitshuffle_epi64_mask(b, c));
#endif
}
#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_bitshuffle_epi64_mask
#define _mm512_mask_bitshuffle_epi64_mask(k, b, c) simde_mm512_mask_bitshuffle_epi64_mask(k, b, c)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_BITSHUFFLE_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/conflict.h | .h | 11,984 | 352 | #if !defined(SIMDE_X86_AVX512_CONFLICT_H)
#define SIMDE_X86_AVX512_CONFLICT_H
#include "types.h"
#include "mov_mask.h"
#include "mov.h"
#include "cmpeq.h"
#include "set1.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
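/* conflict semantics: element i of the result is a bitmask with bit j
 * set (for j < i) when a[j] == a[i]; element 0 is always 0.  The
 * fallbacks broadcast each element, compare it against the whole
 * vector, and keep only the lower-indexed lanes via ((1 << i) - 1). */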
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_conflict_epi32 (simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE)
return _mm_conflict_epi32(a);
#else
simde__m128i_private
r_ = simde__m128i_to_private(simde_mm_setzero_si128()),
a_ = simde__m128i_to_private(a);
for (size_t i = 1 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] =
simde_mm_movemask_ps(
simde_mm_castsi128_ps(
simde_mm_cmpeq_epi32(simde_mm_set1_epi32(a_.i32[i]), a)
)
) & ((1 << i) - 1);
}
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES)
#undef _mm_conflict_epi32
#define _mm_conflict_epi32(a) simde_mm_conflict_epi32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_conflict_epi32 (simde__m128i src, simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE)
return _mm_mask_conflict_epi32(src, k, a);
#else
return simde_mm_mask_mov_epi32(src, k, simde_mm_conflict_epi32(a));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_conflict_epi32
#define _mm_mask_conflict_epi32(src, k, a) simde_mm_mask_conflict_epi32(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_conflict_epi32 (simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE)
return _mm_maskz_conflict_epi32(k, a);
#else
return simde_mm_maskz_mov_epi32(k, simde_mm_conflict_epi32(a));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_conflict_epi32
#define _mm_maskz_conflict_epi32(k, a) simde_mm_maskz_conflict_epi32(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_conflict_epi32 (simde__m256i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE)
return _mm256_conflict_epi32(a);
#else
simde__m256i_private
r_ = simde__m256i_to_private(simde_mm256_setzero_si256()),
a_ = simde__m256i_to_private(a);
for (size_t i = 1 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] =
simde_mm256_movemask_ps(
simde_mm256_castsi256_ps(
simde_mm256_cmpeq_epi32(simde_mm256_set1_epi32(a_.i32[i]), a)
)
) & ((1 << i) - 1);
}
return simde__m256i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES)
#undef _mm256_conflict_epi32
#define _mm256_conflict_epi32(a) simde_mm256_conflict_epi32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_conflict_epi32 (simde__m256i src, simde__mmask8 k, simde__m256i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE)
return _mm256_mask_conflict_epi32(src, k, a);
#else
return simde_mm256_mask_mov_epi32(src, k, simde_mm256_conflict_epi32(a));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_conflict_epi32
#define _mm256_mask_conflict_epi32(src, k, a) simde_mm256_mask_conflict_epi32(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_conflict_epi32 (simde__mmask8 k, simde__m256i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE)
return _mm256_maskz_conflict_epi32(k, a);
#else
return simde_mm256_maskz_mov_epi32(k, simde_mm256_conflict_epi32(a));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_conflict_epi32
#define _mm256_maskz_conflict_epi32(k, a) simde_mm256_maskz_conflict_epi32(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_conflict_epi32 (simde__m512i a) {
#if defined(SIMDE_X86_AVX512CD_NATIVE)
return _mm512_conflict_epi32(a);
#else
simde__m512i_private
r_ = simde__m512i_to_private(simde_mm512_setzero_si512()),
a_ = simde__m512i_to_private(a);
for (size_t i = 1 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] =
HEDLEY_STATIC_CAST(
int32_t,
simde_mm512_cmpeq_epi32_mask(simde_mm512_set1_epi32(a_.i32[i]), a)
) & ((1 << i) - 1);
}
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES)
#undef _mm512_conflict_epi32
#define _mm512_conflict_epi32(a) simde_mm512_conflict_epi32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_conflict_epi32 (simde__m512i src, simde__mmask16 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512CD_NATIVE)
return _mm512_mask_conflict_epi32(src, k, a);
#else
return simde_mm512_mask_mov_epi32(src, k, simde_mm512_conflict_epi32(a));
#endif
}
#if defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_conflict_epi32
#define _mm512_mask_conflict_epi32(src, k, a) simde_mm512_mask_conflict_epi32(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_conflict_epi32 (simde__mmask16 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512CD_NATIVE)
return _mm512_maskz_conflict_epi32(k, a);
#else
return simde_mm512_maskz_mov_epi32(k, simde_mm512_conflict_epi32(a));
#endif
}
#if defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_conflict_epi32
#define _mm512_maskz_conflict_epi32(k, a) simde_mm512_maskz_conflict_epi32(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_conflict_epi64 (simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE)
return _mm_conflict_epi64(a);
#else
simde__m128i_private
r_ = simde__m128i_to_private(simde_mm_setzero_si128()),
a_ = simde__m128i_to_private(a);
for (size_t i = 1 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
r_.i64[i] =
HEDLEY_STATIC_CAST(
int64_t,
simde_mm_movemask_pd(
simde_mm_castsi128_pd(
simde_mm_cmpeq_epi64(simde_mm_set1_epi64x(a_.i64[i]), a)
)
)
) & ((1 << i) - 1);
}
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES)
#undef _mm_conflict_epi64
#define _mm_conflict_epi64(a) simde_mm_conflict_epi64(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_conflict_epi64 (simde__m128i src, simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE)
return _mm_mask_conflict_epi64(src, k, a);
#else
return simde_mm_mask_mov_epi64(src, k, simde_mm_conflict_epi64(a));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_conflict_epi64
#define _mm_mask_conflict_epi64(src, k, a) simde_mm_mask_conflict_epi64(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_conflict_epi64 (simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE)
return _mm_maskz_conflict_epi64(k, a);
#else
return simde_mm_maskz_mov_epi64(k, simde_mm_conflict_epi64(a));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_conflict_epi64
#define _mm_maskz_conflict_epi64(k, a) simde_mm_maskz_conflict_epi64(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_conflict_epi64 (simde__m256i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE)
return _mm256_conflict_epi64(a);
#else
simde__m256i_private
r_ = simde__m256i_to_private(simde_mm256_setzero_si256()),
a_ = simde__m256i_to_private(a);
for (size_t i = 1 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
r_.i64[i] =
HEDLEY_STATIC_CAST(
int64_t,
simde_mm256_movemask_pd(
simde_mm256_castsi256_pd(
simde_mm256_cmpeq_epi64(simde_mm256_set1_epi64x(a_.i64[i]), a)
)
)
) & ((1 << i) - 1);
}
return simde__m256i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES)
#undef _mm256_conflict_epi64
#define _mm256_conflict_epi64(a) simde_mm256_conflict_epi64(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_conflict_epi64 (simde__m256i src, simde__mmask8 k, simde__m256i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE)
return _mm256_mask_conflict_epi64(src, k, a);
#else
return simde_mm256_mask_mov_epi64(src, k, simde_mm256_conflict_epi64(a));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_conflict_epi64
#define _mm256_mask_conflict_epi64(src, k, a) simde_mm256_mask_conflict_epi64(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_conflict_epi64 (simde__mmask8 k, simde__m256i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE)
return _mm256_maskz_conflict_epi64(k, a);
#else
return simde_mm256_maskz_mov_epi64(k, simde_mm256_conflict_epi64(a));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_conflict_epi64
#define _mm256_maskz_conflict_epi64(k, a) simde_mm256_maskz_conflict_epi64(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_conflict_epi64 (simde__m512i a) {
#if defined(SIMDE_X86_AVX512CD_NATIVE)
return _mm512_conflict_epi64(a);
#else
simde__m512i_private
r_ = simde__m512i_to_private(simde_mm512_setzero_si512()),
a_ = simde__m512i_to_private(a);
for (size_t i = 1 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
r_.i64[i] =
HEDLEY_STATIC_CAST(
int64_t,
simde_mm512_cmpeq_epi64_mask(simde_mm512_set1_epi64(a_.i64[i]), a)
) & ((1 << i) - 1);
}
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES)
#undef _mm512_conflict_epi64
#define _mm512_conflict_epi64(a) simde_mm512_conflict_epi64(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_conflict_epi64 (simde__m512i src, simde__mmask8 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512CD_NATIVE)
return _mm512_mask_conflict_epi64(src, k, a);
#else
return simde_mm512_mask_mov_epi64(src, k, simde_mm512_conflict_epi64(a));
#endif
}
#if defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_conflict_epi64
#define _mm512_mask_conflict_epi64(src, k, a) simde_mm512_mask_conflict_epi64(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_conflict_epi64 (simde__mmask8 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512CD_NATIVE)
return _mm512_maskz_conflict_epi64(k, a);
#else
return simde_mm512_maskz_mov_epi64(k, simde_mm512_conflict_epi64(a));
#endif
}
#if defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_conflict_epi64
#define _mm512_maskz_conflict_epi64(k, a) simde_mm512_maskz_conflict_epi64(k, a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_CONFLICT_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/subs.h | .h | 7,107 | 223 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Hidayat Khan <huk2209@gmail.com>
*/
#if !defined(SIMDE_X86_AVX512_SUBS_H)
#define SIMDE_X86_AVX512_SUBS_H
#include "types.h"
#include "../avx2.h"
#include "mov.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
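/* Saturating subtraction: results that would overflow are clamped to
 * the element type's limits instead of wrapping, e.g. for epi8
 * (-128) - 1 == -128 and for epu8 0 - 1 == 0. */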
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_subs_epi8 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_subs_epi8(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if !defined(HEDLEY_INTEL_VERSION)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_subs_epi8(a_.m256i[i], b_.m256i[i]);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = simde_math_subs_i8(a_.i8[i], b_.i8[i]);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_subs_epi8
#define _mm512_subs_epi8(a, b) simde_mm512_subs_epi8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_subs_epi8 (simde__m512i src, simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_subs_epi8(src, k, a, b);
#else
return simde_mm512_mask_mov_epi8(src, k, simde_mm512_subs_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_subs_epi8
#define _mm512_mask_subs_epi8(src, k, a, b) simde_mm512_mask_subs_epi8(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_subs_epi8 (simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_subs_epi8(k, a, b);
#else
return simde_mm512_maskz_mov_epi8(k, simde_mm512_subs_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_subs_epi8
#define _mm512_maskz_subs_epi8(k, a, b) simde_mm512_maskz_subs_epi8(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_subs_epi16 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_subs_epi16(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if !defined(HEDLEY_INTEL_VERSION)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_subs_epi16(a_.m256i[i], b_.m256i[i]);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = simde_math_subs_i16(a_.i16[i], b_.i16[i]);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_subs_epi16
#define _mm512_subs_epi16(a, b) simde_mm512_subs_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_subs_epu8 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_subs_epu8(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if !defined(HEDLEY_INTEL_VERSION)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_mm_subs_epu8(a_.m128i[i], b_.m128i[i]);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = simde_math_subs_u8(a_.u8[i], b_.u8[i]);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_subs_epu8
#define _mm512_subs_epu8(a, b) simde_mm512_subs_epu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_subs_epu8 (simde__m512i src, simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_subs_epu8(src, k, a, b);
#else
return simde_mm512_mask_mov_epi8(src, k, simde_mm512_subs_epu8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_subs_epu8
#define _mm512_mask_subs_epu8(src, k, a, b) simde_mm512_mask_subs_epu8(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_subs_epu8 (simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_subs_epu8(k, a, b);
#else
return simde_mm512_maskz_mov_epi8(k, simde_mm512_subs_epu8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_subs_epu8
#define _mm512_maskz_subs_epu8(k, a, b) simde_mm512_maskz_subs_epu8(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_subs_epu16 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_subs_epu16(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if !defined(HEDLEY_INTEL_VERSION)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_subs_epu16(a_.m256i[i], b_.m256i[i]);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = simde_math_subs_u16(a_.u16[i], b_.u16[i]);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_subs_epu16
#define _mm512_subs_epu16(a, b) simde_mm512_subs_epu16(a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_SUBS_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/2intersect.h | .h | 8,026 | 251 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Ashleigh Newman-Jones <ashnewman-jones@hotmail.co.uk>
*/
#if !defined(SIMDE_X86_AVX512_2INTERSECT_H)
#define SIMDE_X86_AVX512_2INTERSECT_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
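/* VP2INTERSECT semantics: bit i of *k1 is set when a[i] equals any
 * element of b, and bit j of *k2 is set when b[j] equals any element
 * of a.  The fallbacks below use a straightforward O(n^2) comparison
 * loop, accumulating both masks in the same pass. */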
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_2intersect_epi32(simde__m128i a, simde__m128i b, simde__mmask8 *k1, simde__mmask8 *k2) {
#if defined(SIMDE_X86_AVX512VP2INTERSECT_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
_mm_2intersect_epi32(a, b, k1, k2);
#else
simde__m128i_private
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
simde__mmask8
k1_ = 0,
k2_ = 0;
for (size_t i = 0 ; i < sizeof(a_.i32) / sizeof(a_.i32[0]) ; i++) {
#if defined(SIMDE_ENABLE_OPENMP)
#pragma omp simd reduction(|:k1_) reduction(|:k2_)
#else
SIMDE_VECTORIZE
#endif
for (size_t j = 0 ; j < sizeof(b_.i32) / sizeof(b_.i32[0]) ; j++) {
const int32_t m = a_.i32[i] == b_.i32[j];
k1_ |= m << i;
k2_ |= m << j;
}
}
*k1 = k1_;
*k2 = k2_;
#endif
}
#if defined(SIMDE_X86_AVX512VP2INTERSECT_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
  #undef _mm_2intersect_epi32
  #define _mm_2intersect_epi32(a, b, k1, k2) simde_mm_2intersect_epi32(a, b, k1, k2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_2intersect_epi64(simde__m128i a, simde__m128i b, simde__mmask8 *k1, simde__mmask8 *k2) {
#if defined(SIMDE_X86_AVX512VP2INTERSECT_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
_mm_2intersect_epi64(a, b, k1, k2);
#else
simde__m128i_private
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
simde__mmask8
k1_ = 0,
k2_ = 0;
for (size_t i = 0 ; i < sizeof(a_.i64) / sizeof(a_.i64[0]) ; i++) {
#if defined(SIMDE_ENABLE_OPENMP)
#pragma omp simd reduction(|:k1_) reduction(|:k2_)
#else
SIMDE_VECTORIZE
#endif
for (size_t j = 0 ; j < sizeof(b_.i64) / sizeof(b_.i64[0]) ; j++) {
const int32_t m = a_.i64[i] == b_.i64[j];
k1_ |= m << i;
k2_ |= m << j;
}
}
*k1 = k1_;
*k2 = k2_;
#endif
}
#if defined(SIMDE_X86_AVX512VP2INTERSECT_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
  #undef _mm_2intersect_epi64
  #define _mm_2intersect_epi64(a, b, k1, k2) simde_mm_2intersect_epi64(a, b, k1, k2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm256_2intersect_epi32(simde__m256i a, simde__m256i b, simde__mmask8 *k1, simde__mmask8 *k2) {
#if defined(SIMDE_X86_AVX512VP2INTERSECT_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
_mm256_2intersect_epi32(a, b, k1, k2);
#else
simde__m256i_private
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
simde__mmask8
k1_ = 0,
k2_ = 0;
for (size_t i = 0 ; i < sizeof(a_.i32) / sizeof(a_.i32[0]) ; i++) {
#if defined(SIMDE_ENABLE_OPENMP)
#pragma omp simd reduction(|:k1_) reduction(|:k2_)
#else
SIMDE_VECTORIZE
#endif
for (size_t j = 0 ; j < sizeof(b_.i32) / sizeof(b_.i32[0]) ; j++) {
const int32_t m = a_.i32[i] == b_.i32[j];
k1_ |= m << i;
k2_ |= m << j;
}
}
*k1 = k1_;
*k2 = k2_;
#endif
}
#if defined(SIMDE_X86_AVX512VP2INTERSECT_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_2intersect_epi32
  #define _mm256_2intersect_epi32(a, b, k1, k2) simde_mm256_2intersect_epi32(a, b, k1, k2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm256_2intersect_epi64(simde__m256i a, simde__m256i b, simde__mmask8 *k1, simde__mmask8 *k2) {
#if defined(SIMDE_X86_AVX512VP2INTERSECT_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
_mm256_2intersect_epi64(a, b, k1, k2);
#else
simde__m256i_private
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
simde__mmask8
k1_ = 0,
k2_ = 0;
for (size_t i = 0 ; i < sizeof(a_.i64) / sizeof(a_.i64[0]) ; i++) {
#if defined(SIMDE_ENABLE_OPENMP)
#pragma omp simd reduction(|:k1_) reduction(|:k2_)
#else
SIMDE_VECTORIZE
#endif
for (size_t j = 0 ; j < sizeof(b_.i64) / sizeof(b_.i64[0]) ; j++) {
const int32_t m = a_.i64[i] == b_.i64[j];
k1_ |= m << i;
k2_ |= m << j;
}
}
*k1 = k1_;
*k2 = k2_;
#endif
}
#if defined(SIMDE_X86_AVX512VP2INTERSECT_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_2intersect_epi64
  #define _mm256_2intersect_epi64(a, b, k1, k2) simde_mm256_2intersect_epi64(a, b, k1, k2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm512_2intersect_epi32(simde__m512i a, simde__m512i b, simde__mmask16 *k1, simde__mmask16 *k2) {
#if defined(SIMDE_X86_AVX512VP2INTERSECT_NATIVE)
_mm512_2intersect_epi32(a, b, k1, k2);
#else
simde__m512i_private
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
simde__mmask16
k1_ = 0,
k2_ = 0;
for (size_t i = 0 ; i < sizeof(a_.i32) / sizeof(a_.i32[0]) ; i++) {
#if defined(SIMDE_ENABLE_OPENMP)
#pragma omp simd reduction(|:k1_) reduction(|:k2_)
#else
SIMDE_VECTORIZE
#endif
for (size_t j = 0 ; j < sizeof(b_.i32) / sizeof(b_.i32[0]) ; j++) {
const int32_t m = a_.i32[i] == b_.i32[j];
k1_ |= m << i;
k2_ |= m << j;
}
}
*k1 = k1_;
*k2 = k2_;
#endif
}
#if defined(SIMDE_X86_AVX512VP2INTERSECT_ENABLE_NATIVE_ALIASES)
#undef _mm512_2intersect_epi32
#define _mm512_2intersect_epi32(a, b, k1, k2) simde_mm512_2intersect_epi32(a, b, k1, k2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm512_2intersect_epi64(simde__m512i a, simde__m512i b, simde__mmask8 *k1, simde__mmask8 *k2) {
#if defined(SIMDE_X86_AVX512VP2INTERSECT_NATIVE)
_mm512_2intersect_epi64(a, b, k1, k2);
#else
simde__m512i_private
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
simde__mmask8
k1_ = 0,
k2_ = 0;
for (size_t i = 0 ; i < sizeof(a_.i64) / sizeof(a_.i64[0]) ; i++) {
#if defined(SIMDE_ENABLE_OPENMP)
#pragma omp simd reduction(|:k1_) reduction(|:k2_)
#else
SIMDE_VECTORIZE
#endif
for (size_t j = 0 ; j < sizeof(b_.i64) / sizeof(b_.i64[0]) ; j++) {
const int32_t m = a_.i64[i] == b_.i64[j];
k1_ |= m << i;
k2_ |= m << j;
}
}
*k1 = k1_;
*k2 = k2_;
#endif
}
#if defined(SIMDE_X86_AVX512VP2INTERSECT_ENABLE_NATIVE_ALIASES)
#undef _mm512_2intersect_epi64
#define _mm512_2intersect_epi64(a, b, k1, k2) simde_mm512_2intersect_epi64(a, b, k1, k2)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_2INTERSECT_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/andnot.h | .h | 7,564 | 194 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Himanshi Mathur <himanshi18037@iiitd.ac.in>
*/
#if !defined(SIMDE_X86_AVX512_ANDNOT_H)
#define SIMDE_X86_AVX512_ANDNOT_H
#include "types.h"
#include "../avx2.h"
#include "mov.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
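/* andnot computes ~a & b.  The _ps/_pd forms have no dedicated
 * fallback: since the operation is purely bitwise they reinterpret the
 * float bits as integers, reuse the epi32/epi64 implementations, and
 * cast back. */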
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
#define simde_mm512_andnot_ps(a, b) _mm512_andnot_ps(a, b)
#else
#define simde_mm512_andnot_ps(a, b) simde_mm512_castsi512_ps(simde_mm512_andnot_si512(simde_mm512_castps_si512(a), simde_mm512_castps_si512(b)))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_andnot_ps
#define _mm512_andnot_ps(a, b) simde_mm512_andnot_ps(a, b)
#endif
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
#define simde_mm512_mask_andnot_ps(src, k, a, b) _mm512_mask_andnot_ps((src), (k), (a), (b))
#else
#define simde_mm512_mask_andnot_ps(src, k, a, b) simde_mm512_castsi512_ps(simde_mm512_mask_andnot_epi32(simde_mm512_castps_si512(src), k, simde_mm512_castps_si512(a), simde_mm512_castps_si512(b)))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_andnot_ps
#define _mm512_mask_andnot_ps(src, k, a, b) simde_mm512_mask_andnot_ps(src, k, a, b)
#endif
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
#define simde_mm512_maskz_andnot_ps(k, a, b) _mm512_maskz_andnot_ps((k), (a), (b))
#else
#define simde_mm512_maskz_andnot_ps(k, a, b) simde_mm512_castsi512_ps(simde_mm512_maskz_andnot_epi32(k, simde_mm512_castps_si512(a), simde_mm512_castps_si512(b)))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_andnot_ps
#define _mm512_maskz_andnot_ps(k, a, b) simde_mm512_maskz_andnot_ps(k, a, b)
#endif
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
#define simde_mm512_andnot_pd(a, b) _mm512_andnot_pd(a, b)
#else
#define simde_mm512_andnot_pd(a, b) simde_mm512_castsi512_pd(simde_mm512_andnot_si512(simde_mm512_castpd_si512(a), simde_mm512_castpd_si512(b)))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_andnot_pd
#define _mm512_andnot_pd(a, b) simde_mm512_andnot_pd(a, b)
#endif
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
#define simde_mm512_mask_andnot_pd(src, k, a, b) _mm512_mask_andnot_pd((src), (k), (a), (b))
#else
#define simde_mm512_mask_andnot_pd(src, k, a, b) simde_mm512_castsi512_pd(simde_mm512_mask_andnot_epi64(simde_mm512_castpd_si512(src), k, simde_mm512_castpd_si512(a), simde_mm512_castpd_si512(b)))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_andnot_pd
#define _mm512_mask_andnot_pd(src, k, a, b) simde_mm512_mask_andnot_pd(src, k, a, b)
#endif
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
#define simde_mm512_maskz_andnot_pd(k, a, b) _mm512_maskz_andnot_pd((k), (a), (b))
#else
#define simde_mm512_maskz_andnot_pd(k, a, b) simde_mm512_castsi512_pd(simde_mm512_maskz_andnot_epi64(k, simde_mm512_castpd_si512(a), simde_mm512_castpd_si512(b)))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_andnot_pd
#define _mm512_maskz_andnot_pd(k, a, b) simde_mm512_maskz_andnot_pd(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_andnot_si512 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_andnot_si512(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if defined(SIMDE_X86_AVX2_NATIVE)
r_.m256i[0] = simde_mm256_andnot_si256(a_.m256i[0], b_.m256i[0]);
r_.m256i[1] = simde_mm256_andnot_si256(a_.m256i[1], b_.m256i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])) ; i++) {
r_.i32f[i] = ~(a_.i32f[i]) & b_.i32f[i];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#define simde_mm512_andnot_epi32(a, b) simde_mm512_andnot_si512(a, b)
#define simde_mm512_andnot_epi64(a, b) simde_mm512_andnot_si512(a, b)
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_andnot_si512
#define _mm512_andnot_si512(a, b) simde_mm512_andnot_si512(a, b)
#undef _mm512_andnot_epi32
#define _mm512_andnot_epi32(a, b) simde_mm512_andnot_si512(a, b)
#undef _mm512_andnot_epi64
#define _mm512_andnot_epi64(a, b) simde_mm512_andnot_si512(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_andnot_epi32(simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_andnot_epi32(src, k, a, b);
#else
return simde_mm512_mask_mov_epi32(src, k, simde_mm512_andnot_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_andnot_epi32
#define _mm512_mask_andnot_epi32(src, k, a, b) simde_mm512_mask_andnot_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_andnot_epi32(simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_andnot_epi32(k, a, b);
#else
return simde_mm512_maskz_mov_epi32(k, simde_mm512_andnot_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_andnot_epi32
#define _mm512_maskz_andnot_epi32(k, a, b) simde_mm512_maskz_andnot_epi32(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_andnot_epi64(simde__m512i src, simde__mmask8 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_andnot_epi64(src, k, a, b);
#else
return simde_mm512_mask_mov_epi64(src, k, simde_mm512_andnot_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_andnot_epi64
#define _mm512_mask_andnot_epi64(src, k, a, b) simde_mm512_mask_andnot_epi64(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_andnot_epi64(simde__mmask8 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_andnot_epi64(k, a, b);
#else
return simde_mm512_maskz_mov_epi64(k, simde_mm512_andnot_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_andnot_epi64
#define _mm512_maskz_andnot_epi64(k, a, b) simde_mm512_maskz_andnot_epi64(k, a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_ANDNOT_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/cmpeq.h | .h | 7,901 | 242 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020-2021 Evan Nemerson <evan@nemerson.com>
* 2020 Himanshi Mathur <himanshi18037@iiitd.ac.in>
*/
#if !defined(SIMDE_X86_AVX512_CMPEQ_H)
#define SIMDE_X86_AVX512_CMPEQ_H
#include "types.h"
#include "../avx2.h"
#include "mov.h"
#include "mov_mask.h"
#include "cmp.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
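/* AVX-512 comparisons produce a bitmask rather than a vector: bit i of
 * the result is set when lane i compares equal.  The mask (k1)
 * variants simply AND that bitmask with k1. */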
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask64
simde_mm512_cmpeq_epi8_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_cmpeq_epi8_mask(a, b);
#else
simde__m512i_private
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
simde__mmask64 r;
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
r = 0;
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.m256i) / sizeof(a_.m256i[0])) ; i++) {
const uint32_t t = HEDLEY_STATIC_CAST(uint32_t, simde_mm256_movemask_epi8(simde_mm256_cmpeq_epi8(a_.m256i[i], b_.m256i[i])));
r |= HEDLEY_STATIC_CAST(uint64_t, t) << (i * 32);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
simde__m512i_private tmp;
tmp.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(tmp.i8), a_.i8 == b_.i8);
r = simde_mm512_movepi8_mask(simde__m512i_from_private(tmp));
#else
r = 0;
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) {
r |= (a_.u8[i] == b_.u8[i]) ? (UINT64_C(1) << i) : 0;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmpeq_epi8_mask
#define _mm512_cmpeq_epi8_mask(a, b) simde_mm512_cmpeq_epi8_mask(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask64
simde_mm512_mask_cmpeq_epi8_mask(simde__mmask64 k1, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_cmpeq_epi8_mask(k1, a, b);
#else
return simde_mm512_cmpeq_epi8_mask(a, b) & k1;
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cmpeq_epi8_mask
#define _mm512_mask_cmpeq_epi8_mask(k1, a, b) simde_mm512_mask_cmpeq_epi8_mask((k1), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm512_cmpeq_epi32_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_cmpeq_epi32_mask(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_cmpeq_epi32(a_.m256i[i], b_.m256i[i]);
}
return simde_mm512_movepi32_mask(simde__m512i_from_private(r_));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmpeq_epi32_mask
#define _mm512_cmpeq_epi32_mask(a, b) simde_mm512_cmpeq_epi32_mask(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm512_mask_cmpeq_epi32_mask (simde__mmask16 k1, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_cmpeq_epi32_mask(k1, a, b);
#else
return simde_mm512_cmpeq_epi32_mask(a, b) & k1;
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cmpeq_epi32_mask
#define _mm512_mask_cmpeq_epi32_mask(k1, a, b) simde_mm512_mask_cmpeq_epi32_mask(k1, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm512_cmpeq_epi64_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_cmpeq_epi64_mask(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_cmpeq_epi64(a_.m256i[i], b_.m256i[i]);
}
return simde_mm512_movepi64_mask(simde__m512i_from_private(r_));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmpeq_epi64_mask
#define _mm512_cmpeq_epi64_mask(a, b) simde_mm512_cmpeq_epi64_mask(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm512_mask_cmpeq_epi64_mask (simde__mmask8 k1, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_cmpeq_epi64_mask(k1, a, b);
#else
return simde_mm512_cmpeq_epi64_mask(a, b) & k1;
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cmpeq_epi64_mask
#define _mm512_mask_cmpeq_epi64_mask(k1, a, b) simde_mm512_mask_cmpeq_epi64_mask(k1, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm512_cmpeq_epu16_mask (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_cmpeq_epu16_mask(a, b);
#else
simde__m512i_private
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
simde__mmask32 r;
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
simde__m512i_private tmp;
tmp.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(tmp.u16), a_.u16 == b_.u16);
r = simde_mm512_movepi16_mask(simde__m512i_from_private(tmp));
#else
r = 0;
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.u16) / sizeof(a_.u16[0])) ; i++) {
      r |= (a_.u16[i] == b_.u16[i]) ? (UINT32_C(1) << i) : 0;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmpeq_epu16_mask
#define _mm512_cmpeq_epu16_mask(a, b) simde_mm512_cmpeq_epu16_mask((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm512_mask_cmpeq_epu16_mask(simde__mmask32 k1, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_cmpeq_epu16_mask(k1, a, b);
#else
return k1 & simde_mm512_cmpeq_epu16_mask(a, b);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cmpeq_epu16_mask
#define _mm512_mask_cmpeq_epu16_mask(k1, a, b) simde_mm512_mask_cmpeq_epu16_mask(k1, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm512_cmpeq_ps_mask (simde__m512 a, simde__m512 b) {
return simde_mm512_cmp_ps_mask(a, b, SIMDE_CMP_EQ_OQ);
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmpeq_ps_mask
  #define _mm512_cmpeq_ps_mask(a, b) simde_mm512_cmpeq_ps_mask(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm512_cmpeq_pd_mask (simde__m512d a, simde__m512d b) {
return simde_mm512_cmp_pd_mask(a, b, SIMDE_CMP_EQ_OQ);
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_cmpeq_pd_mask
  #define _mm512_cmpeq_pd_mask(a, b) simde_mm512_cmpeq_pd_mask(a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_CMPEQ_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/srli.h | .h | 6,641 | 181 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Hidayat Khan <huk2209@gmail.com>
*/
#if !defined(SIMDE_X86_AVX512_SRLI_H)
#define SIMDE_X86_AVX512_SRLI_H
#include "types.h"
#include "../avx2.h"
#include "mov.h"
#include "setzero.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
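/* srli shifts each lane right, shifting in zeros; a count of at least
 * the lane width yields 0, which is why the fallbacks below
 * special-case imm8 > 15 (epi16), > 31 (epi32) and > 63 (epi64). */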
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_srli_epi16 (simde__m512i a, const unsigned int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (defined(HEDLEY_GCC_VERSION) && ((__GNUC__ == 5 && __GNUC_MINOR__ == 5) || (__GNUC__ == 6 && __GNUC_MINOR__ >= 4)))
simde__m512i r;
SIMDE_CONSTIFY_16_(_mm512_srli_epi16, r, simde_mm512_setzero_si512(), imm8, a);
return r;
#elif defined(SIMDE_X86_AVX512BW_NATIVE)
return SIMDE_BUG_IGNORE_SIGN_CONVERSION(_mm512_srli_epi16(a, imm8));
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a);
if (HEDLEY_STATIC_CAST(unsigned int, imm8) > 15)
return simde_mm512_setzero_si512();
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.u16 = a_.u16 >> SIMDE_CAST_VECTOR_SHIFT_COUNT(16, imm8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = a_.u16[i] >> imm8;
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_NATIVE)
#define simde_mm512_srli_epi16(a, imm8) _mm512_srli_epi16(a, imm8)
#endif
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_srli_epi16
#define _mm512_srli_epi16(a, imm8) simde_mm512_srli_epi16(a, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_srli_epi32 (simde__m512i a, unsigned int imm8) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && (defined(HEDLEY_GCC_VERSION) && ((__GNUC__ == 5 && __GNUC_MINOR__ == 5) || (__GNUC__ == 6 && __GNUC_MINOR__ >= 4)))
simde__m512i r;
SIMDE_CONSTIFY_32_(_mm512_srli_epi32, r, simde_mm512_setzero_si512(), imm8, a);
return r;
#elif defined(SIMDE_X86_AVX512F_NATIVE)
return SIMDE_BUG_IGNORE_SIGN_CONVERSION(_mm512_srli_epi32(a, imm8));
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a);
#if defined(SIMDE_X86_AVX2_NATIVE)
r_.m256i[0] = simde_mm256_srli_epi32(a_.m256i[0], HEDLEY_STATIC_CAST(int, imm8));
r_.m256i[1] = simde_mm256_srli_epi32(a_.m256i[1], HEDLEY_STATIC_CAST(int, imm8));
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i[0] = simde_mm_srli_epi32(a_.m128i[0], HEDLEY_STATIC_CAST(int, imm8));
r_.m128i[1] = simde_mm_srli_epi32(a_.m128i[1], HEDLEY_STATIC_CAST(int, imm8));
r_.m128i[2] = simde_mm_srli_epi32(a_.m128i[2], HEDLEY_STATIC_CAST(int, imm8));
r_.m128i[3] = simde_mm_srli_epi32(a_.m128i[3], HEDLEY_STATIC_CAST(int, imm8));
#else
if (imm8 > 31) {
simde_memset(&r_, 0, sizeof(r_));
} else {
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.u32 = a_.u32 >> imm8;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] >> imm8;
}
#endif
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_srli_epi32
#define _mm512_srli_epi32(a, imm8) simde_mm512_srli_epi32(a, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_srli_epi64 (simde__m512i a, unsigned int imm8) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && (defined(HEDLEY_GCC_VERSION) && ((__GNUC__ == 5 && __GNUC_MINOR__ == 5) || (__GNUC__ == 6 && __GNUC_MINOR__ >= 4)))
simde__m512i r;
SIMDE_CONSTIFY_64_(_mm512_srli_epi64, r, simde_mm512_setzero_si512(), imm8, a);
return r;
#elif defined(SIMDE_X86_AVX512F_NATIVE)
return SIMDE_BUG_IGNORE_SIGN_CONVERSION(_mm512_srli_epi64(a, imm8));
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a);
#if defined(SIMDE_X86_AVX2_NATIVE)
r_.m256i[0] = simde_mm256_srli_epi64(a_.m256i[0], HEDLEY_STATIC_CAST(int, imm8));
r_.m256i[1] = simde_mm256_srli_epi64(a_.m256i[1], HEDLEY_STATIC_CAST(int, imm8));
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i[0] = simde_mm_srli_epi64(a_.m128i[0], HEDLEY_STATIC_CAST(int, imm8));
r_.m128i[1] = simde_mm_srli_epi64(a_.m128i[1], HEDLEY_STATIC_CAST(int, imm8));
r_.m128i[2] = simde_mm_srli_epi64(a_.m128i[2], HEDLEY_STATIC_CAST(int, imm8));
r_.m128i[3] = simde_mm_srli_epi64(a_.m128i[3], HEDLEY_STATIC_CAST(int, imm8));
#else
/* The Intel Intrinsics Guide says that only the 8 LSBits of imm8 are
* used. In this case we should do "imm8 &= 0xff" here. However in
* practice all bits are used. */
if (imm8 > 63) {
simde_memset(&r_, 0, sizeof(r_));
} else {
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_97248)
r_.u64 = a_.u64 >> imm8;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
r_.u64[i] = a_.u64[i] >> imm8;
}
#endif
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_srli_epi64
#define _mm512_srli_epi64(a, imm8) simde_mm512_srli_epi64(a, imm8)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_SRLI_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/add.h | .h | 21,097 | 642 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Hidayat Khan <huk2209@gmail.com>
*/
#if !defined(SIMDE_X86_AVX512_ADD_H)
#define SIMDE_X86_AVX512_ADD_H
#include "types.h"
#include "../avx2.h"
#include "mov.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
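/* Masked add variants: where bit i of k is set the result lane is
 * a[i] + b[i]; where it is clear, mask_* keeps src[i] and maskz_*
 * zeroes the lane.  The fallbacks compute a full add and blend with
 * mask_mov/maskz_mov. */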
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_add_epi8(simde__m128i src, simde__mmask16 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_mask_add_epi8(src, k, a, b);
#else
return simde_mm_mask_mov_epi8(src, k, simde_mm_add_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_add_epi8
#define _mm_mask_add_epi8(src, k, a, b) simde_mm_mask_add_epi8(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_add_epi8(simde__mmask16 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_maskz_add_epi8(k, a, b);
#else
return simde_mm_maskz_mov_epi8(k, simde_mm_add_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_add_epi8
#define _mm_maskz_add_epi8(k, a, b) simde_mm_maskz_add_epi8(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_add_epi16(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_mask_add_epi16(src, k, a, b);
#else
return simde_mm_mask_mov_epi16(src, k, simde_mm_add_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_add_epi16
#define _mm_mask_add_epi16(src, k, a, b) simde_mm_mask_add_epi16(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_add_epi16(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_maskz_add_epi16(k, a, b);
#else
return simde_mm_maskz_mov_epi16(k, simde_mm_add_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_add_epi16
#define _mm_maskz_add_epi16(k, a, b) simde_mm_maskz_add_epi16(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_add_epi32(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_add_epi32(src, k, a, b);
#else
return simde_mm_mask_mov_epi32(src, k, simde_mm_add_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_add_epi32
#define _mm_mask_add_epi32(src, k, a, b) simde_mm_mask_add_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_add_epi32(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_add_epi32(k, a, b);
#else
return simde_mm_maskz_mov_epi32(k, simde_mm_add_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_add_epi32
#define _mm_maskz_add_epi32(k, a, b) simde_mm_maskz_add_epi32(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_add_epi64(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_add_epi64(src, k, a, b);
#else
return simde_mm_mask_mov_epi64(src, k, simde_mm_add_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_add_epi64
#define _mm_mask_add_epi64(src, k, a, b) simde_mm_mask_add_epi64(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_add_epi64(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_add_epi64(k, a, b);
#else
return simde_mm_maskz_mov_epi64(k, simde_mm_add_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_add_epi64
#define _mm_maskz_add_epi64(k, a, b) simde_mm_maskz_add_epi64(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mask_add_ss(simde__m128 src, simde__mmask8 k, simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
return _mm_mask_add_ss(src, k, a, b);
#elif 1
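/* the `#elif 1` above keeps this portable scalar path enabled; the
 * simde_mm_move_ss composition in the #else branch below is retained for
 * reference but never compiled */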
simde__m128_private
src_ = simde__m128_to_private(src),
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b),
r_ = simde__m128_to_private(a);
r_.f32[0] = (k & 1) ? (a_.f32[0] + b_.f32[0]) : src_.f32[0];
return simde__m128_from_private(r_);
#else
return simde_mm_move_ss(a, simde_mm_mask_mov_ps(src, k, simde_mm_add_ps(a, b)));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_add_ss
#define _mm_mask_add_ss(src, k, a, b) simde_mm_mask_add_ss(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_maskz_add_ss(simde__mmask8 k, simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
return _mm_maskz_add_ss(k, a, b);
#elif 1
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b),
r_ = simde__m128_to_private(a);
r_.f32[0] = (k & 1) ? (a_.f32[0] + b_.f32[0]) : 0.0f;
return simde__m128_from_private(r_);
#else
return simde_mm_move_ss(a, simde_mm_maskz_mov_ps(k, simde_mm_add_ps(a, b)));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_add_ss
#define _mm_maskz_add_ss(k, a, b) simde_mm_maskz_add_ss(k, a, b)
#endif
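/* A minimal self-test sketch of the *_ss semantics above: only element 0 is
 * conditional on bit 0 of `k`; elements 1..3 always come from `a`, never from
 * `src`. Not part of upstream SIMDe -- the SIMDE_DEMO_ADD_SS opt-in guard is
 * hypothetical, and the sketch uses only names already in scope here. */
#if defined(SIMDE_DEMO_ADD_SS)
static int simde_demo_add_ss(void) {
simde__m128 a   = simde_mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f); /* elements 3..0 */
simde__m128 b   = simde_mm_set_ps(40.0f, 30.0f, 20.0f, 10.0f);
simde__m128 src = simde_mm_set1_ps(-1.0f);
simde__m128_private r;
r = simde__m128_to_private(simde_mm_mask_add_ss(src, 0, a, b));
if (r.f32[0] != -1.0f || r.f32[1] != 2.0f) return 0; /* e0 from src, rest from a */
r = simde__m128_to_private(simde_mm_maskz_add_ss(1, a, b));
return (r.f32[0] == 11.0f) && (r.f32[3] == 4.0f); /* e0 = a0+b0, rest still from a */
}
#endif /* SIMDE_DEMO_ADD_SS */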
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_add_epi16(simde__m256i src, simde__mmask16 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_mask_add_epi16(src, k, a, b);
#else
return simde_mm256_mask_mov_epi16(src, k, simde_mm256_add_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_add_epi16
#define _mm256_mask_add_epi16(src, k, a, b) simde_mm256_mask_add_epi16(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_add_epi16(simde__mmask16 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_maskz_add_epi16(k, a, b);
#else
return simde_mm256_maskz_mov_epi16(k, simde_mm256_add_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_add_epi16
#define _mm256_maskz_add_epi16(k, a, b) simde_mm256_maskz_add_epi16(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_add_epi32(simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_add_epi32(src, k, a, b);
#else
return simde_mm256_mask_mov_epi32(src, k, simde_mm256_add_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_add_epi32
#define _mm256_mask_add_epi32(src, k, a, b) simde_mm256_mask_add_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_add_epi32(simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_add_epi32(k, a, b);
#else
return simde_mm256_maskz_mov_epi32(k, simde_mm256_add_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_add_epi32
#define _mm256_maskz_add_epi32(k, a, b) simde_mm256_maskz_add_epi32(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_add_epi64(simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_add_epi64(src, k, a, b);
#else
return simde_mm256_mask_mov_epi64(src, k, simde_mm256_add_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_add_epi64
#define _mm256_mask_add_epi64(src, k, a, b) simde_mm256_mask_add_epi64(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_add_epi64(simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_add_epi64(k, a, b);
#else
return simde_mm256_maskz_mov_epi64(k, simde_mm256_add_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_add_epi64
#define _mm256_maskz_add_epi64(k, a, b) simde_mm256_maskz_add_epi64(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_add_epi8 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_add_epi8(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i8 = a_.i8 + b_.i8;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_add_epi8(a_.m256i[i], b_.m256i[i]);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_add_epi8
#define _mm512_add_epi8(a, b) simde_mm512_add_epi8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_add_epi8 (simde__m512i src, simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_add_epi8(src, k, a, b);
#else
return simde_mm512_mask_mov_epi8(src, k, simde_mm512_add_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_add_epi8
#define _mm512_mask_add_epi8(src, k, a, b) simde_mm512_mask_add_epi8(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_add_epi8 (simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_add_epi8(k, a, b);
#else
return simde_mm512_maskz_mov_epi8(k, simde_mm512_add_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_add_epi8
#define _mm512_maskz_add_epi8(k, a, b) simde_mm512_maskz_add_epi8(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_add_epi16 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_add_epi16(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i16 = a_.i16 + b_.i16;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_add_epi16(a_.m256i[i], b_.m256i[i]);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_add_epi16
#define _mm512_add_epi16(a, b) simde_mm512_add_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_add_epi16 (simde__m512i src, simde__mmask32 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_add_epi16(src, k, a, b);
#else
return simde_mm512_mask_mov_epi16(src, k, simde_mm512_add_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_add_epi16
#define _mm512_mask_add_epi16(src, k, a, b) simde_mm512_mask_add_epi16(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_add_epi16 (simde__mmask32 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_add_epi16(k, a, b);
#else
return simde_mm512_maskz_mov_epi16(k, simde_mm512_add_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_add_epi16
#define _mm512_maskz_add_epi16(k, a, b) simde_mm512_maskz_add_epi16(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_add_epi32 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_add_epi32(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if defined(SIMDE_ARM_SVE_NATIVE)
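/* length-agnostic SVE strip-mining: svwhilelt_b32 builds a predicate for the
 * lanes still in range, so the final partial vector is handled without a
 * scalar tail loop */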
const size_t n = sizeof(a_.i32) / sizeof(a_.i32[0]);
size_t i = 0;
svbool_t pg = svwhilelt_b32(i, n);
do {
svint32_t
va = svld1_s32(pg, &(a_.i32[i])),
vb = svld1_s32(pg, &(b_.i32[i]));
svst1_s32(pg, &(r_.i32[i]), svadd_s32_x(pg, va, vb));
i += svcntw();
pg = svwhilelt_b32(i, n);
} while (svptest_any(svptrue_b32(), pg));
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_add_epi32(a_.m256i[i], b_.m256i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 + b_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_add_epi32(a_.m256i[i], b_.m256i[i]);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_add_epi32
#define _mm512_add_epi32(a, b) simde_mm512_add_epi32(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_add_epi32(simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_add_epi32(src, k, a, b);
#else
return simde_mm512_mask_mov_epi32(src, k, simde_mm512_add_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_add_epi32
#define _mm512_mask_add_epi32(src, k, a, b) simde_mm512_mask_add_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_add_epi32(simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_add_epi32(k, a, b);
#else
return simde_mm512_maskz_mov_epi32(k, simde_mm512_add_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_add_epi32
#define _mm512_maskz_add_epi32(k, a, b) simde_mm512_maskz_add_epi32(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_add_epi64 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_add_epi64(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_add_epi64(a_.m256i[i], b_.m256i[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_CLANG_BAD_VI64_OPS)
r_.i64 = a_.i64 + b_.i64;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_add_epi64(a_.m256i[i], b_.m256i[i]);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_add_epi64
#define _mm512_add_epi64(a, b) simde_mm512_add_epi64(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_add_epi64(simde__m512i src, simde__mmask8 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_add_epi64(src, k, a, b);
#else
return simde_mm512_mask_mov_epi64(src, k, simde_mm512_add_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_add_epi64
#define _mm512_mask_add_epi64(src, k, a, b) simde_mm512_mask_add_epi64(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_add_epi64(simde__mmask8 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_add_epi64(k, a, b);
#else
return simde_mm512_maskz_mov_epi64(k, simde_mm512_add_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_add_epi64
#define _mm512_maskz_add_epi64(k, a, b) simde_mm512_maskz_add_epi64(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_add_ps (simde__m512 a, simde__m512 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_add_ps(a, b);
#else
simde__m512_private
r_,
a_ = simde__m512_to_private(a),
b_ = simde__m512_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 + b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.m256) / sizeof(r_.m256[0])) ; i++) {
r_.m256[i] = simde_mm256_add_ps(a_.m256[i], b_.m256[i]);
}
#endif
return simde__m512_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_add_ps
#define _mm512_add_ps(a, b) simde_mm512_add_ps(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_add_ps(simde__m512 src, simde__mmask16 k, simde__m512 a, simde__m512 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_add_ps(src, k, a, b);
#else
return simde_mm512_mask_mov_ps(src, k, simde_mm512_add_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_add_ps
#define _mm512_mask_add_ps(src, k, a, b) simde_mm512_mask_add_ps(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_maskz_add_ps(simde__mmask16 k, simde__m512 a, simde__m512 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_add_ps(k, a, b);
#else
return simde_mm512_maskz_mov_ps(k, simde_mm512_add_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_add_ps
#define _mm512_maskz_add_ps(k, a, b) simde_mm512_maskz_add_ps(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_add_pd (simde__m512d a, simde__m512d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_add_pd(a, b);
#else
simde__m512d_private
r_,
a_ = simde__m512d_to_private(a),
b_ = simde__m512d_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f64 = a_.f64 + b_.f64;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.m256d) / sizeof(r_.m256d[0])) ; i++) {
r_.m256d[i] = simde_mm256_add_pd(a_.m256d[i], b_.m256d[i]);
}
#endif
return simde__m512d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_add_pd
#define _mm512_add_pd(a, b) simde_mm512_add_pd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_add_pd(simde__m512d src, simde__mmask8 k, simde__m512d a, simde__m512d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_add_pd(src, k, a, b);
#else
return simde_mm512_mask_mov_pd(src, k, simde_mm512_add_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_add_pd
#define _mm512_mask_add_pd(src, k, a, b) simde_mm512_mask_add_pd(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_maskz_add_pd(simde__mmask8 k, simde__m512d a, simde__m512d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_add_pd(k, a, b);
#else
return simde_mm512_maskz_mov_pd(k, simde_mm512_add_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_add_pd
#define _mm512_maskz_add_pd(k, a, b) simde_mm512_maskz_add_pd(k, a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_ADD_H) */
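/* A minimal usage sketch, not part of upstream SIMDe: it shows the mask/maskz
 * pattern used throughout this header -- masked-off elements come from `src`
 * (mask) or are zeroed (maskz). The SIMDE_DEMO_ADD guard is hypothetical and
 * the sketch assumes the umbrella header <simde/x86/avx512.h> is on the
 * include path; build it as its own translation unit. */
#if defined(SIMDE_DEMO_ADD)
#include <stdio.h>
#include <simde/x86/avx512.h>

int main(void) {
simde__m128i a   = simde_mm_set_epi32(40, 30, 20, 10); /* elements 3..0 */
simde__m128i b   = simde_mm_set_epi32( 4,  3,  2,  1);
simde__m128i src = simde_mm_set1_epi32(-1);
simde__mmask8 k  = 0x5; /* select elements 0 and 2 */
int32_t out[4];
simde_mm_storeu_si128(HEDLEY_REINTERPRET_CAST(simde__m128i*, out), simde_mm_mask_add_epi32(src, k, a, b));
printf("mask : %d %d %d %d\n", out[0], out[1], out[2], out[3]); /* 11 -1 33 -1 */
simde_mm_storeu_si128(HEDLEY_REINTERPRET_CAST(simde__m128i*, out), simde_mm_maskz_add_epi32(k, a, b));
printf("maskz: %d %d %d %d\n", out[0], out[1], out[2], out[3]); /* 11 0 33 0 */
return 0;
}
#endif /* SIMDE_DEMO_ADD */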
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/cvtus.h | .h | 2,475 | 68 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2023 Michael R. Crusoe <crusoe@debian.org>
*/
#if !defined(SIMDE_X86_AVX512_CVTUS_H)
#define SIMDE_X86_AVX512_CVTUS_H
#include "types.h"
#include "mov.h"
#include "storeu.h"
#include "loadu.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm512_mask_cvtusepi32_storeu_epi8 (void* base_addr, simde__mmask16 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
_mm512_mask_cvtusepi32_storeu_epi8(base_addr, k, a);
#else
simde__m128i_private r_ = simde__m128i_to_private(simde_mm_loadu_epi8(base_addr));
simde__m512i_private a_ = simde__m512i_to_private(a);
/* the intrinsic reads and writes at most 16 bytes (one byte per u32 lane),
 * so only a 128-bit read-modify-write is performed here */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.u32) / sizeof(a_.u32[0])) ; i++) {
r_.i8[i] = ((k >> i) & 1) ?
((a_.u32[i] > UINT8_MAX)
? (HEDLEY_STATIC_CAST(int8_t, UINT8_MAX))
: HEDLEY_STATIC_CAST(int8_t, a_.u32[i])) : r_.i8[i];
}
simde_mm_storeu_epi8(base_addr, simde__m128i_from_private(r_));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cvtusepi32_storeu_epi8
#define _mm512_mask_cvtusepi32_storeu_epi8(base_addr, k, a) simde_mm512_mask_cvtusepi32_storeu_epi8((base_addr), (k), (a))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_CVTUS_H) */
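/* A minimal usage sketch, not part of upstream SIMDe: each u32 lane is
 * clamped to [0, 255] before the masked byte store, and bytes whose mask bit
 * is clear keep their previous value. The SIMDE_DEMO_CVTUS guard and the
 * umbrella include path are hypothetical; build as a separate translation
 * unit. */
#if defined(SIMDE_DEMO_CVTUS)
#include <stdio.h>
#include <string.h>
#include <simde/x86/avx512.h>

int main(void) {
uint8_t buf[16];
memset(buf, 0xAA, sizeof(buf));
simde__m512i a = simde_mm512_set_epi32(
15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 300, 256, 255, 1, 0); /* lanes 15..0 */
/* store only lanes 0..3; the remaining bytes keep their 0xAA value */
simde_mm512_mask_cvtusepi32_storeu_epi8(buf, HEDLEY_STATIC_CAST(simde__mmask16, 0x000F), a);
printf("%d %d %d %d %d\n", buf[0], buf[1], buf[2], buf[3], buf[4]);
/* expected: 0 1 255 255 170  (256 saturates to 255) */
return 0;
}
#endif /* SIMDE_DEMO_CVTUS */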
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/blend.h | .h | 9,480 | 294 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Hidayat Khan <huk2209@gmail.com>
*/
#if !defined(SIMDE_X86_AVX512_BLEND_H)
#define SIMDE_X86_AVX512_BLEND_H
#include "types.h"
#include "mov.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_blend_epi8(simde__mmask16 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_mask_blend_epi8(k, a, b);
#else
return simde_mm_mask_mov_epi8(a, k, b);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_blend_epi8
#define _mm_mask_blend_epi8(k, a, b) simde_mm_mask_blend_epi8(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_blend_epi16(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_mask_blend_epi16(k, a, b);
#else
return simde_mm_mask_mov_epi16(a, k, b);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_blend_epi16
#define _mm_mask_blend_epi16(k, a, b) simde_mm_mask_blend_epi16(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_blend_epi32(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_blend_epi32(k, a, b);
#else
return simde_mm_mask_mov_epi32(a, k, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_blend_epi32
#define _mm_mask_blend_epi32(k, a, b) simde_mm_mask_blend_epi32(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_blend_epi64(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_blend_epi64(k, a, b);
#else
return simde_mm_mask_mov_epi64(a, k, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_blend_epi64
#define _mm_mask_blend_epi64(k, a, b) simde_mm_mask_blend_epi64(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mask_blend_ps(simde__mmask8 k, simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_blend_ps(k, a, b);
#else
return simde_mm_mask_mov_ps(a, k, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_blend_ps
#define _mm_mask_blend_ps(k, a, b) simde_mm_mask_blend_ps(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_mask_blend_pd(simde__mmask8 k, simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_blend_pd(k, a, b);
#else
return simde_mm_mask_mov_pd(a, k, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_blend_pd
#define _mm_mask_blend_pd(k, a, b) simde_mm_mask_blend_pd(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_blend_epi8(simde__mmask32 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_mask_blend_epi8(k, a, b);
#else
return simde_mm256_mask_mov_epi8(a, k, b);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_blend_epi8
#define _mm256_mask_blend_epi8(k, a, b) simde_mm256_mask_blend_epi8(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_blend_epi16(simde__mmask16 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm256_mask_blend_epi16(k, a, b);
#else
return simde_mm256_mask_mov_epi16(a, k, b);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_blend_epi16
#define _mm256_mask_blend_epi16(k, a, b) simde_mm256_mask_blend_epi16(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_blend_epi32(simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_blend_epi32(k, a, b);
#else
return simde_mm256_mask_mov_epi32(a, k, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_blend_epi32
#define _mm256_mask_blend_epi32(k, a, b) simde_mm256_mask_blend_epi32(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_blend_epi64(simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_blend_epi64(k, a, b);
#else
return simde_mm256_mask_mov_epi64(a, k, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_blend_epi64
#define _mm256_mask_blend_epi64(k, a, b) simde_mm256_mask_blend_epi64(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_mask_blend_ps(simde__mmask8 k, simde__m256 a, simde__m256 b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_blend_ps(k, a, b);
#else
return simde_mm256_mask_mov_ps(a, k, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_blend_ps
#define _mm256_mask_blend_ps(k, a, b) simde_mm256_mask_blend_ps(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_mask_blend_pd(simde__mmask8 k, simde__m256d a, simde__m256d b) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_blend_pd(k, a, b);
#else
return simde_mm256_mask_mov_pd(a, k, b);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_blend_pd
#define _mm256_mask_blend_pd(k, a, b) simde_mm256_mask_blend_pd(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_blend_epi8(simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_blend_epi8(k, a, b);
#else
return simde_mm512_mask_mov_epi8(a, k, b);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_blend_epi8
#define _mm512_mask_blend_epi8(k, a, b) simde_mm512_mask_blend_epi8(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_blend_epi16(simde__mmask32 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_blend_epi16(k, a, b);
#else
return simde_mm512_mask_mov_epi16(a, k, b);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_blend_epi16
#define _mm512_mask_blend_epi16(k, a, b) simde_mm512_mask_blend_epi16(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_blend_epi32(simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_blend_epi32(k, a, b);
#else
return simde_mm512_mask_mov_epi32(a, k, b);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_blend_epi32
#define _mm512_mask_blend_epi32(k, a, b) simde_mm512_mask_blend_epi32(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_blend_epi64(simde__mmask8 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_blend_epi64(k, a, b);
#else
return simde_mm512_mask_mov_epi64(a, k, b);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_blend_epi64
#define _mm512_mask_blend_epi64(k, a, b) simde_mm512_mask_blend_epi64(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_blend_ps(simde__mmask16 k, simde__m512 a, simde__m512 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_blend_ps(k, a, b);
#else
return simde_mm512_mask_mov_ps(a, k, b);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_blend_ps
#define _mm512_mask_blend_ps(k, a, b) simde_mm512_mask_blend_ps(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_blend_pd(simde__mmask8 k, simde__m512d a, simde__m512d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_blend_pd(k, a, b);
#else
return simde_mm512_mask_mov_pd(a, k, b);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_blend_pd
#define _mm512_mask_blend_pd(k, a, b) simde_mm512_mask_blend_pd(k, a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_BLEND_H) */
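/* A minimal usage sketch, not part of upstream SIMDe: blend is mask_mov with
 * the operands swapped -- a set bit in `k` selects the element from `b`, a
 * clear bit keeps `a`. The SIMDE_DEMO_BLEND guard and the umbrella include
 * path are hypothetical; build as a separate translation unit. */
#if defined(SIMDE_DEMO_BLEND)
#include <stdio.h>
#include <simde/x86/avx512.h>

int main(void) {
simde__m128i a = simde_mm_set1_epi32(0);
simde__m128i b = simde_mm_set1_epi32(7);
int32_t out[4];
simde_mm_storeu_si128(HEDLEY_REINTERPRET_CAST(simde__m128i*, out),
simde_mm_mask_blend_epi32(0x6, a, b)); /* elements 1 and 2 from b */
printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* 0 7 7 0 */
return 0;
}
#endif /* SIMDE_DEMO_BLEND */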
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/sllv.h | .h | 4,081 | 123 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Hidayat Khan <huk2209@gmail.com>
*/
#if !defined(SIMDE_X86_AVX512_SLLV_H)
#define SIMDE_X86_AVX512_SLLV_H
#include "types.h"
#include "mov.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_sllv_epi16 (simde__m512i a, simde__m512i b) {
simde__m512i_private
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b),
r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), (b_.u16 < 16)) & (a_.u16 << b_.u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = (b_.u16[i] < 16) ? HEDLEY_STATIC_CAST(uint16_t, (a_.u16[i] << b_.u16[i])) : 0;
}
#endif
return simde__m512i_from_private(r_);
}
#if defined(SIMDE_X86_AVX512BW_NATIVE)
#define simde_mm512_sllv_epi16(a, b) _mm512_sllv_epi16(a, b)
#endif
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_sllv_epi16
#define _mm512_sllv_epi16(a, b) simde_mm512_sllv_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_sllv_epi32 (simde__m512i a, simde__m512i b) {
simde__m512i_private
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b),
r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), (b_.u32 < 32)) & (a_.u32 << b_.u32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = (b_.u32[i] < 32) ? HEDLEY_STATIC_CAST(uint32_t, (a_.u32[i] << b_.u32[i])) : 0;
}
#endif
return simde__m512i_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_sllv_epi32(a, b) _mm512_sllv_epi32(a, b)
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_sllv_epi32
#define _mm512_sllv_epi32(a, b) simde_mm512_sllv_epi32(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_sllv_epi64 (simde__m512i a, simde__m512i b) {
simde__m512i_private
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b),
r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), (b_.u64 < 64)) & (a_.u64 << b_.u64);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
r_.u64[i] = (b_.u64[i] < 64) ? HEDLEY_STATIC_CAST(uint64_t, (a_.u64[i] << b_.u64[i])) : 0;
}
#endif
return simde__m512i_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_sllv_epi64(a, b) _mm512_sllv_epi64(a, b)
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_sllv_epi64
#define _mm512_sllv_epi64(a, b) simde_mm512_sllv_epi64(a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_SLLV_H) */
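/* A minimal usage sketch, not part of upstream SIMDe: sllv shifts each lane
 * by its own count, and -- as the scalar fallbacks above make explicit --
 * any lane whose count is >= the element width is flushed to zero. The
 * SIMDE_DEMO_SLLV guard and the umbrella include path are hypothetical. */
#if defined(SIMDE_DEMO_SLLV)
#include <stdio.h>
#include <simde/x86/avx512.h>

int main(void) {
simde__m512i a = simde_mm512_set1_epi32(1);
simde__m512i counts = simde_mm512_set_epi32(
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 33, 32, 31, 4); /* lanes 15..0 */
int32_t out[16];
simde_mm512_storeu_si512(out, simde_mm512_sllv_epi32(a, counts));
printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);
/* expected: 16 -2147483648 0 0  (counts >= 32 yield 0) */
return 0;
}
#endif /* SIMDE_DEMO_SLLV */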
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/gather.h | .h | 13,845 | 273 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2023 Michael R. Crusoe <crusoe@debian.org>
*/
#if !defined(SIMDE_X86_AVX512_GATHER_H)
#define SIMDE_X86_AVX512_GATHER_H
#include "types.h"
#include "../avx2.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_i32gather_ps(simde__m512i vindex, const void* base_addr, const int32_t scale)
SIMDE_REQUIRE_CONSTANT(scale)
HEDLEY_REQUIRE_MSG((scale && scale <= 8 && !(scale & (scale - 1))), "`scale' must be a power of two less than or equal to 8") {
simde__m512i_private vindex_ = simde__m512i_to_private(vindex);
simde__m512_private r_ = simde__m512_to_private(simde_mm512_setzero_ps());
const uint8_t* addr = HEDLEY_REINTERPRET_CAST(const uint8_t*, base_addr);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(vindex_.i32) / sizeof(vindex_.i32[0])) ; i++) {
const uint8_t* src = addr + (HEDLEY_STATIC_CAST(size_t, vindex_.i32[i]) * HEDLEY_STATIC_CAST(size_t, scale));
simde_float32 dst;
simde_memcpy(&dst, src, sizeof(dst));
r_.f32[i] = dst;
}
return simde__m512_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0))
#define simde_mm512_i32gather_ps(vindex, base_addr, scale) _mm512_i32gather_ps((vindex), (base_addr), (scale))
#elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm512_i32gather_ps(vindex, base_addr, scale) SIMDE_STATEMENT_EXPR_(({\
simde__m512_private simde_mm512_i32gather_ps_r_; \
simde__m512i_private simde_mm512_i32gather_ps_vindex_ = simde__m512i_to_private((vindex)); \
simde_mm512_i32gather_ps_r_.m256[0] = _mm256_i32gather_ps( \
HEDLEY_STATIC_CAST(float const*, (base_addr)), simde_mm512_i32gather_ps_vindex_.m256i[0], (scale)); \
simde_mm512_i32gather_ps_r_.m256[1] = _mm256_i32gather_ps( \
HEDLEY_STATIC_CAST(float const*, (base_addr)), simde_mm512_i32gather_ps_vindex_.m256i[1], (scale)); \
simde__m512_from_private(simde_mm512_i32gather_ps_r_); \
}))
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_i32gather_ps
#define _mm512_i32gather_ps(vindex, base_addr, scale) simde_mm512_i32gather_ps((vindex), (base_addr), (scale))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm512_i64gather_epi32(simde__m512i vindex, const void* base_addr, const int32_t scale)
SIMDE_REQUIRE_CONSTANT(scale)
HEDLEY_REQUIRE_MSG((scale && scale <= 8 && !(scale & (scale - 1))), "`scale' must be a power of two less than or equal to 8") {
simde__m512i_private vindex_;
simde__m256i_private r_;
vindex_ = simde__m512i_to_private(vindex);
r_ = simde__m256i_to_private(simde_mm256_setzero_si256());
const uint8_t* addr = HEDLEY_REINTERPRET_CAST(const uint8_t*, base_addr);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(vindex_.i64) / sizeof(vindex_.i64[0])) ; i++) {
const uint8_t* src = addr + (HEDLEY_STATIC_CAST(size_t, vindex_.i64[i]) * HEDLEY_STATIC_CAST(size_t, scale));
int32_t dst;
simde_memcpy(&dst, src, sizeof(dst));
r_.i32[i] = dst;
}
return simde__m256i_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_i64gather_epi32(vindex, base_addr, scale) _mm512_i64gather_epi32((vindex), (base_addr), (scale))
#elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm512_i64gather_epi32(vindex, base_addr, scale) SIMDE_STATEMENT_EXPR_(({\
simde__m256i_private simde_mm512_i64gather_epi32_r_; \
simde__m512i_private simde_mm512_i64gather_epi32_vindex_ = simde__m512i_to_private((vindex)); \
simde_mm512_i64gather_epi32_r_.m128i[0] = _mm256_i64gather_epi32( \
HEDLEY_STATIC_CAST(int const*, (base_addr)), simde_mm512_i64gather_epi32_vindex_.m256i[0], (scale)); \
simde_mm512_i64gather_epi32_r_.m128i[1] = _mm256_i64gather_epi32( \
HEDLEY_STATIC_CAST(int const*, (base_addr)), simde_mm512_i64gather_epi32_vindex_.m256i[1], (scale)); \
simde__m256i_from_private(simde_mm512_i64gather_epi32_r_); \
}))
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_i64gather_epi32
#define _mm512_i64gather_epi32(vindex, base_addr, scale) simde_mm512_i64gather_epi32((vindex), (base_addr), (scale))
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_mask_i64gather_epi32(src, k, vindex, base_addr, scale) _mm512_mask_i64gather_epi32((src), (k), (vindex), (base_addr), (scale))
#else
#define simde_mm512_mask_i64gather_epi32(src, k, vindex, base_addr, scale) simde_mm256_mask_mov_epi32((src), (k), simde_mm512_i64gather_epi32((vindex), (base_addr), (scale)))
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_i64gather_epi32
#define _mm512_mask_i64gather_epi32(src, k, vindex, base_addr, scale) simde_mm512_mask_i64gather_epi32((src), (k), (vindex), (base_addr), (scale))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_i64gather_epi64(simde__m512i vindex, const void* base_addr, const int32_t scale)
SIMDE_REQUIRE_CONSTANT(scale)
HEDLEY_REQUIRE_MSG((scale && scale <= 8 && !(scale & (scale - 1))), "`scale' must be a power of two less than or equal to 8") {
simde__m512i_private
vindex_ = simde__m512i_to_private(vindex),
r_ = simde__m512i_to_private(simde_mm512_setzero_si512());
const uint8_t* addr = HEDLEY_REINTERPRET_CAST(const uint8_t*, base_addr);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(vindex_.i64) / sizeof(vindex_.i64[0])) ; i++) {
const uint8_t* src = addr + (HEDLEY_STATIC_CAST(size_t, vindex_.i64[i]) * HEDLEY_STATIC_CAST(size_t, scale));
int64_t dst;
simde_memcpy(&dst, src, sizeof(dst));
r_.i64[i] = dst;
}
return simde__m512i_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_i64gather_epi64(vindex, base_addr, scale) _mm512_i64gather_epi64((vindex), (base_addr), (scale))
#elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm512_i64gather_epi64(vindex, base_addr, scale) SIMDE_STATEMENT_EXPR_(({\
simde__m512i_private simde_mm512_i64gather_epi64_r_, \
simde_mm512_i64gather_epi64_vindex_ = simde__m512i_to_private((vindex)); \
simde_mm512_i64gather_epi64_r_.m256i[0] = _mm256_i64gather_epi64( \
HEDLEY_STATIC_CAST(int64_t const*, (base_addr)), simde_mm512_i64gather_epi64_vindex_.m256i[0], (scale)); \
simde_mm512_i64gather_epi64_r_.m256i[1] = _mm256_i64gather_epi64( \
HEDLEY_STATIC_CAST(int64_t const*, (base_addr)), simde_mm512_i64gather_epi64_vindex_.m256i[1], (scale)); \
simde__m512i_from_private(simde_mm512_i64gather_epi64_r_); \
}))
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_i64gather_epi64
#define _mm512_i64gather_epi64(vindex, base_addr, scale) simde_mm512_i64gather_epi64((vindex), (base_addr), (scale))
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_mask_i64gather_epi64(src, k, vindex, base_addr, scale) _mm512_mask_i64gather_epi64((src), (k), (vindex), (base_addr), (scale))
#else
#define simde_mm512_mask_i64gather_epi64(src, k, vindex, base_addr, scale) simde_mm512_mask_mov_epi64((src), (k), simde_mm512_i64gather_epi64((vindex), (base_addr), (scale)))
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_i64gather_epi64
#define _mm512_mask_i64gather_epi64(src, k, vindex, base_addr, scale) simde_mm512_mask_i64gather_epi64((src), (k), (vindex), (base_addr), (scale))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_i64gather_pd(simde__m512i vindex, const void* base_addr, const int32_t scale)
SIMDE_REQUIRE_CONSTANT(scale)
HEDLEY_REQUIRE_MSG((scale && scale <= 8 && !(scale & (scale - 1))), "`scale' must be a power of two less than or equal to 8") {
simde__m512i_private vindex_;
simde__m512d_private r_;
vindex_ = simde__m512i_to_private(vindex);
r_ = simde__m512d_to_private(simde_mm512_setzero_pd());
const uint8_t* addr = HEDLEY_REINTERPRET_CAST(const uint8_t*, base_addr);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(vindex_.i64) / sizeof(vindex_.i64[0])) ; i++) {
const uint8_t* src = addr + (HEDLEY_STATIC_CAST(size_t, vindex_.i64[i]) * HEDLEY_STATIC_CAST(size_t, scale));
simde_float64 dst;
simde_memcpy(&dst, src, sizeof(dst));
r_.f64[i] = dst;
}
return simde__m512d_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_i64gather_pd(vindex, base_addr, scale) _mm512_i64gather_pd((vindex), (base_addr), (scale))
#elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm512_i64gather_pd(vindex, base_addr, scale) SIMDE_STATEMENT_EXPR_(({\
simde__m512d_private simde_mm512_i64gather_pd_r_; \
simde__m512i_private simde_mm512_i64gather_pd_vindex_ = simde__m512i_to_private((vindex)); \
simde_mm512_i64gather_pd_r_.m256d[0] = _mm256_i64gather_pd( \
HEDLEY_STATIC_CAST(double const*, (base_addr)), simde_mm512_i64gather_pd_vindex_.m256i[0], (scale)); \
simde_mm512_i64gather_pd_r_.m256d[1] = _mm256_i64gather_pd( \
HEDLEY_STATIC_CAST(double const*, (base_addr)), simde_mm512_i64gather_pd_vindex_.m256i[1], (scale)); \
simde__m512d_from_private(simde_mm512_i64gather_pd_r_); \
}))
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_i64gather_pd
#define _mm512_i64gather_pd(vindex, base_addr, scale) simde_mm512_i64gather_pd((vindex), (base_addr), (scale))
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_mask_i64gather_pd(src, k, vindex, base_addr, scale) _mm512_mask_i64gather_pd((src), (k), (vindex), (base_addr), (scale))
#else
#define simde_mm512_mask_i64gather_pd(src, k, vindex, base_addr, scale) simde_mm512_mask_mov_pd((src), (k), simde_mm512_i64gather_pd((vindex), (base_addr), (scale)))
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_i64gather_pd
#define _mm512_mask_i64gather_pd(src, k, vindex, base_addr, scale) simde_mm512_mask_i64gather_pd((src), (k), (vindex), (base_addr), (scale))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm512_i64gather_ps(simde__m512i vindex, const void* base_addr, const int32_t scale)
SIMDE_REQUIRE_CONSTANT(scale)
HEDLEY_REQUIRE_MSG((scale && scale <= 8 && !(scale & (scale - 1))), "`scale' must be a power of two less than or equal to 8") {
simde__m512i_private vindex_;
simde__m256_private r_;
vindex_ = simde__m512i_to_private(vindex);
r_ = simde__m256_to_private(simde_mm256_setzero_ps());
const uint8_t* addr = HEDLEY_REINTERPRET_CAST(const uint8_t*, base_addr);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(vindex_.i64) / sizeof(vindex_.i64[0])) ; i++) {
const uint8_t* src = addr + (HEDLEY_STATIC_CAST(size_t, vindex_.i64[i]) * HEDLEY_STATIC_CAST(size_t, scale));
simde_float32 dst;
simde_memcpy(&dst, src, sizeof(dst));
r_.f32[i] = dst;
}
return simde__m256_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_i64gather_ps(vindex, base_addr, scale) _mm512_i64gather_ps((vindex), (base_addr), (scale))
#elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm512_i64gather_ps(vindex, base_addr, scale) SIMDE_STATEMENT_EXPR_(({\
simde__m256_private simde_mm512_i64gather_ps_r_; \
simde__m512i_private simde_mm512_i64gather_ps_vindex_ = simde__m512i_to_private((vindex)); \
simde_mm512_i64gather_ps_r_.m128[0] = _mm256_i64gather_ps( \
HEDLEY_STATIC_CAST(float const*, (base_addr)), simde_mm512_i64gather_ps_vindex_.m256i[0], (scale)); \
simde_mm512_i64gather_ps_r_.m128[1] = _mm256_i64gather_ps( \
HEDLEY_STATIC_CAST(float const*, (base_addr)), simde_mm512_i64gather_ps_vindex_.m256i[1], (scale)); \
simde__m256_from_private(simde_mm512_i64gather_ps_r_); \
}))
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_i64gather_ps
#define _mm512_i64gather_ps(vindex, base_addr, scale) simde_mm512_i64gather_ps((vindex), (base_addr), (scale))
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_mask_i64gather_ps(src, k, vindex, base_addr, scale) _mm512_mask_i64gather_ps((src), (k), (vindex), (base_addr), (scale))
#else
#define simde_mm512_mask_i64gather_ps(src, k, vindex, base_addr, scale) simde_mm256_mask_mov_ps((src), (k), simde_mm512_i64gather_ps((vindex), (base_addr), (scale)))
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_i64gather_ps
#define _mm512_mask_i64gather_ps(src, k, vindex, base_addr, scale) simde_mm512_mask_i64gather_ps((src), (k), (vindex), (base_addr), (scale))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_GATHER_H) */
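/* A minimal usage sketch, not part of upstream SIMDe: a gather reads the
 * element at base_addr + vindex[i] * scale for each lane, so `scale` must
 * match the element stride -- here sizeof(float) == 4. The SIMDE_DEMO_GATHER
 * guard and the umbrella include path are hypothetical. */
#if defined(SIMDE_DEMO_GATHER)
#include <stdio.h>
#include <simde/x86/avx512.h>

int main(void) {
float table[32];
for (int i = 0 ; i < 32 ; i++) table[i] = HEDLEY_STATIC_CAST(float, i) * 0.5f;
simde__m512i idx = simde_mm512_set_epi32(
31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1); /* odd entries */
float out[16];
simde_mm512_storeu_ps(out, simde_mm512_i32gather_ps(idx, table, 4));
printf("%g %g %g\n", out[0], out[1], out[15]); /* 0.5 1.5 15.5 */
return 0;
}
#endif /* SIMDE_DEMO_GATHER */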
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/sad.h | .h | 2,638 | 78 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Hidayat Khan <huk2209@gmail.com>
*/
#if !defined(SIMDE_X86_AVX512_SAD_H)
#define SIMDE_X86_AVX512_SAD_H
#include "types.h"
#include "../avx2.h"
#include "mov.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_sad_epu8 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_sad_epu8(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_sad_epu8(a_.m256i[i], b_.m256i[i]);
}
#else
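/* portable fallback: each 64-bit result element i accumulates the sum of
 * absolute differences over its own group of eight bytes (i*8 .. i*8+7) */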
for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
uint16_t tmp = 0;
SIMDE_VECTORIZE_REDUCTION(+:tmp)
for (size_t j = 0 ; j < ((sizeof(r_.u8) / sizeof(r_.u8[0])) / 8) ; j++) {
const size_t e = j + (i * 8);
tmp += (a_.u8[e] > b_.u8[e]) ? (a_.u8[e] - b_.u8[e]) : (b_.u8[e] - a_.u8[e]);
}
r_.i64[i] = tmp;
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_sad_epu8
#define _mm512_sad_epu8(a, b) simde_mm512_sad_epu8(a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_SAD_H) */
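/* A minimal usage sketch, not part of upstream SIMDe: one SAD per 8-byte
 * group, so a 512-bit input yields eight 64-bit sums. The SIMDE_DEMO_SAD
 * guard and the umbrella include path are hypothetical. */
#if defined(SIMDE_DEMO_SAD)
#include <stdio.h>
#include <inttypes.h>
#include <simde/x86/avx512.h>

int main(void) {
simde__m512i a = simde_mm512_set1_epi8(HEDLEY_STATIC_CAST(int8_t, 200));
simde__m512i b = simde_mm512_set1_epi8(HEDLEY_STATIC_CAST(int8_t, 190));
int64_t out[8];
simde_mm512_storeu_si512(out, simde_mm512_sad_epu8(a, b));
printf("%" PRId64 "\n", out[0]); /* 8 * |200 - 190| = 80 */
return 0;
}
#endif /* SIMDE_DEMO_SAD */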
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/lzcnt.h | .h | 7,815 | 221 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_X86_AVX512_LZCNT_H)
#define SIMDE_X86_AVX512_LZCNT_H
#include "types.h"
#include "mov.h"
#if HEDLEY_MSVC_VERSION_CHECK(14,0,0)
#include <intrin.h>
#pragma intrinsic(_BitScanReverse)
#if defined(_M_AMD64) || defined(_M_ARM64)
#pragma intrinsic(_BitScanReverse64)
#endif
#endif
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
#if \
( HEDLEY_HAS_BUILTIN(__builtin_clz) || \
HEDLEY_GCC_VERSION_CHECK(3,4,0) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) ) && \
defined(__INT_MAX__) && defined(__LONG_MAX__) && defined(__LONG_LONG_MAX__) && \
defined(__INT32_MAX__) && defined(__INT64_MAX__)
#if __INT_MAX__ == __INT32_MAX__
#define simde_x_clz32(v) __builtin_clz(HEDLEY_STATIC_CAST(unsigned int, (v)))
#elif __LONG_MAX__ == __INT32_MAX__
#define simde_x_clz32(v) __builtin_clzl(HEDLEY_STATIC_CAST(unsigned long, (v)))
#elif __LONG_LONG_MAX__ == __INT32_MAX__
#define simde_x_clz32(v) __builtin_clzll(HEDLEY_STATIC_CAST(unsigned long long, (v)))
#endif
#if __INT_MAX__ == __INT64_MAX__
#define simde_x_clz64(v) __builtin_clz(HEDLEY_STATIC_CAST(unsigned int, (v)))
#elif __LONG_MAX__ == __INT64_MAX__
#define simde_x_clz64(v) __builtin_clzl(HEDLEY_STATIC_CAST(unsigned long, (v)))
#elif __LONG_LONG_MAX__ == __INT64_MAX__
#define simde_x_clz64(v) __builtin_clzll(HEDLEY_STATIC_CAST(unsigned long long, (v)))
#endif
#elif HEDLEY_MSVC_VERSION_CHECK(14,0,0)
static int simde_x_clz32(uint32_t x) {
unsigned long r;
_BitScanReverse(&r, x);
return 31 - HEDLEY_STATIC_CAST(int, r);
}
#define simde_x_clz32 simde_x_clz32
static int simde_x_clz64(uint64_t x) {
unsigned long r;
#if defined(_M_AMD64) || defined(_M_ARM64)
_BitScanReverse64(&r, x);
return 63 - HEDLEY_STATIC_CAST(int, r);
#else
uint32_t high = HEDLEY_STATIC_CAST(uint32_t, x >> 32);
/* _BitScanReverse yields the index of the highest set bit, not a count,
 * so the leading-zero count of a 32-bit word is 31 minus that index */
if (high != 0) {
_BitScanReverse(&r, HEDLEY_STATIC_CAST(unsigned long, high));
return 31 - HEDLEY_STATIC_CAST(int, r);
} else {
_BitScanReverse(&r, HEDLEY_STATIC_CAST(unsigned long, x & ~UINT32_C(0)));
return (31 - HEDLEY_STATIC_CAST(int, r)) + 32;
}
#endif
}
#define simde_x_clz64 simde_x_clz64
#endif
#if !defined(simde_x_clz32) || !defined(simde_x_clz64)
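/* lut[v] is the leading-zero count of the 8-bit value v; the callers below
 * only ever look up the topmost nonzero byte of their argument, so the
 * v == 0 entry is never consulted */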
static uint8_t simde_x_avx512cd_lz_lookup(const uint8_t value) {
static const uint8_t lut[256] = {
7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
return lut[value];
}
#if !defined(simde_x_clz32)
static int simde_x_clz32(uint32_t x) {
size_t s = sizeof(x) * 8;
uint32_t r;
while ((s -= 8) != 0) {
r = x >> s;
if (r != 0)
return simde_x_avx512cd_lz_lookup(HEDLEY_STATIC_CAST(uint8_t, r)) +
(((sizeof(x) - 1) * 8) - s);
}
if (x == 0)
return (int) ((sizeof(x) * 8) - 1);
else
return simde_x_avx512cd_lz_lookup(HEDLEY_STATIC_CAST(uint8_t, x)) +
((sizeof(x) - 1) * 8);
}
#endif
#if !defined(simde_x_clz64)
static int simde_x_clz64(uint64_t x) {
size_t s = sizeof(x) * 8;
uint64_t r;
while ((s -= 8) != 0) {
r = x >> s;
if (r != 0)
return simde_x_avx512cd_lz_lookup(HEDLEY_STATIC_CAST(uint8_t, r)) +
(((sizeof(x) - 1) * 8) - s);
}
if (x == 0)
return (int) ((sizeof(x) * 8) - 1);
else
return simde_x_avx512cd_lz_lookup(HEDLEY_STATIC_CAST(uint8_t, x)) +
((sizeof(x) - 1) * 8);
}
#endif
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_lzcnt_epi32(simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE)
return _mm_lzcnt_epi32(a);
#elif defined(SIMDE_X86_SSE2_NATIVE)
/* https://stackoverflow.com/a/58827596/501126 */
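/* The andnot clears bit i wherever bit i+8 is set, which breaks the
 * all-ones runs that could make the int->float conversion round up into the
 * next exponent. The biased exponent (bits >> 23) is then exactly
 * 127 + floor(log2(x)), so 158 - exponent == 31 - floor(log2(x)) == clz;
 * the saturating subtract maps negative inputs to 0 (their clz) and the
 * final min clamps the x == 0 case (158) down to 32. */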
a = _mm_andnot_si128(_mm_srli_epi32(a, 8), a);
a = _mm_castps_si128(_mm_cvtepi32_ps(a));
a = _mm_srli_epi32(a, 23);
a = _mm_subs_epu16(_mm_set1_epi32(158), a);
a = _mm_min_epi16(a, _mm_set1_epi32(32));
return a;
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_u32 = vec_cntlz(a_.altivec_u32);
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
r_.i32[i] = (HEDLEY_UNLIKELY(a_.i32[i] == 0) ? HEDLEY_STATIC_CAST(int32_t, sizeof(int32_t) * CHAR_BIT) : HEDLEY_STATIC_CAST(int32_t, simde_x_clz32(HEDLEY_STATIC_CAST(uint32_t, a_.i32[i]))));
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES)
#undef _mm_lzcnt_epi32
#define _mm_lzcnt_epi32(a) simde_mm_lzcnt_epi32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_lzcnt_epi32(simde__m128i src, simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE)
return _mm_mask_lzcnt_epi32(src, k, a);
#else
return simde_mm_mask_mov_epi32(src, k, simde_mm_lzcnt_epi32(a));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_lzcnt_epi32
#define _mm_mask_lzcnt_epi32(src, k, a) simde_mm_mask_lzcnt_epi32(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_lzcnt_epi32(simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE)
return _mm_maskz_lzcnt_epi32(k, a);
#else
return simde_mm_maskz_mov_epi32(k, simde_mm_lzcnt_epi32(a));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_lzcnt_epi32
#define _mm_maskz_lzcnt_epi32(k, a) simde_mm_maskz_lzcnt_epi32(k, a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_LZCNT_H) */
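/* A minimal usage sketch, not part of upstream SIMDe: zero lanes return the
 * full element width, 32. The SIMDE_DEMO_LZCNT guard and the umbrella
 * include path are hypothetical. */
#if defined(SIMDE_DEMO_LZCNT)
#include <stdio.h>
#include <simde/x86/avx512.h>

int main(void) {
simde__m128i a = simde_mm_set_epi32(0, 1, 256, INT32_MIN); /* elements 3..0 */
int32_t out[4];
simde_mm_storeu_si128(HEDLEY_REINTERPRET_CAST(simde__m128i*, out), simde_mm_lzcnt_epi32(a));
printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);
/* expected: 0 23 31 32 */
return 0;
}
#endif /* SIMDE_DEMO_LZCNT */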
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/packus.h | .h | 6,833 | 123 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Hidayat Khan <huk2209@gmail.com>
*/
#if !defined(SIMDE_X86_AVX512_PACKUS_H)
#define SIMDE_X86_AVX512_PACKUS_H
#include "types.h"
#include "../avx2.h"
#include "mov.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_packus_epi16 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_packus_epi16(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
r_.m256i[0] = simde_mm256_packus_epi16(a_.m256i[0], b_.m256i[0]);
r_.m256i[1] = simde_mm256_packus_epi16(a_.m256i[1], b_.m256i[1]);
#else
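/* PACKUS interleaves its sources per 128-bit lane: each 16-byte chunk of
 * the result is 8 saturated bytes from a followed by 8 from b. The
 * octet/quarter/halfway offsets below reproduce exactly that ordering. */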
const size_t halfway_point = (sizeof(r_.i8) / sizeof(r_.i8[0])) / 2;
const size_t quarter_point = (sizeof(r_.i8) / sizeof(r_.i8[0])) / 4;
const size_t octet_point = (sizeof(r_.i8) / sizeof(r_.i8[0])) / 8;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < octet_point ; i++) {
r_.u8[i] = (a_.i16[i] > UINT8_MAX) ? UINT8_MAX : ((a_.i16[i] < 0) ? UINT8_C(0) : HEDLEY_STATIC_CAST(uint8_t, a_.i16[i]));
r_.u8[i + octet_point] = (b_.i16[i] > UINT8_MAX) ? UINT8_MAX : ((b_.i16[i] < 0) ? UINT8_C(0) : HEDLEY_STATIC_CAST(uint8_t, b_.i16[i]));
r_.u8[quarter_point + i] = (a_.i16[octet_point + i] > UINT8_MAX) ? UINT8_MAX : ((a_.i16[octet_point + i] < 0) ? UINT8_C(0) : HEDLEY_STATIC_CAST(uint8_t, a_.i16[octet_point + i]));
r_.u8[quarter_point + i + octet_point] = (b_.i16[octet_point + i] > UINT8_MAX) ? UINT8_MAX : ((b_.i16[octet_point + i] < 0) ? UINT8_C(0) : HEDLEY_STATIC_CAST(uint8_t, b_.i16[octet_point + i]));
r_.u8[halfway_point + i] = (a_.i16[quarter_point + i] > UINT8_MAX) ? UINT8_MAX : ((a_.i16[quarter_point + i] < 0) ? UINT8_C(0) : HEDLEY_STATIC_CAST(uint8_t, a_.i16[quarter_point + i]));
r_.u8[halfway_point + i + octet_point] = (b_.i16[quarter_point + i] > UINT8_MAX) ? UINT8_MAX : ((b_.i16[quarter_point + i] < 0) ? UINT8_C(0) : HEDLEY_STATIC_CAST(uint8_t, b_.i16[quarter_point + i]));
r_.u8[halfway_point + quarter_point + i] = (a_.i16[quarter_point + octet_point + i] > UINT8_MAX) ? UINT8_MAX : ((a_.i16[quarter_point + octet_point + i] < 0) ? UINT8_C(0) : HEDLEY_STATIC_CAST(uint8_t, a_.i16[quarter_point + octet_point + i]));
r_.u8[halfway_point + quarter_point + i + octet_point] = (b_.i16[quarter_point + octet_point + i] > UINT8_MAX) ? UINT8_MAX : ((b_.i16[quarter_point + octet_point + i] < 0) ? UINT8_C(0) : HEDLEY_STATIC_CAST(uint8_t, b_.i16[quarter_point + octet_point + i]));
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_packus_epi16
#define _mm512_packus_epi16(a, b) simde_mm512_packus_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_packus_epi32 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_packus_epi32(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(a_.m256i) / sizeof(a_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_packus_epi32(a_.m256i[i], b_.m256i[i]);
}
#else
const size_t halfway_point = (sizeof(r_.i16) / sizeof(r_.i16[0])) / 2;
const size_t quarter_point = (sizeof(r_.i16) / sizeof(r_.i16[0])) / 4;
const size_t octet_point = (sizeof(r_.i16) / sizeof(r_.i16[0])) / 8;
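/* Same per-128-bit-lane interleaving as packus_epi16 above, but
 * saturating each int32 to an unsigned 16-bit result. */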
SIMDE_VECTORIZE
for (size_t i = 0 ; i < octet_point ; i++) {
r_.u16[i] = (a_.i32[i] > UINT16_MAX) ? UINT16_MAX : ((a_.i32[i] < 0) ? UINT16_C(0) : HEDLEY_STATIC_CAST(uint16_t, a_.i32[i]));
r_.u16[i + octet_point] = (b_.i32[i] > UINT16_MAX) ? UINT16_MAX : ((b_.i32[i] < 0) ? UINT16_C(0) : HEDLEY_STATIC_CAST(uint16_t, b_.i32[i]));
r_.u16[quarter_point + i] = (a_.i32[octet_point + i] > UINT16_MAX) ? UINT16_MAX : ((a_.i32[octet_point + i] < 0) ? UINT16_C(0) : HEDLEY_STATIC_CAST(uint16_t, a_.i32[octet_point + i]));
r_.u16[quarter_point + i + octet_point] = (b_.i32[octet_point + i] > UINT16_MAX) ? UINT16_MAX : ((b_.i32[octet_point + i] < 0) ? UINT16_C(0) : HEDLEY_STATIC_CAST(uint16_t, b_.i32[octet_point + i]));
r_.u16[halfway_point + i] = (a_.i32[quarter_point + i] > UINT16_MAX) ? UINT16_MAX : ((a_.i32[quarter_point + i] < 0) ? UINT16_C(0) : HEDLEY_STATIC_CAST(uint16_t, a_.i32[quarter_point + i]));
r_.u16[halfway_point + i + octet_point] = (b_.i32[quarter_point + i] > UINT16_MAX) ? UINT16_MAX : ((b_.i32[quarter_point + i] < 0) ? UINT16_C(0) : HEDLEY_STATIC_CAST(uint16_t, b_.i32[quarter_point + i]));
r_.u16[halfway_point + quarter_point + i] = (a_.i32[quarter_point + octet_point + i] > UINT16_MAX) ? UINT16_MAX : ((a_.i32[quarter_point + octet_point + i] < 0) ? UINT16_C(0) : HEDLEY_STATIC_CAST(uint16_t, a_.i32[quarter_point + octet_point + i]));
r_.u16[halfway_point + quarter_point + i + octet_point] = (b_.i32[quarter_point + octet_point + i] > UINT16_MAX) ? UINT16_MAX : ((b_.i32[quarter_point + octet_point + i] < 0) ? UINT16_C(0) : HEDLEY_STATIC_CAST(uint16_t, b_.i32[quarter_point + octet_point + i]));
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_packus_epi32
#define _mm512_packus_epi32(a, b) simde_mm512_packus_epi32(a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_PACKUS_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/roundscale_round.h | .h | 26,912 | 691 | #if !defined(SIMDE_X86_AVX512_ROUNDSCALE_ROUND_H)
#define SIMDE_X86_AVX512_ROUNDSCALE_ROUND_H
#include "types.h"
#include "roundscale.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
#if defined(HEDLEY_MSVC_VERSION)
#pragma warning( push )
#pragma warning( disable : 4244 )
#endif
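/* The *_round_* variants add only the "suppress all exceptions" (SAE)
 * behaviour on top of the plain roundscale operations: when sae has
 * SIMDE_MM_FROUND_NO_EXC set, the floating-point environment is saved
 * with feholdexcept(), the rounding is performed, and the environment
 * is restored so any newly raised exception flags are discarded. */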
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_roundscale_round_ps(a, imm8, sae) _mm512_roundscale_round_ps(a, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm512_roundscale_round_ps(a, imm8, sae) simde_mm512_roundscale_ps(a, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
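/* Identifiers inside the statement expression are prefixed with the
 * full function name so they cannot shadow (or be shadowed by)
 * variables at the expansion site. */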
#define simde_mm512_roundscale_round_ps(a,imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m512 simde_mm512_roundscale_round_ps_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm512_roundscale_round_ps_envp; \
int simde_mm512_roundscale_round_ps_x = feholdexcept(&simde_mm512_roundscale_round_ps_envp); \
simde_mm512_roundscale_round_ps_r = simde_mm512_roundscale_ps(a, imm8); \
if (HEDLEY_LIKELY(simde_mm512_roundscale_round_ps_x == 0)) \
fesetenv(&simde_mm512_roundscale_round_ps_envp); \
} \
else { \
simde_mm512_roundscale_round_ps_r = simde_mm512_roundscale_ps(a, imm8); \
} \
\
simde_mm512_roundscale_round_ps_r; \
}))
#else
#define simde_mm512_roundscale_round_ps(a, imm8, sae) simde_mm512_roundscale_ps(a, imm8)
#endif
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_roundscale_round_ps (simde__m512 a, int imm8, int sae)
SIMDE_REQUIRE_RANGE(imm8, 0, 15) {
simde__m512 r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm512_roundscale_ps(a, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm512_roundscale_ps(a, imm8);
#endif
}
else {
r = simde_mm512_roundscale_ps(a, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_roundscale_round_ps
#define _mm512_roundscale_round_ps(a, imm8, sae) simde_mm512_roundscale_round_ps(a, imm8, sae)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_92035)
#define simde_mm512_mask_roundscale_round_ps(src, k, a, imm8, sae) _mm512_mask_roundscale_round_ps(src, k, a, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm512_mask_roundscale_round_ps(src, k, a, imm8, sae) simde_mm512_mask_roundscale_ps(src, k, a, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
#define simde_mm512_mask_roundscale_round_ps(src, k, a, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m512 simde_mm512_mask_roundscale_round_ps_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm512_mask_roundscale_round_ps_envp; \
int simde_mm512_mask_roundscale_round_ps_x = feholdexcept(&simde_mm512_mask_roundscale_round_ps_envp); \
simde_mm512_mask_roundscale_round_ps_r = simde_mm512_mask_roundscale_ps(src, k, a, imm8); \
if (HEDLEY_LIKELY(simde_mm512_mask_roundscale_round_ps_x == 0)) \
fesetenv(&simde_mm512_mask_roundscale_round_ps_envp); \
} \
else { \
simde_mm512_mask_roundscale_round_ps_r = simde_mm512_mask_roundscale_ps(src, k, a, imm8); \
} \
\
simde_mm512_mask_roundscale_round_ps_r; \
}))
#else
#define simde_mm512_mask_roundscale_round_ps(src, k, a, imm8, sae) simde_mm512_mask_roundscale_ps(src, k, a, imm8)
#endif
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_roundscale_round_ps (simde__m512 src, simde__mmask8 k, simde__m512 a, int imm8, int sae)
SIMDE_REQUIRE_RANGE(imm8, 0, 15) {
simde__m512 r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm512_mask_roundscale_ps(src, k, a, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm512_mask_roundscale_ps(src, k, a, imm8);
#endif
}
else {
r = simde_mm512_mask_roundscale_ps(src, k, a, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_roundscale_round_ps
#define _mm512_mask_roundscale_round_ps(src, k, a, imm8, sae) simde_mm512_mask_roundscale_round_ps(src, k, a, imm8, sae)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_92035)
#define simde_mm512_maskz_roundscale_round_ps(k, a, imm8, sae) _mm512_maskz_roundscale_round_ps(k, a, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm512_maskz_roundscale_round_ps(k, a, imm8, sae) simde_mm512_maskz_roundscale_ps(k, a, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
#define simde_mm512_maskz_roundscale_round_ps(k, a, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m512 simde_mm512_maskz_roundscale_round_ps_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm512_maskz_roundscale_round_ps_envp; \
int simde_mm512_maskz_roundscale_round_ps_x = feholdexcept(&simde_mm512_maskz_roundscale_round_ps_envp); \
simde_mm512_maskz_roundscale_round_ps_r = simde_mm512_maskz_roundscale_ps(k, a, imm8); \
if (HEDLEY_LIKELY(simde_mm512_maskz_roundscale_round_ps_x == 0)) \
fesetenv(&simde_mm512_maskz_roundscale_round_ps_envp); \
} \
else { \
simde_mm512_maskz_roundscale_round_ps_r = simde_mm512_maskz_roundscale_ps(k, a, imm8); \
} \
\
simde_mm512_maskz_roundscale_round_ps_r; \
}))
#else
#define simde_mm512_maskz_roundscale_round_ps(k, a, imm8, sae) simde_mm512_maskz_roundscale_ps(k, a, imm8)
#endif
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_maskz_roundscale_round_ps (simde__mmask8 k, simde__m512 a, int imm8, int sae)
SIMDE_REQUIRE_RANGE(imm8, 0, 15) {
simde__m512 r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm512_maskz_roundscale_ps(k, a, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm512_maskz_roundscale_ps(k, a, imm8);
#endif
}
else {
r = simde_mm512_maskz_roundscale_ps(k, a, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_roundscale_round_ps
#define _mm512_maskz_roundscale_round_ps(k, a, imm8, sae) simde_mm512_maskz_roundscale_round_ps(k, a, imm8, sae)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_roundscale_round_pd(a, imm8, sae) _mm512_roundscale_round_pd(a, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm512_roundscale_round_pd(a, imm8, sae) simde_mm512_roundscale_pd(a, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
#define simde_mm512_roundscale_round_pd(a, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m512d simde_mm512_roundscale_round_pd_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm512_roundscale_round_pd_envp; \
int simde_mm512_roundscale_round_pd_x = feholdexcept(&simde_mm512_roundscale_round_pd_envp); \
simde_mm512_roundscale_round_pd_r = simde_mm512_roundscale_pd(a, imm8); \
if (HEDLEY_LIKELY(simde_mm512_roundscale_round_pd_x == 0)) \
fesetenv(&simde_mm512_roundscale_round_pd_envp); \
} \
else { \
simde_mm512_roundscale_round_pd_r = simde_mm512_roundscale_pd(a, imm8); \
} \
\
simde_mm512_roundscale_round_pd_r; \
}))
#else
#define simde_mm512_roundscale_round_pd(a, imm8, sae) simde_mm512_roundscale_pd(a, imm8)
#endif
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_roundscale_round_pd (simde__m512d a, int imm8, int sae)
SIMDE_REQUIRE_RANGE(imm8, 0, 15) {
simde__m512d r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm512_roundscale_pd(a, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm512_roundscale_pd(a, imm8);
#endif
}
else {
r = simde_mm512_roundscale_pd(a, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_roundscale_round_pd
#define _mm512_roundscale_round_pd(a, imm8, sae) simde_mm512_roundscale_round_pd(a, imm8, sae)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_92035)
#define simde_mm512_mask_roundscale_round_pd(src, k, a, imm8, sae) _mm512_mask_roundscale_round_pd(src, k, a, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm512_mask_roundscale_round_pd(src, k, a, imm8, sae) simde_mm512_mask_roundscale_pd(src, k, a, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
#define simde_mm512_mask_roundscale_round_pd(src, k, a, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m512d simde_mm512_mask_roundscale_round_pd_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm512_mask_roundscale_round_pd_envp; \
int simde_mm512_mask_roundscale_round_pd_x = feholdexcept(&simde_mm512_mask_roundscale_round_pd_envp); \
simde_mm512_mask_roundscale_round_pd_r = simde_mm512_mask_roundscale_pd(src, k, a, imm8); \
if (HEDLEY_LIKELY(simde_mm512_mask_roundscale_round_pd_x == 0)) \
fesetenv(&simde_mm512_mask_roundscale_round_pd_envp); \
} \
else { \
simde_mm512_mask_roundscale_round_pd_r = simde_mm512_mask_roundscale_pd(src, k, a, imm8); \
} \
\
simde_mm512_mask_roundscale_round_pd_r; \
}))
#else
#define simde_mm512_mask_roundscale_round_pd(src, k, a, imm8, sae) simde_mm512_mask_roundscale_pd(src, k, a, imm8)
#endif
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_roundscale_round_pd (simde__m512d src, simde__mmask8 k, simde__m512d a, int imm8, int sae)
SIMDE_REQUIRE_RANGE(imm8, 0, 15) {
simde__m512d r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm512_mask_roundscale_pd(src, k, a, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm512_mask_roundscale_pd(src, k, a, imm8);
#endif
}
else {
r = simde_mm512_mask_roundscale_pd(src, k, a, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_roundscale_round_pd
#define _mm512_mask_roundscale_round_pd(src, k, a, imm8, sae) simde_mm512_mask_roundscale_round_pd(src, k, a, imm8, sae)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_92035)
#define simde_mm512_maskz_roundscale_round_pd(k, a, imm8, sae) _mm512_maskz_roundscale_round_pd(k, a, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm512_maskz_roundscale_round_pd(k, a, imm8, sae) simde_mm512_maskz_roundscale_pd(k, a, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
#define simde_mm512_maskz_roundscale_round_pd(k, a, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m512d simde_mm512_maskz_roundscale_round_pd_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm512_maskz_roundscale_round_pd_envp; \
int simde_mm512_maskz_roundscale_round_pd_x = feholdexcept(&simde_mm512_maskz_roundscale_round_pd_envp); \
simde_mm512_maskz_roundscale_round_pd_r = simde_mm512_maskz_roundscale_pd(k, a, imm8); \
if (HEDLEY_LIKELY(simde_mm512_maskz_roundscale_round_pd_x == 0)) \
fesetenv(&simde_mm512_maskz_roundscale_round_pd_envp); \
} \
else { \
simde_mm512_maskz_roundscale_round_pd_r = simde_mm512_maskz_roundscale_pd(k, a, imm8); \
} \
\
simde_mm512_maskz_roundscale_round_pd_r; \
}))
#else
#define simde_mm512_maskz_roundscale_round_pd(k, a, imm8, sae) simde_mm512_maskz_roundscale_pd(k, a, imm8)
#endif
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_maskz_roundscale_round_pd (simde__mmask8 k, simde__m512d a, int imm8, int sae)
SIMDE_REQUIRE_RANGE(imm8, 0, 15) {
simde__m512d r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm512_maskz_roundscale_pd(k, a, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm512_maskz_roundscale_pd(k, a, imm8);
#endif
}
else {
r = simde_mm512_maskz_roundscale_pd(k, a, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_roundscale_round_pd
#define _mm512_maskz_roundscale_round_pd(k, a, imm8, sae) simde_mm512_maskz_roundscale_round_pd(k, a, imm8, sae)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm_roundscale_round_ss(a, b, imm8, sae) _mm_roundscale_round_ss(a, b, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm_roundscale_round_ss(a, b, imm8, sae) simde_mm_roundscale_ss(a, b, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
#define simde_mm_roundscale_round_ss(a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m128 simde_mm_roundscale_round_ss_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm_roundscale_round_ss_envp; \
int simde_mm_roundscale_round_ss_x = feholdexcept(&simde_mm_roundscale_round_ss_envp); \
simde_mm_roundscale_round_ss_r = simde_mm_roundscale_ss(a, b, imm8); \
if (HEDLEY_LIKELY(simde_mm_roundscale_round_ss_x == 0)) \
fesetenv(&simde_mm_roundscale_round_ss_envp); \
} \
else { \
simde_mm_roundscale_round_ss_r = simde_mm_roundscale_ss(a, b, imm8); \
} \
\
simde_mm_roundscale_round_ss_r; \
}))
#else
#define simde_mm_roundscale_round_ss(a, b, imm8, sae) simde_mm_roundscale_ss(a, b, imm8)
#endif
#elif !(defined(HEDLEY_MSVC_VERSION) && defined(SIMDE_X86_AVX_NATIVE))
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_roundscale_round_ss (simde__m128 a, simde__m128 b, const int imm8, const int sae)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
SIMDE_REQUIRE_CONSTANT(sae) {
simde__m128 r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm_roundscale_ss(a, b, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm_roundscale_ss(a, b, imm8);
#endif
}
else {
r = simde_mm_roundscale_ss(a, b, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_roundscale_round_ss
#define _mm_roundscale_round_ss(a, b, imm8, sae) simde_mm_roundscale_round_ss(a, b, imm8, sae)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_92035)
#define simde_mm_mask_roundscale_round_ss(src, k, a, b, imm8, sae) _mm_mask_roundscale_round_ss(src, k, a, b, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm_mask_roundscale_round_ss(src, k, a, b, imm8, sae) simde_mm_mask_roundscale_ss(src, k, a, b, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
#define simde_mm_mask_roundscale_round_ss(src, k, a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m128 simde_mm_mask_roundscale_round_ss_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm_mask_roundscale_round_ss_envp; \
int simde_mm_mask_roundscale_round_ss_x = feholdexcept(&simde_mm_mask_roundscale_round_ss_envp); \
simde_mm_mask_roundscale_round_ss_r = simde_mm_mask_roundscale_ss(src, k, a, b, imm8); \
if (HEDLEY_LIKELY(simde_mm_mask_roundscale_round_ss_x == 0)) \
fesetenv(&simde_mm_mask_roundscale_round_ss_envp); \
} \
else { \
simde_mm_mask_roundscale_round_ss_r = simde_mm_mask_roundscale_ss(src, k, a, b, imm8); \
} \
\
simde_mm_mask_roundscale_round_ss_r; \
}))
#else
#define simde_mm_mask_roundscale_round_ss(src, k, a, b, imm8, sae) simde_mm_mask_roundscale_ss(src, k, a, b, imm8)
#endif
#elif !(defined(HEDLEY_MSVC_VERSION) && defined(SIMDE_X86_AVX_NATIVE))
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mask_roundscale_round_ss (simde__m128 src, simde__mmask8 k, simde__m128 a, simde__m128 b, const int imm8, const int sae)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
SIMDE_REQUIRE_CONSTANT(sae) {
simde__m128 r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm_mask_roundscale_ss(src, k, a, b, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm_mask_roundscale_ss(src, k, a, b, imm8);
#endif
}
else {
r = simde_mm_mask_roundscale_ss(src, k, a, b, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_roundscale_round_ss
#define _mm_mask_roundscale_round_ss(src, k, a, b, imm8, sae) simde_mm_mask_roundscale_round_ss(src, k, a, b, imm8, sae)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_92035)
#define simde_mm_maskz_roundscale_round_ss(k, a, b, imm8, sae) _mm_maskz_roundscale_round_ss(k, a, b, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm_maskz_roundscale_round_ss(k, a, b, imm8, sae) simde_mm_maskz_roundscale_ss(k, a, b, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
#define simde_mm_maskz_roundscale_round_ss(k, a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m128 simde_mm_maskz_roundscale_round_ss_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm_maskz_roundscale_round_ss_envp; \
int simde_mm_maskz_roundscale_round_ss_x = feholdexcept(&simde_mm_maskz_roundscale_round_ss_envp); \
simde_mm_maskz_roundscale_round_ss_r = simde_mm_maskz_roundscale_ss(k, a, b, imm8); \
if (HEDLEY_LIKELY(simde_mm_maskz_roundscale_round_ss_x == 0)) \
fesetenv(&simde_mm_maskz_roundscale_round_ss_envp); \
} \
else { \
simde_mm_maskz_roundscale_round_ss_r = simde_mm_maskz_roundscale_ss(k, a, b, imm8); \
} \
\
simde_mm_maskz_roundscale_round_ss_r; \
}))
#else
#define simde_mm_maskz_roundscale_round_ss(k, a, b, imm8, sae) simde_mm_maskz_roundscale_ss(k, a, b, imm8)
#endif
#elif !(defined(HEDLEY_MSVC_VERSION) && defined(SIMDE_X86_AVX_NATIVE))
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_maskz_roundscale_round_ss (simde__mmask8 k, simde__m128 a, simde__m128 b, const int imm8, const int sae)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
SIMDE_REQUIRE_CONSTANT(sae) {
simde__m128 r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm_maskz_roundscale_ss(k, a, b, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm_maskz_roundscale_ss(k, a, b, imm8);
#endif
}
else {
r = simde_mm_maskz_roundscale_ss(k, a, b, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_roundscale_round_ss
#define _mm_maskz_roundscale_round_ss(k, a, b, imm8, sae) simde_mm_maskz_roundscale_round_ss(k, a, b, imm8, sae)
#endif
#if defined(HEDLEY_MSVC_VERSION)
#pragma warning( pop )
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm_roundscale_round_sd(a, b, imm8, sae) _mm_roundscale_round_sd(a, b, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm_roundscale_round_sd(a, b, imm8, sae) simde_mm_roundscale_sd(a, b, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
#define simde_mm_roundscale_round_sd(a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m128d simde_mm_roundscale_round_sd_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm_roundscale_round_sd_envp; \
int simde_mm_roundscale_round_sd_x = feholdexcept(&simde_mm_roundscale_round_sd_envp); \
simde_mm_roundscale_round_sd_r = simde_mm_roundscale_sd(a, b, imm8); \
if (HEDLEY_LIKELY(simde_mm_roundscale_round_sd_x == 0)) \
fesetenv(&simde_mm_roundscale_round_sd_envp); \
} \
else { \
simde_mm_roundscale_round_sd_r = simde_mm_roundscale_sd(a, b, imm8); \
} \
\
simde_mm_roundscale_round_sd_r; \
}))
#else
#define simde_mm_roundscale_round_sd(a, b, imm8, sae) simde_mm_roundscale_sd(a, b, imm8)
#endif
#elif !(defined(HEDLEY_MSVC_VERSION) && defined(SIMDE_X86_AVX_NATIVE))
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_roundscale_round_sd (simde__m128d a, simde__m128d b, const int imm8, const int sae)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
SIMDE_REQUIRE_CONSTANT(sae) {
simde__m128d r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm_roundscale_sd(a, b, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm_roundscale_sd(a, b, imm8);
#endif
}
else {
r = simde_mm_roundscale_sd(a, b, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_roundscale_round_sd
#define _mm_roundscale_round_sd(a, b, imm8, sae) simde_mm_roundscale_round_sd(a, b, imm8, sae)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_92035)
#define simde_mm_mask_roundscale_round_sd(src, k, a, b, imm8, sae) _mm_mask_roundscale_round_sd(src, k, a, b, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm_mask_roundscale_round_sd(src, k, a, b, imm8, sae) simde_mm_mask_roundscale_sd(src, k, a, b, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
#define simde_mm_mask_roundscale_round_sd(src, k, a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m128d simde_mm_mask_roundscale_round_sd_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm_mask_roundscale_round_sd_envp; \
int simde_mm_mask_roundscale_round_sd_x = feholdexcept(&simde_mm_mask_roundscale_round_sd_envp); \
simde_mm_mask_roundscale_round_sd_r = simde_mm_mask_roundscale_sd(src, k, a, b, imm8); \
if (HEDLEY_LIKELY(simde_mm_mask_roundscale_round_sd_x == 0)) \
fesetenv(&simde_mm_mask_roundscale_round_sd_envp); \
} \
else { \
simde_mm_mask_roundscale_round_sd_r = simde_mm_mask_roundscale_sd(src, k, a, b, imm8); \
} \
\
simde_mm_mask_roundscale_round_sd_r; \
}))
#else
#define simde_mm_mask_roundscale_round_sd(src, k, a, b, imm8, sae) simde_mm_mask_roundscale_sd(src, k, a, b, imm8)
#endif
#elif !(defined(HEDLEY_MSVC_VERSION) && defined(SIMDE_X86_AVX_NATIVE))
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_mask_roundscale_round_sd (simde__m128d src, simde__mmask8 k, simde__m128d a, simde__m128d b, const int imm8, const int sae)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
SIMDE_REQUIRE_CONSTANT(sae) {
simde__m128d r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm_mask_roundscale_sd(src, k, a, b, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm_mask_roundscale_sd(src, k, a, b, imm8);
#endif
}
else {
r = simde_mm_mask_roundscale_sd(src, k, a, b, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_roundscale_round_sd
#define _mm_mask_roundscale_round_sd(src, k, a, b, imm8, sae) simde_mm_mask_roundscale_round_sd(src, k, a, b, imm8, sae)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_92035)
#define simde_mm_maskz_roundscale_round_sd(k, a, b, imm8, sae) _mm_maskz_roundscale_round_sd(k, a, b, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm_maskz_roundscale_round_sd(k, a, b, imm8, sae) simde_mm_maskz_roundscale_sd(k, a, b, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
#define simde_mm_maskz_roundscale_round_sd(k, a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m128d simde_mm_maskz_roundscale_round_sd_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm_maskz_roundscale_round_sd_envp; \
int simde_mm_maskz_roundscale_round_sd_x = feholdexcept(&simde_mm_maskz_roundscale_round_sd_envp); \
simde_mm_maskz_roundscale_round_sd_r = simde_mm_maskz_roundscale_sd(k, a, b, imm8); \
if (HEDLEY_LIKELY(simde_mm_maskz_roundscale_round_sd_x == 0)) \
fesetenv(&simde_mm_maskz_roundscale_round_sd_envp); \
} \
else { \
simde_mm_maskz_roundscale_round_sd_r = simde_mm_maskz_roundscale_sd(k, a, b, imm8); \
} \
\
simde_mm_maskz_roundscale_round_sd_r; \
}))
#else
#define simde_mm_maskz_roundscale_round_sd(k, a, b, imm8, sae) simde_mm_maskz_roundscale_sd(k, a, b, imm8)
#endif
#elif !(defined(HEDLEY_MSVC_VERSION) && defined(SIMDE_X86_AVX_NATIVE))
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_maskz_roundscale_round_sd (simde__mmask8 k, simde__m128d a, simde__m128d b, const int imm8, const int sae)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
SIMDE_REQUIRE_CONSTANT(sae) {
simde__m128d r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm_maskz_roundscale_sd(k, a, b, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm_maskz_roundscale_sd(k, a, b, imm8);
#endif
}
else {
r = simde_mm_maskz_roundscale_sd(k, a, b, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_roundscale_round_sd
#define _mm_maskz_roundscale_round_sd(k, a, b, imm8, sae) simde_mm_maskz_roundscale_round_sd(k, a, b, imm8, sae)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_ROUNDSCALE_ROUND_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/slli.h | .h | 6,854 | 180 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Hidayat Khan <huk2209@gmail.com>
* 2020 Christopher Moore <moore@free.fr>
*/
#if !defined(SIMDE_X86_AVX512_SLLI_H)
#define SIMDE_X86_AVX512_SLLI_H
#include "types.h"
#include "../avx2.h"
#include "mov.h"
#include "setzero.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_slli_epi16 (simde__m512i a, const unsigned int imm8)
SIMDE_REQUIRE_RANGE(imm8, 0, 255) {
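/* Like simde_mm512_slli_epi32 below: some GCC releases require the
 * native intrinsic's count to be a compile-time constant, so dispatch
 * through SIMDE_CONSTIFY_16_ on those versions. */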
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (defined(HEDLEY_GCC_VERSION) && ((__GNUC__ == 5 && __GNUC_MINOR__ == 5) || (__GNUC__ == 6 && __GNUC_MINOR__ >= 4)))
simde__m512i r;
SIMDE_CONSTIFY_16_(_mm512_slli_epi16, r, simde_mm512_setzero_si512(), imm8, a);
return r;
#elif defined(SIMDE_X86_AVX512BW_NATIVE)
return SIMDE_BUG_IGNORE_SIGN_CONVERSION(_mm512_slli_epi16(a, imm8));
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
if(imm8 < 16)
r_.i16 = HEDLEY_STATIC_CAST(__typeof__(r_.i16), (a_.i16 << HEDLEY_STATIC_CAST(int16_t, imm8)));
else
return simde_mm512_setzero_si512();
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (imm8 < 16) ? HEDLEY_STATIC_CAST(int16_t, a_.i16[i] << (imm8 & 0xff)) : 0;
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_slli_epi16
#define _mm512_slli_epi16(a, imm8) simde_mm512_slli_epi16(a, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_slli_epi32 (simde__m512i a, unsigned int imm8) {
/* I guess the restriction was added in 6.4, back-ported to 5.5, then
* removed (fixed) in 7? */
#if defined(SIMDE_X86_AVX512F_NATIVE) && (defined(HEDLEY_GCC_VERSION) && ((__GNUC__ == 5 && __GNUC_MINOR__ == 5) || (__GNUC__ == 6 && __GNUC_MINOR__ >= 4)))
simde__m512i r;
SIMDE_CONSTIFY_32_(_mm512_slli_epi32, r, simde_mm512_setzero_si512(), imm8, a);
return r;
#elif defined(SIMDE_X86_AVX512F_NATIVE)
return SIMDE_BUG_IGNORE_SIGN_CONVERSION(_mm512_slli_epi32(a, imm8));
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a);
/* The Intel Intrinsics Guide says that only the 8 LSBits of imm8 are
* used. In this case we should do "imm8 &= 0xff". However in
* practice all bits are used. */
if (imm8 > 31) {
simde_memset(&r_, 0, sizeof(r_));
} else {
#if defined(SIMDE_X86_AVX2_NATIVE)
r_.m256i[0] = simde_mm256_slli_epi32(a_.m256i[0], HEDLEY_STATIC_CAST(int, imm8));
r_.m256i[1] = simde_mm256_slli_epi32(a_.m256i[1], HEDLEY_STATIC_CAST(int, imm8));
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i[0] = simde_mm_slli_epi32(a_.m128i[0], HEDLEY_STATIC_CAST(int, imm8));
r_.m128i[1] = simde_mm_slli_epi32(a_.m128i[1], HEDLEY_STATIC_CAST(int, imm8));
r_.m128i[2] = simde_mm_slli_epi32(a_.m128i[2], HEDLEY_STATIC_CAST(int, imm8));
r_.m128i[3] = simde_mm_slli_epi32(a_.m128i[3], HEDLEY_STATIC_CAST(int, imm8));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.u32 = a_.u32 << imm8;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] << imm8;
}
#endif
}
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_slli_epi32
#define _mm512_slli_epi32(a, imm8) simde_mm512_slli_epi32(a, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_slli_epi64 (simde__m512i a, unsigned int imm8) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && (defined(HEDLEY_GCC_VERSION) && ((__GNUC__ == 5 && __GNUC_MINOR__ == 5) || (__GNUC__ == 6 && __GNUC_MINOR__ >= 4)))
simde__m512i r;
SIMDE_CONSTIFY_64_(_mm512_slli_epi64, r, simde_mm512_setzero_si512(), imm8, a);
return r;
#elif defined(SIMDE_X86_AVX512F_NATIVE)
return SIMDE_BUG_IGNORE_SIGN_CONVERSION(_mm512_slli_epi64(a, imm8));
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a);
/* The Intel Intrinsics Guide says that only the 8 LSBits of imm8 are
* used. In this case we should do "imm8 &= 0xff". However in
* practice all bits are used. */
if (imm8 > 63) {
simde_memset(&r_, 0, sizeof(r_));
} else {
#if defined(SIMDE_X86_AVX2_NATIVE)
r_.m256i[0] = simde_mm256_slli_epi64(a_.m256i[0], HEDLEY_STATIC_CAST(int, imm8));
r_.m256i[1] = simde_mm256_slli_epi64(a_.m256i[1], HEDLEY_STATIC_CAST(int, imm8));
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i[0] = simde_mm_slli_epi64(a_.m128i[0], HEDLEY_STATIC_CAST(int, imm8));
r_.m128i[1] = simde_mm_slli_epi64(a_.m128i[1], HEDLEY_STATIC_CAST(int, imm8));
r_.m128i[2] = simde_mm_slli_epi64(a_.m128i[2], HEDLEY_STATIC_CAST(int, imm8));
r_.m128i[3] = simde_mm_slli_epi64(a_.m128i[3], HEDLEY_STATIC_CAST(int, imm8));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_97248)
r_.u64 = a_.u64 << imm8;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
r_.u64[i] = a_.u64[i] << imm8;
}
#endif
}
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_slli_epi64
#define _mm512_slli_epi64(a, imm8) simde_mm512_slli_epi64(a, imm8)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_SLLI_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/maddubs.h | .h | 6,126 | 160 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Ashleigh Newman-Jones <ashnewman-jones@hotmail.co.uk>
*/
#if !defined(SIMDE_X86_AVX512_MADDUBS_H)
#define SIMDE_X86_AVX512_MADDUBS_H
#include "types.h"
#include "mov.h"
#include "../avx2.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
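/* maddubs: multiply unsigned bytes of a by the corresponding signed
 * bytes of b, then horizontally add adjacent 16-bit products with
 * signed saturation. The mask/maskz forms below are expressed through
 * the corresponding mov helpers when no native instruction is
 * available. */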
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_maddubs_epi16 (simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_maddubs_epi16(src, k, a, b);
#else
return simde_mm_mask_mov_epi16(src, k, simde_mm_maddubs_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_maddubs_epi16
#define _mm_mask_maddubs_epi16(src, k, a, b) simde_mm_mask_maddubs_epi16(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_maddubs_epi16 (simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_maddubs_epi16(k, a, b);
#else
return simde_mm_maskz_mov_epi16(k, simde_mm_maddubs_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_maddubs_epi16
#define _mm_maskz_maddubs_epi16(k, a, b) simde_mm_maskz_maddubs_epi16(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_maddubs_epi16 (simde__m256i src, simde__mmask16 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_maddubs_epi16(src, k, a, b);
#else
return simde_mm256_mask_mov_epi16(src, k, simde_mm256_maddubs_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_maddubs_epi16
#define _mm256_mask_maddubs_epi16(src, k, a, b) simde_mm256_mask_maddubs_epi16(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_maddubs_epi16 (simde__mmask16 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_maddubs_epi16(k, a, b);
#else
return simde_mm256_maskz_mov_epi16(k, simde_mm256_maddubs_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_maddubs_epi16
#define _mm256_maskz_maddubs_epi16(k, a, b) simde_mm256_maskz_maddubs_epi16(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maddubs_epi16 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maddubs_epi16(a, b);
#else
simde__m512i_private r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256) || defined(SIMDE_BUG_CLANG_BAD_MADD)
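/* Defer to the 256-bit implementation; this path also sidesteps a
 * known clang madd miscompilation (SIMDE_BUG_CLANG_BAD_MADD). */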
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_maddubs_epi16(a_.m256i[i], b_.m256i[i]);
}
#else
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
const int idx = HEDLEY_STATIC_CAST(int, i) << 1;
int32_t ts =
(HEDLEY_STATIC_CAST(int16_t, a_.u8[ idx ]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[ idx ])) +
(HEDLEY_STATIC_CAST(int16_t, a_.u8[idx + 1]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[idx + 1]));
r_.i16[i] = (ts > INT16_MIN) ? ((ts < INT16_MAX) ? HEDLEY_STATIC_CAST(int16_t, ts) : INT16_MAX) : INT16_MIN;
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maddubs_epi16
#define _mm512_maddubs_epi16(a, b) simde_mm512_maddubs_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_maddubs_epi16 (simde__m512i src, simde__mmask32 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_maddubs_epi16(src, k, a, b);
#else
return simde_mm512_mask_mov_epi16(src, k, simde_mm512_maddubs_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_maddubs_epi16
#define _mm512_mask_maddubs_epi16(src, k, a, b) simde_mm512_mask_maddubs_epi16(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_maddubs_epi16 (simde__mmask32 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_maddubs_epi16(k, a, b);
#else
return simde_mm512_maskz_mov_epi16(k, simde_mm512_maddubs_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_maddubs_epi16
#define _mm512_maskz_maddubs_epi16(k, a, b) simde_mm512_maskz_maddubs_epi16(k, a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_MADDUBS_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/dbsad.h | .h | 15,983 | 389 | #if !defined(SIMDE_X86_AVX512_DBSAD_H)
#define SIMDE_X86_AVX512_DBSAD_H
#include "types.h"
#include "mov.h"
#include "../avx2.h"
#include "shuffle.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm_dbsad_epu8(a, b, imm8) _mm_dbsad_epu8((a), (b), (imm8))
#else
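/* dbsad ("double block" sum of absolute differences): b is first
 * permuted in 32-bit lanes according to imm8 (see the wrapper macro
 * after this function); each 16-bit result lane then accumulates four
 * absolute byte differences taken from overlapping four-byte windows
 * of a and the permuted b. */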
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_dbsad_epu8_internal_ (simde__m128i a, simde__m128i b) {
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
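/* Branchless absolute difference: select (b1 - a1) where a1 < b1 and
 * (a1 - b1) elsewhere via the comparison mask, then widen the even and
 * odd byte lanes and pairwise-add them into the 16-bit accumulators. */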
uint8_t a1 SIMDE_VECTOR(16) =
SIMDE_SHUFFLE_VECTOR_(
8, 16, a_.u8, a_.u8,
0, 1, 0, 1,
4, 5, 4, 5,
8, 9, 8, 9,
12, 13, 12, 13);
uint8_t b1 SIMDE_VECTOR(16) =
SIMDE_SHUFFLE_VECTOR_(
8, 16, b_.u8, b_.u8,
0, 1, 1, 2,
2, 3, 3, 4,
8, 9, 9, 10,
10, 11, 11, 12);
__typeof__(r_.u8) abd1_mask = HEDLEY_REINTERPRET_CAST(__typeof__(abd1_mask), a1 < b1);
__typeof__(r_.u8) abd1 = (((b1 - a1) & abd1_mask) | ((a1 - b1) & ~abd1_mask));
r_.u16 =
__builtin_convertvector(__builtin_shufflevector(abd1, abd1, 0, 2, 4, 6, 8, 10, 12, 14), __typeof__(r_.u16)) +
__builtin_convertvector(__builtin_shufflevector(abd1, abd1, 1, 3, 5, 7, 9, 11, 13, 15), __typeof__(r_.u16));
uint8_t a2 SIMDE_VECTOR(16) =
SIMDE_SHUFFLE_VECTOR_(
8, 16, a_.u8, a_.u8,
2, 3, 2, 3,
6, 7, 6, 7,
10, 11, 10, 11,
14, 15, 14, 15);
uint8_t b2 SIMDE_VECTOR(16) =
SIMDE_SHUFFLE_VECTOR_(
8, 16, b_.u8, b_.u8,
2, 3, 3, 4,
4, 5, 5, 6,
10, 11, 11, 12,
12, 13, 13, 14);
__typeof__(r_.u8) abd2_mask = HEDLEY_REINTERPRET_CAST(__typeof__(abd2_mask), a2 < b2);
__typeof__(r_.u8) abd2 = (((b2 - a2) & abd2_mask) | ((a2 - b2) & ~abd2_mask));
r_.u16 +=
__builtin_convertvector(__builtin_shufflevector(abd2, abd2, 0, 2, 4, 6, 8, 10, 12, 14), __typeof__(r_.u16)) +
__builtin_convertvector(__builtin_shufflevector(abd2, abd2, 1, 3, 5, 7, 9, 11, 13, 15), __typeof__(r_.u16));
#else
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = 0;
for (size_t j = 0 ; j < 4 ; j++) {
uint16_t A = HEDLEY_STATIC_CAST(uint16_t, a_.u8[((i << 1) & 12) + j]);
uint16_t B = HEDLEY_STATIC_CAST(uint16_t, b_.u8[((i & 3) | ((i << 1) & 8)) + j]);
r_.u16[i] += (A < B) ? (B - A) : (A - B);
}
}
#endif
return simde__m128i_from_private(r_);
}
#define simde_mm_dbsad_epu8(a, b, imm8) simde_mm_dbsad_epu8_internal_((a), simde_mm_shuffle_epi32((b), (imm8)))
#endif
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_dbsad_epu8
#define _mm_dbsad_epu8(a, b, imm8) simde_mm_dbsad_epu8(a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm_mask_dbsad_epu8(src, k, a, b, imm8) _mm_mask_dbsad_epu8((src), (k), (a), (b), (imm8))
#else
#define simde_mm_mask_dbsad_epu8(src, k, a, b, imm8) simde_mm_mask_mov_epi16(src, k, simde_mm_dbsad_epu8(a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_dbsad_epu8
#define _mm_mask_dbsad_epu8(src, k, a, b, imm8) simde_mm_mask_dbsad_epu8(src, k, a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm_maskz_dbsad_epu8(k, a, b, imm8) _mm_maskz_dbsad_epu8((k), (a), (b), (imm8))
#else
#define simde_mm_maskz_dbsad_epu8(k, a, b, imm8) simde_mm_maskz_mov_epi16(k, simde_mm_dbsad_epu8(a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_dbsad_epu8
#define _mm_maskz_dbsad_epu8(k, a, b, imm8) simde_mm_maskz_dbsad_epu8(k, a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm256_dbsad_epu8(a, b, imm8) _mm256_dbsad_epu8((a), (b), (imm8))
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) && defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm256_dbsad_epu8(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \
simde__m256i_private \
simde_mm256_dbsad_epu8_a_ = simde__m256i_to_private(a), \
simde_mm256_dbsad_epu8_b_ = simde__m256i_to_private(b); \
\
simde_mm256_dbsad_epu8_a_.m128i[0] = simde_mm_dbsad_epu8(simde_mm256_dbsad_epu8_a_.m128i[0], simde_mm256_dbsad_epu8_b_.m128i[0], imm8); \
simde_mm256_dbsad_epu8_a_.m128i[1] = simde_mm_dbsad_epu8(simde_mm256_dbsad_epu8_a_.m128i[1], simde_mm256_dbsad_epu8_b_.m128i[1], imm8); \
\
simde__m256i_from_private(simde_mm256_dbsad_epu8_a_); \
}))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_dbsad_epu8_internal_ (simde__m256i a, simde__m256i b) {
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
uint8_t a1 SIMDE_VECTOR(32) =
SIMDE_SHUFFLE_VECTOR_(
8, 32, a_.u8, a_.u8,
0, 1, 0, 1,
4, 5, 4, 5,
8, 9, 8, 9,
12, 13, 12, 13,
16, 17, 16, 17,
20, 21, 20, 21,
24, 25, 24, 25,
28, 29, 28, 29);
uint8_t b1 SIMDE_VECTOR(32) =
SIMDE_SHUFFLE_VECTOR_(
8, 32, b_.u8, b_.u8,
0, 1, 1, 2,
2, 3, 3, 4,
8, 9, 9, 10,
10, 11, 11, 12,
16, 17, 17, 18,
18, 19, 19, 20,
24, 25, 25, 26,
26, 27, 27, 28);
__typeof__(r_.u8) abd1_mask = HEDLEY_REINTERPRET_CAST(__typeof__(abd1_mask), a1 < b1);
__typeof__(r_.u8) abd1 = (((b1 - a1) & abd1_mask) | ((a1 - b1) & ~abd1_mask));
r_.u16 =
__builtin_convertvector(__builtin_shufflevector(abd1, abd1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30), __typeof__(r_.u16)) +
__builtin_convertvector(__builtin_shufflevector(abd1, abd1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31), __typeof__(r_.u16));
uint8_t a2 SIMDE_VECTOR(32) =
SIMDE_SHUFFLE_VECTOR_(
8, 32, a_.u8, a_.u8,
2, 3, 2, 3,
6, 7, 6, 7,
10, 11, 10, 11,
14, 15, 14, 15,
18, 19, 18, 19,
22, 23, 22, 23,
26, 27, 26, 27,
30, 31, 30, 31);
uint8_t b2 SIMDE_VECTOR(32) =
SIMDE_SHUFFLE_VECTOR_(
8, 32, b_.u8, b_.u8,
2, 3, 3, 4,
4, 5, 5, 6,
10, 11, 11, 12,
12, 13, 13, 14,
18, 19, 19, 20,
20, 21, 21, 22,
26, 27, 27, 28,
28, 29, 29, 30);
__typeof__(r_.u8) abd2_mask = HEDLEY_REINTERPRET_CAST(__typeof__(abd2_mask), a2 < b2);
__typeof__(r_.u8) abd2 = (((b2 - a2) & abd2_mask) | ((a2 - b2) & ~abd2_mask));
r_.u16 +=
__builtin_convertvector(__builtin_shufflevector(abd2, abd2, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30), __typeof__(r_.u16)) +
__builtin_convertvector(__builtin_shufflevector(abd2, abd2, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31), __typeof__(r_.u16));
#else
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = 0;
for (size_t j = 0 ; j < 4 ; j++) {
uint16_t A = HEDLEY_STATIC_CAST(uint16_t, a_.u8[(((i << 1) & 12) | ((i & 8) << 1)) + j]);
uint16_t B = HEDLEY_STATIC_CAST(uint16_t, b_.u8[((i & 3) | ((i << 1) & 8) | ((i & 8) << 1)) + j]);
r_.u16[i] += (A < B) ? (B - A) : (A - B);
}
}
#endif
return simde__m256i_from_private(r_);
}
#define simde_mm256_dbsad_epu8(a, b, imm8) simde_mm256_dbsad_epu8_internal_((a), simde_mm256_shuffle_epi32((b), (imm8)))
#endif
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_dbsad_epu8
#define _mm256_dbsad_epu8(a, b, imm8) simde_mm256_dbsad_epu8(a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm256_mask_dbsad_epu8(src, k, a, b, imm8) _mm256_mask_dbsad_epu8((src), (k), (a), (b), (imm8))
#else
#define simde_mm256_mask_dbsad_epu8(src, k, a, b, imm8) simde_mm256_mask_mov_epi16(src, k, simde_mm256_dbsad_epu8(a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_dbsad_epu8
#define _mm256_mask_dbsad_epu8(src, k, a, b, imm8) simde_mm256_mask_dbsad_epu8(src, k, a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm256_maskz_dbsad_epu8(k, a, b, imm8) _mm256_maskz_dbsad_epu8((k), (a), (b), (imm8))
#else
#define simde_mm256_maskz_dbsad_epu8(k, a, b, imm8) simde_mm256_maskz_mov_epi16(k, simde_mm256_dbsad_epu8(a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_dbsad_epu8
#define _mm256_maskz_dbsad_epu8(k, a, b, imm8) simde_mm256_maskz_dbsad_epu8(k, a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512BW_NATIVE)
#define simde_mm512_dbsad_epu8(a, b, imm8) _mm512_dbsad_epu8((a), (b), (imm8))
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) && defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm512_dbsad_epu8(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \
simde__m512i_private \
simde_mm512_dbsad_epu8_a_ = simde__m512i_to_private(a), \
simde_mm512_dbsad_epu8_b_ = simde__m512i_to_private(b); \
\
simde_mm512_dbsad_epu8_a_.m256i[0] = simde_mm256_dbsad_epu8(simde_mm512_dbsad_epu8_a_.m256i[0], simde_mm512_dbsad_epu8_b_.m256i[0], imm8); \
simde_mm512_dbsad_epu8_a_.m256i[1] = simde_mm256_dbsad_epu8(simde_mm512_dbsad_epu8_a_.m256i[1], simde_mm512_dbsad_epu8_b_.m256i[1], imm8); \
\
simde__m512i_from_private(simde_mm512_dbsad_epu8_a_); \
}))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_dbsad_epu8_internal_ (simde__m512i a, simde__m512i b) {
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
uint8_t a1 SIMDE_VECTOR(64) =
SIMDE_SHUFFLE_VECTOR_(
8, 64, a_.u8, a_.u8,
0, 1, 0, 1,
4, 5, 4, 5,
8, 9, 8, 9,
12, 13, 12, 13,
16, 17, 16, 17,
20, 21, 20, 21,
24, 25, 24, 25,
28, 29, 28, 29,
32, 33, 32, 33,
36, 37, 36, 37,
40, 41, 40, 41,
44, 45, 44, 45,
48, 49, 48, 49,
52, 53, 52, 53,
56, 57, 56, 57,
60, 61, 60, 61);
uint8_t b1 SIMDE_VECTOR(64) =
SIMDE_SHUFFLE_VECTOR_(
8, 64, b_.u8, b_.u8,
0, 1, 1, 2,
2, 3, 3, 4,
8, 9, 9, 10,
10, 11, 11, 12,
16, 17, 17, 18,
18, 19, 19, 20,
24, 25, 25, 26,
26, 27, 27, 28,
32, 33, 33, 34,
34, 35, 35, 36,
40, 41, 41, 42,
42, 43, 43, 44,
48, 49, 49, 50,
50, 51, 51, 52,
56, 57, 57, 58,
58, 59, 59, 60);
__typeof__(r_.u8) abd1_mask = HEDLEY_REINTERPRET_CAST(__typeof__(abd1_mask), a1 < b1);
__typeof__(r_.u8) abd1 = (((b1 - a1) & abd1_mask) | ((a1 - b1) & ~abd1_mask));
r_.u16 =
__builtin_convertvector(__builtin_shufflevector(abd1, abd1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62), __typeof__(r_.u16)) +
__builtin_convertvector(__builtin_shufflevector(abd1, abd1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63), __typeof__(r_.u16));
uint8_t a2 SIMDE_VECTOR(64) =
SIMDE_SHUFFLE_VECTOR_(
8, 64, a_.u8, a_.u8,
2, 3, 2, 3,
6, 7, 6, 7,
10, 11, 10, 11,
14, 15, 14, 15,
18, 19, 18, 19,
22, 23, 22, 23,
26, 27, 26, 27,
30, 31, 30, 31,
34, 35, 34, 35,
38, 39, 38, 39,
42, 43, 42, 43,
46, 47, 46, 47,
50, 51, 50, 51,
54, 55, 54, 55,
58, 59, 58, 59,
62, 63, 62, 63);
uint8_t b2 SIMDE_VECTOR(64) =
SIMDE_SHUFFLE_VECTOR_(
8, 64, b_.u8, b_.u8,
2, 3, 3, 4,
4, 5, 5, 6,
10, 11, 11, 12,
12, 13, 13, 14,
18, 19, 19, 20,
20, 21, 21, 22,
26, 27, 27, 28,
28, 29, 29, 30,
34, 35, 35, 36,
36, 37, 37, 38,
42, 43, 43, 44,
44, 45, 45, 46,
50, 51, 51, 52,
52, 53, 53, 54,
58, 59, 59, 60,
60, 61, 61, 62);
__typeof__(r_.u8) abd2_mask = HEDLEY_REINTERPRET_CAST(__typeof__(abd2_mask), a2 < b2);
__typeof__(r_.u8) abd2 = (((b2 - a2) & abd2_mask) | ((a2 - b2) & ~abd2_mask));
r_.u16 +=
__builtin_convertvector(__builtin_shufflevector(abd2, abd2, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62), __typeof__(r_.u16)) +
__builtin_convertvector(__builtin_shufflevector(abd2, abd2, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63), __typeof__(r_.u16));
#else
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = 0;
for (size_t j = 0 ; j < 4 ; j++) {
uint16_t A = HEDLEY_STATIC_CAST(uint16_t, a_.u8[(((i << 1) & 12) | ((i & 8) << 1) | ((i & 16) << 1)) + j]);
uint16_t B = HEDLEY_STATIC_CAST(uint16_t, b_.u8[((i & 3) | ((i << 1) & 8) | ((i & 8) << 1) | ((i & 16) << 1)) + j]);
r_.u16[i] += (A < B) ? (B - A) : (A - B);
}
}
#endif
return simde__m512i_from_private(r_);
}
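/* Pre-shuffle b's 32-bit lanes through shuffle_ps on reinterpreted
 * vectors, presumably because simde_mm512_shuffle_epi32 takes a
 * SIMDE_MM_PERM_ENUM argument rather than a plain integer immediate. */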
#define simde_mm512_dbsad_epu8(a, b, imm8) simde_mm512_dbsad_epu8_internal_((a), simde_mm512_castps_si512(simde_mm512_shuffle_ps(simde_mm512_castsi512_ps(b), simde_mm512_castsi512_ps(b), (imm8))))
#endif
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_dbsad_epu8
#define _mm512_dbsad_epu8(a, b, imm8) simde_mm512_dbsad_epu8(a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512BW_NATIVE)
#define simde_mm512_mask_dbsad_epu8(src, k, a, b, imm8) _mm512_mask_dbsad_epu8((src), (k), (a), (b), (imm8))
#else
#define simde_mm512_mask_dbsad_epu8(src, k, a, b, imm8) simde_mm512_mask_mov_epi16(src, k, simde_mm512_dbsad_epu8(a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_dbsad_epu8
#define _mm512_mask_dbsad_epu8(src, k, a, b, imm8) simde_mm512_mask_dbsad_epu8(src, k, a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512BW_NATIVE)
#define simde_mm512_maskz_dbsad_epu8(k, a, b, imm8) _mm512_maskz_dbsad_epu8((k), (a), (b), (imm8))
#else
#define simde_mm512_maskz_dbsad_epu8(k, a, b, imm8) simde_mm512_maskz_mov_epi16(k, simde_mm512_dbsad_epu8(a, b, imm8))
#endif
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_dbsad_epu8
#define _mm512_maskz_dbsad_epu8(k, a, b, imm8) simde_mm512_maskz_dbsad_epu8(k, a, b, imm8)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_DBSAD_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/cvt.h | .h | 13,141 | 403 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020-2021 Evan Nemerson <evan@nemerson.com>
* 2020 Himanshi Mathur <himanshi18037@iiitd.ac.in>
* 2020 Hidayat Khan <huk2209@gmail.com>
* 2021 Andrew Rodriguez <anrodriguez@linkedin.com>
*/
#if !defined(SIMDE_X86_AVX512_CVT_H)
#define SIMDE_X86_AVX512_CVT_H
#include "types.h"
#include "mov.h"
#include "../../simde-f16.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cvtepi64_pd (simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE)
return _mm_cvtepi64_pd(a);
#else
simde__m128d_private r_;
simde__m128i_private a_ = simde__m128i_to_private(a);
#if defined(SIMDE_X86_SSE2_NATIVE)
/* https://stackoverflow.com/questions/41144668/how-to-efficiently-perform-double-int64-conversions-with-sse-avx */
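/* The trick: embed the (sign-extended) high and the low 32-bit halves
 * of each int64 in the mantissas of doubles carrying power-of-two
 * biases via magic constants, then subtract the combined bias so the
 * two halves recombine into the converted value without per-element
 * scalar code. */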
__m128i xH = _mm_srai_epi32(a_.n, 16);
#if defined(SIMDE_X86_SSE4_2_NATIVE)
xH = _mm_blend_epi16(xH, _mm_setzero_si128(), 0x33);
#else
xH = _mm_and_si128(xH, _mm_set_epi16(~INT16_C(0), ~INT16_C(0), INT16_C(0), INT16_C(0), ~INT16_C(0), ~INT16_C(0), INT16_C(0), INT16_C(0)));
#endif
xH = _mm_add_epi64(xH, _mm_castpd_si128(_mm_set1_pd(442721857769029238784.0)));
const __m128i e = _mm_castpd_si128(_mm_set1_pd(0x0010000000000000));
#if defined(SIMDE_X86_SSE4_2_NATIVE)
__m128i xL = _mm_blend_epi16(a_.n, e, 0x88);
#else
__m128i m = _mm_set_epi16(INT16_C(0), ~INT16_C(0), ~INT16_C(0), ~INT16_C(0), INT16_C(0), ~INT16_C(0), ~INT16_C(0), ~INT16_C(0));
__m128i xL = _mm_or_si128(_mm_and_si128(m, a_.n), _mm_andnot_si128(m, e));
#endif
__m128d f = _mm_sub_pd(_mm_castsi128_pd(xH), _mm_set1_pd(442726361368656609280.0));
return _mm_add_pd(f, _mm_castsi128_pd(xL));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f64, a_.i64);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = HEDLEY_STATIC_CAST(simde_float64, a_.i64[i]);
}
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm_cvtepi64_pd
#define _mm_cvtepi64_pd(a) simde_mm_cvtepi64_pd(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_mask_cvtepi64_pd(simde__m128d src, simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE)
return _mm_mask_cvtepi64_pd(src, k, a);
#else
return simde_mm_mask_mov_pd(src, k, simde_mm_cvtepi64_pd(a));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_cvtepi64_pd
#define _mm_mask_cvtepi64_pd(src, k, a) simde_mm_mask_cvtepi64_pd(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_maskz_cvtepi64_pd(simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE)
return _mm_maskz_cvtepi64_pd(k, a);
#else
return simde_mm_maskz_mov_pd(k, simde_mm_cvtepi64_pd(a));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_cvtepi64_pd
#define _mm_maskz_cvtepi64_pd(k, a) simde_mm_maskz_cvtepi64_pd((k), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_cvtepi16_epi32 (simde__m256i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_cvtepi16_epi32(a);
#else
simde__m512i_private r_;
simde__m256i_private a_ = simde__m256i_to_private(a);
#if defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.i32, a_.i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i16[i];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_cvtepi16_epi32
#define _mm512_cvtepi16_epi32(a) simde_mm512_cvtepi16_epi32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm512_cvtepi16_epi8 (simde__m512i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_cvtepi16_epi8(a);
#else
simde__m256i_private r_;
simde__m512i_private a_ = simde__m512i_to_private(a);
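/* Note: _mm512_cvtepi16_epi8 truncates each element to its low 8 bits;
 * the saturating variant is _mm512_cvtsepi16_epi8. */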
#if defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.i8, a_.i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = HEDLEY_STATIC_CAST(int8_t, a_.i16[i]);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_cvtepi16_epi8
#define _mm512_cvtepi16_epi8(a) simde_mm512_cvtepi16_epi8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm512_mask_cvtepi16_epi8 (simde__m256i src, simde__mmask32 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_cvtepi16_epi8(src, k, a);
#else
return simde_mm256_mask_mov_epi8(src, k, simde_mm512_cvtepi16_epi8(a));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_cvtepi16_epi8
#define _mm512_mask_cvtepi16_epi8(src, k, a) simde_mm512_mask_cvtepi16_epi8(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm512_maskz_cvtepi16_epi8 (simde__mmask32 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_cvtepi16_epi8(k, a);
#else
return simde_mm256_maskz_mov_epi8(k, simde_mm512_cvtepi16_epi8(a));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_cvtepi16_epi8
#define _mm512_maskz_cvtepi16_epi8(k, a) simde_mm512_maskz_cvtepi16_epi8(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_cvtepi8_epi16 (simde__m256i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_cvtepi8_epi16(a);
#else
simde__m512i_private r_;
simde__m256i_private a_ = simde__m256i_to_private(a);
#if defined(SIMDE_X86_AVX2_NATIVE)
r_.m256i[0] = _mm256_cvtepi8_epi16(a_.m128i[0]);
r_.m256i[1] = _mm256_cvtepi8_epi16(a_.m128i[1]);
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.i16, a_.i8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = a_.i8[i];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_cvtepi8_epi16
#define _mm512_cvtepi8_epi16(a) simde_mm512_cvtepi8_epi16(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_cvtepi32_ps (simde__m512i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_cvtepi32_ps(a);
#else
simde__m512_private r_;
simde__m512i_private a_ = simde__m512i_to_private(a);
#if defined(SIMDE_X86_AVX_NATIVE)
r_.m256[0] = _mm256_cvtepi32_ps(a_.m256i[0]);
r_.m256[1] = _mm256_cvtepi32_ps(a_.m256i[1]);
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.f32[i] = HEDLEY_STATIC_CAST(simde_float32, a_.i32[i]);
}
#endif
return simde__m512_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_cvtepi32_ps
#define _mm512_cvtepi32_ps(a) simde_mm512_cvtepi32_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm512_cvtepi64_epi32 (simde__m512i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_cvtepi64_epi32(a);
#else
simde__m256i_private r_;
simde__m512i_private a_ = simde__m512i_to_private(a);
#if defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.i32, a_.i64);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, a_.i64[i]);
}
#endif
return simde__m256i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_cvtepi64_epi32
#define _mm512_cvtepi64_epi32(a) simde_mm512_cvtepi64_epi32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_cvtepu16_epi32 (simde__m256i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_cvtepu16_epi32(a);
#else
simde__m512i_private r_;
simde__m256i_private a_ = simde__m256i_to_private(a);
#if defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.i32, a_.u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, a_.u16[i]);
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_cvtepu16_epi32
#define _mm512_cvtepu16_epi32(a) simde_mm512_cvtepu16_epi32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_cvtepu32_ps (simde__m512i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_cvtepu32_ps(a);
#else
simde__m512_private r_;
simde__m512i_private a_ = simde__m512i_to_private(a);
#if defined(SIMDE_X86_SSE2_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128) / sizeof(r_.m128[0])) ; i++) {
/* https://stackoverflow.com/a/34067907/501126 */
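/* Halve the unsigned value with a logical shift so it fits in a signed
 * 32-bit int, convert, double the result, then add back the dropped
 * least-significant bit, also converted. */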
const __m128 tmp = _mm_cvtepi32_ps(_mm_srli_epi32(a_.m128i[i], 1));
r_.m128[i] =
_mm_add_ps(
_mm_add_ps(tmp, tmp),
_mm_cvtepi32_ps(_mm_and_si128(a_.m128i[i], _mm_set1_epi32(1)))
);
}
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.u32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.f32[i] = HEDLEY_STATIC_CAST(float, a_.u32[i]);
}
#endif
return simde__m512_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_cvtepu32_ps
#define _mm512_cvtepu32_ps(a) simde_mm512_cvtepu32_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_cvtph_ps(simde__m256i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_cvtph_ps(a);
#endif
simde__m256i_private a_ = simde__m256i_to_private(a);
simde__m512_private r_;
#if defined(SIMDE_X86_F16C_NATIVE)
r_.m256[0] = _mm256_cvtph_ps(a_.m128i[0]);
r_.m256[1] = _mm256_cvtph_ps(a_.m128i[1]);
#elif defined(SIMDE_FLOAT16_VECTOR)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_float16_to_float32(a_.f16[i]);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_float16_to_float32(simde_uint16_as_float16(a_.u16[i]));
}
#endif
return simde__m512_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_cvtph_ps
#define _mm512_cvtph_ps(a) simde_mm512_cvtph_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_cvtps_epi32(simde__m512 a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_cvtps_epi32(a);
#endif
simde__m512_private a_ = simde__m512_to_private(a);
simde__m512i_private r_;
#if defined(SIMDE_X86_AVX_NATIVE)
r_.m256i[0] = _mm256_cvtps_epi32(a_.m256[0]);
r_.m256i[1] = _mm256_cvtps_epi32(a_.m256[1]);
#elif defined(simde_math_nearbyintf)
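/* _mm512_cvtps_epi32 rounds according to the current rounding mode
 * (round-to-nearest-even by default), so use nearbyintf rather than a
 * truncating cast. */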
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, simde_math_nearbyintf(a_.f32[i]));
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m512i_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_cvtps_epi32
#define _mm512_cvtps_epi32(a) simde_mm512_cvtps_epi32(a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_CVT_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/madd.h | .h | 5,934 | 158 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Ashleigh Newman-Jones <ashnewman-jones@hotmail.co.uk>
*/
#if !defined(SIMDE_X86_AVX512_MADD_H)
#define SIMDE_X86_AVX512_MADD_H
#include "types.h"
#include "mov.h"
#include "../avx2.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_madd_epi16 (simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_madd_epi16(src, k, a, b);
#else
return simde_mm_mask_mov_epi32(src, k, simde_mm_madd_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_madd_epi16
#define _mm_mask_madd_epi16(src, k, a, b) simde_mm_mask_madd_epi16(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_madd_epi16 (simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_madd_epi16(k, a, b);
#else
return simde_mm_maskz_mov_epi32(k, simde_mm_madd_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_madd_epi16
#define _mm_maskz_madd_epi16(k, a, b) simde_mm_maskz_madd_epi16(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_madd_epi16 (simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_madd_epi16(src, k, a, b);
#else
return simde_mm256_mask_mov_epi32(src, k, simde_mm256_madd_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_madd_epi16
#define _mm256_mask_madd_epi16(src, k, a, b) simde_mm256_mask_madd_epi16(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_madd_epi16 (simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_madd_epi16(k, a, b);
#else
return simde_mm256_maskz_mov_epi32(k, simde_mm256_madd_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_madd_epi16
#define _mm256_maskz_madd_epi16(k, a, b) simde_mm256_maskz_madd_epi16(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_madd_epi16 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_madd_epi16(a, b);
#else
simde__m512i_private r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if (0 && SIMDE_NATURAL_VECTOR_SIZE_LE(256)) || defined(SIMDE_BUG_CLANG_BAD_MADD)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_madd_epi16(a_.m256i[i], b_.m256i[i]);
}
#else
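/* Multiply adjacent pairs of signed 16-bit elements and add the two
 * 32-bit products to form each 32-bit result. */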
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.i16[0])) ; i += 2) {
r_.i32[i / 2] =
(HEDLEY_STATIC_CAST(int32_t, a_.i16[ i ]) * HEDLEY_STATIC_CAST(int32_t, b_.i16[ i ])) +
(HEDLEY_STATIC_CAST(int32_t, a_.i16[i + 1]) * HEDLEY_STATIC_CAST(int32_t, b_.i16[i + 1]));
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_madd_epi16
#define _mm512_madd_epi16(a, b) simde_mm512_madd_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_madd_epi16 (simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_madd_epi16(src, k, a, b);
#else
return simde_mm512_mask_mov_epi32(src, k, simde_mm512_madd_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_madd_epi16
#define _mm512_mask_madd_epi16(src, k, a, b) simde_mm512_mask_madd_epi16(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_madd_epi16 (simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_madd_epi16(k, a, b);
#else
return simde_mm512_maskz_mov_epi32(k, simde_mm512_madd_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_madd_epi16
#define _mm512_maskz_madd_epi16(k, a, b) simde_mm512_maskz_madd_epi16(k, a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_MADD_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/rcp.h | .h | 2,133 | 66 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2023 Michael R. Crusoe <crusoe@debian.org>
*/
#if !defined(SIMDE_X86_AVX512_RCP_H)
#define SIMDE_X86_AVX512_RCP_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
// TODO: "The maximum relative error for this approximation is less than 2^-14."
// vs 1.5*2^-12 for _mm{,256}_rcp_ps
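// Note that the portable fallback below computes the exactly rounded
// reciprocal, which is well within that error bound.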
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_rcp14_ps (simde__m512 a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_rcp14_ps(a);
#else
simde__m512_private
r_,
a_ = simde__m512_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = SIMDE_FLOAT32_C(1.0) / a_.f32[i];
}
return simde__m512_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_rcp14_ps
#define _mm512_rcp14_ps(a) simde_mm512_rcp14_ps(a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_RCP_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/fnmadd.h | .h | 3,620 | 109 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 kitegi <kitegi@users.noreply.github.com>
*/
#if !defined(SIMDE_X86_AVX512_FNMADD_H)
#define SIMDE_X86_AVX512_FNMADD_H
#include "types.h"
#include "mov.h"
#include "../fma.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_fnmadd_ps (simde__m512 a, simde__m512 b, simde__m512 c) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_fnmadd_ps(a, b, c);
#else
simde__m512_private
r_,
a_ = simde__m512_to_private(a),
b_ = simde__m512_to_private(b),
c_ = simde__m512_to_private(c);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256) / sizeof(r_.m256[0])) ; i++) {
r_.m256[i] = simde_mm256_fnmadd_ps(a_.m256[i], b_.m256[i], c_.m256[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = -(a_.f32 * b_.f32) + c_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = -(a_.f32[i] * b_.f32[i]) + c_.f32[i];
}
#endif
return simde__m512_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_fnmadd_ps
#define _mm512_fnmadd_ps(a, b, c) simde_mm512_fnmadd_ps(a, b, c)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_fnmadd_pd (simde__m512d a, simde__m512d b, simde__m512d c) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_fnmadd_pd(a, b, c);
#else
simde__m512d_private
r_,
a_ = simde__m512d_to_private(a),
b_ = simde__m512d_to_private(b),
c_ = simde__m512d_to_private(c);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256d) / sizeof(r_.m256d[0])) ; i++) {
r_.m256d[i] = simde_mm256_fnmadd_pd(a_.m256d[i], b_.m256d[i], c_.m256d[i]);
}
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f64 = -(a_.f64 * b_.f64) + c_.f64;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = -(a_.f64[i] * b_.f64[i]) + c_.f64[i];
}
#endif
return simde__m512d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_fnmadd_pd
#define _mm512_fnmadd_pd(a, b, c) simde_mm512_fnmadd_pd(a, b, c)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_FNMADD_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/abs.h | .h | 19,089 | 581 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Hidayat Khan <huk2209@gmail.com>
*/
#if !defined(SIMDE_X86_AVX512_ABS_H)
#define SIMDE_X86_AVX512_ABS_H
#include "types.h"
#include "mov.h"
#include "../avx2.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_abs_epi8(simde__m128i src, simde__mmask16 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_mask_abs_epi8(src, k, a);
#else
return simde_mm_mask_mov_epi8(src, k, simde_mm_abs_epi8(a));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_abs_epi8
#define _mm_mask_abs_epi8(src, k, a) simde_mm_mask_abs_epi8(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_abs_epi8(simde__mmask16 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_maskz_abs_epi8(k, a);
#else
return simde_mm_maskz_mov_epi8(k, simde_mm_abs_epi8(a));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_abs_epi8
#define _mm_maskz_abs_epi8(k, a) simde_mm_maskz_abs_epi8(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_abs_epi16(simde__m128i src, simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_mask_abs_epi16(src, k, a);
#else
return simde_mm_mask_mov_epi16(src, k, simde_mm_abs_epi16(a));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_abs_epi16
#define _mm_mask_abs_epi16(src, k, a) simde_mm_mask_abs_epi16(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_abs_epi16(simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm_maskz_abs_epi16(k, a);
#else
return simde_mm_maskz_mov_epi16(k, simde_mm_abs_epi16(a));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_abs_epi16
#define _mm_maskz_abs_epi16(k, a) simde_mm_maskz_abs_epi16(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_abs_epi32(simde__m128i src, simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_abs_epi32(src, k, a);
#else
return simde_mm_mask_mov_epi32(src, k, simde_mm_abs_epi32(a));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_abs_epi32
#define _mm_mask_abs_epi32(src, k, a) simde_mm_mask_abs_epi32(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_abs_epi32(simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_abs_epi32(k, a);
#else
return simde_mm_maskz_mov_epi32(k, simde_mm_abs_epi32(a));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_abs_epi32
#define _mm_maskz_abs_epi32(k, a) simde_mm_maskz_abs_epi32(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_abs_epi64(simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_abs_epi64(a);
#elif defined(SIMDE_X86_SSE2_NATIVE)
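/* Build a per-lane sign mask by copying each lane's high 32 bits into both
 * halves (shuffle 0xF5) and shifting in the sign bit, then use the identity
 * abs(x) == (x ^ m) - m, where m is 0 or all-ones. */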
const __m128i m = _mm_srai_epi32(_mm_shuffle_epi32(a, 0xF5), 31);
return _mm_sub_epi64(_mm_xor_si128(a, m), m);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_i64 = vabsq_s64(a_.neon_i64);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const int64x2_t m = vshrq_n_s64(a_.neon_i64, 63);
r_.neon_i64 = vsubq_s64(veorq_s64(a_.neon_i64, m), m);
#elif (defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && !defined(HEDLEY_IBM_VERSION)) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i64 = vec_abs(a_.altivec_i64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i64x2_abs(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
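/* Branchless select: m is all-ones where a is negative, so this picks -a
 * for negative lanes and a otherwise. */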
__typeof__(r_.i64) z = { 0, };
__typeof__(r_.i64) m = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 < z);
r_.i64 = (-a_.i64 & m) | (a_.i64 & ~m);
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.i64) / sizeof(r_.i64[0])); i++) {
r_.i64[i] = (a_.i64[i] < INT64_C(0)) ? -a_.i64[i] : a_.i64[i];
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_abs_epi64
#define _mm_abs_epi64(a) simde_mm_abs_epi64(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_abs_epi64(simde__m128i src, simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_abs_epi64(src, k, a);
#else
return simde_mm_mask_mov_epi64(src, k, simde_mm_abs_epi64(a));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_abs_epi64
#define _mm_mask_abs_epi64(src, k, a) simde_mm_mask_abs_epi64(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_abs_epi64(simde__mmask8 k, simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_abs_epi64(k, a);
#else
return simde_mm_maskz_mov_epi64(k, simde_mm_abs_epi64(a));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_abs_epi64
#define _mm_maskz_abs_epi64(k, a) simde_mm_maskz_abs_epi64(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_abs_epi64(simde__m256i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_abs_epi64(a);
#else
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
r_.m128i[i] = simde_mm_abs_epi64(a_.m128i[i]);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.i64) / sizeof(r_.i64[0])); i++) {
r_.i64[i] = (a_.i64[i] < INT64_C(0)) ? -a_.i64[i] : a_.i64[i];
}
#endif
return simde__m256i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_abs_epi64
#define _mm256_abs_epi64(a) simde_mm256_abs_epi64(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_abs_epi64(simde__m256i src, simde__mmask8 k, simde__m256i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_abs_epi64(src, k, a);
#else
return simde_mm256_mask_mov_epi64(src, k, simde_mm256_abs_epi64(a));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_abs_epi64
#define _mm256_mask_abs_epi64(src, k, a) simde_mm256_mask_abs_epi64(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_abs_epi64(simde__mmask8 k, simde__m256i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_abs_epi64(k, a);
#else
return simde_mm256_maskz_mov_epi64(k, simde_mm256_abs_epi64(a));
#endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_abs_epi64
#define _mm256_maskz_abs_epi64(k, a) simde_mm256_maskz_abs_epi64(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_abs_epi8 (simde__m512i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_abs_epi8(a);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_abs_epi8(a_.m256i[i]);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = (a_.i8[i] < INT8_C(0)) ? -a_.i8[i] : a_.i8[i];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_abs_epi8
#define _mm512_abs_epi8(a) simde_mm512_abs_epi8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_abs_epi8 (simde__m512i src, simde__mmask64 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_abs_epi8(src, k, a);
#else
return simde_mm512_mask_mov_epi8(src, k, simde_mm512_abs_epi8(a));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_abs_epi8
#define _mm512_mask_abs_epi8(src, k, a) simde_mm512_mask_abs_epi8(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_abs_epi8 (simde__mmask64 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_abs_epi8(k, a);
#else
return simde_mm512_maskz_mov_epi8(k, simde_mm512_abs_epi8(a));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_abs_epi8
#define _mm512_maskz_abs_epi8(k, a) simde_mm512_maskz_abs_epi8(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_abs_epi16 (simde__m512i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_abs_epi16(a);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_abs_epi16(a_.m256i[i]);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] < INT16_C(0)) ? -a_.i16[i] : a_.i16[i];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_abs_epi16
#define _mm512_abs_epi16(a) simde_mm512_abs_epi16(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_abs_epi16 (simde__m512i src, simde__mmask32 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_abs_epi16(src, k, a);
#else
return simde_mm512_mask_mov_epi16(src, k, simde_mm512_abs_epi16(a));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_abs_epi16
#define _mm512_mask_abs_epi16(src, k, a) simde_mm512_mask_abs_epi16(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_abs_epi16 (simde__mmask32 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_abs_epi16(k, a);
#else
return simde_mm512_maskz_mov_epi16(k, simde_mm512_abs_epi16(a));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_abs_epi16
#define _mm512_maskz_abs_epi16(k, a) simde_mm512_maskz_abs_epi16(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_abs_epi32(simde__m512i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_abs_epi32(a);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_abs_epi32(a_.m256i[i]);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
r_.i32[i] = (a_.i32[i] < INT32_C(0)) ? -a_.i32[i] : a_.i32[i];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_abs_epi32
#define _mm512_abs_epi32(a) simde_mm512_abs_epi32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_abs_epi32(simde__m512i src, simde__mmask16 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_abs_epi32(src, k, a);
#else
return simde_mm512_mask_mov_epi32(src, k, simde_mm512_abs_epi32(a));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_abs_epi32
#define _mm512_mask_abs_epi32(src, k, a) simde_mm512_mask_abs_epi32(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_abs_epi32(simde__mmask16 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_abs_epi32(k, a);
#else
return simde_mm512_maskz_mov_epi32(k, simde_mm512_abs_epi32(a));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_abs_epi32
#define _mm512_maskz_abs_epi32(k, a) simde_mm512_maskz_abs_epi32(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_abs_epi64(simde__m512i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_abs_epi64(a);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_abs_epi64(a_.m256i[i]);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.i64) / sizeof(r_.i64[0])); i++) {
r_.i64[i] = (a_.i64[i] < INT64_C(0)) ? -a_.i64[i] : a_.i64[i];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_abs_epi64
#define _mm512_abs_epi64(a) simde_mm512_abs_epi64(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_abs_epi64(simde__m512i src, simde__mmask8 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_abs_epi64(src, k, a);
#else
return simde_mm512_mask_mov_epi64(src, k, simde_mm512_abs_epi64(a));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_abs_epi64
#define _mm512_mask_abs_epi64(src, k, a) simde_mm512_mask_abs_epi64(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_abs_epi64(simde__mmask8 k, simde__m512i a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_abs_epi64(k, a);
#else
return simde_mm512_maskz_mov_epi64(k, simde_mm512_abs_epi64(a));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_abs_epi64
#define _mm512_maskz_abs_epi64(k, a) simde_mm512_maskz_abs_epi64(k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_abs_ps(simde__m512 v2) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0))
return _mm512_abs_ps(v2);
#else
simde__m512_private
r_,
v2_ = simde__m512_to_private(v2);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) {
r_.m128_private[i].neon_f32 = vabsq_f32(v2_.m128_private[i].neon_f32);
}
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) {
r_.m128_private[i].altivec_f32 = vec_abs(v2_.m128_private[i].altivec_f32);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.f32[i] = (v2_.f32[i] < SIMDE_FLOAT32_C(0.0)) ? -v2_.f32[i] : v2_.f32[i];
}
#endif
return simde__m512_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_abs_ps
#define _mm512_abs_ps(v2) simde_mm512_abs_ps(v2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_abs_ps(simde__m512 src, simde__mmask16 k, simde__m512 v2) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0))
return _mm512_mask_abs_ps(src, k, v2);
#else
return simde_mm512_mask_mov_ps(src, k, simde_mm512_abs_ps(v2));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_abs_ps
#define _mm512_mask_abs_ps(src, k, v2) simde_mm512_mask_abs_ps(src, k, v2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_abs_pd(simde__m512d v2) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,3,0))
return _mm512_abs_pd(v2);
#elif defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0))
/* gcc bug: https://gcc.gnu.org/legacy-ml/gcc-patches/2018-01/msg01962.html */
return _mm512_abs_pd(_mm512_castpd_ps(v2));
#else
simde__m512d_private
r_,
v2_ = simde__m512d_to_private(v2);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) {
r_.m128d_private[i].neon_f64 = vabsq_f64(v2_.m128d_private[i].neon_f64);
}
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) {
r_.m128d_private[i].altivec_f64 = vec_abs(v2_.m128d_private[i].altivec_f64);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) {
r_.f64[i] = (v2_.f64[i] < SIMDE_FLOAT64_C(0.0)) ? -v2_.f64[i] : v2_.f64[i];
}
#endif
return simde__m512d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_abs_pd
#define _mm512_abs_pd(v2) simde_mm512_abs_pd(v2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_abs_pd(simde__m512d src, simde__mmask8 k, simde__m512d v2) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,3,0))
return _mm512_mask_abs_pd(src, k, v2);
#elif defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0))
/* gcc bug: https://gcc.gnu.org/legacy-ml/gcc-patches/2018-01/msg01962.html */
return _mm512_mask_abs_pd(src, k, _mm512_castpd_ps(v2));
#else
return simde_mm512_mask_mov_pd(src, k, simde_mm512_abs_pd(v2));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_abs_pd
#define _mm512_mask_abs_pd(src, k, v2) simde_mm512_mask_abs_pd(src, k, v2)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_ABS_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/sqrt.h | .h | 3,977 | 128 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Himanshi Mathur <himanshi18037@iiitd.ac.in>
*/
#if !defined(SIMDE_X86_AVX512_SQRT_H)
#define SIMDE_X86_AVX512_SQRT_H
#include "types.h"
#include "mov.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_sqrt_ps (simde__m512 a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_sqrt_ps(a);
#else
simde__m512_private
r_,
a_ = simde__m512_to_private(a);
#if defined(SIMDE_X86_AVX_NATIVE)
r_.m256[0] = simde_mm256_sqrt_ps(a_.m256[0]);
r_.m256[1] = simde_mm256_sqrt_ps(a_.m256[1]);
#elif defined(simde_math_sqrtf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m512_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_sqrt_ps
#define _mm512_sqrt_ps(a) simde_mm512_sqrt_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_sqrt_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_sqrt_ps(src, k, a);
#else
return simde_mm512_mask_mov_ps(src, k, simde_mm512_sqrt_ps(a));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_sqrt_ps
#define _mm512_mask_sqrt_ps(src, k, a) simde_mm512_mask_sqrt_ps(src, k, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_sqrt_pd (simde__m512d a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_sqrt_pd(a);
#else
simde__m512d_private
r_,
a_ = simde__m512d_to_private(a);
#if defined(SIMDE_X86_AVX_NATIVE)
r_.m256d[0] = simde_mm256_sqrt_pd(a_.m256d[0]);
r_.m256d[1] = simde_mm256_sqrt_pd(a_.m256d[1]);
#elif defined(simde_math_sqrt)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = simde_math_sqrt(a_.f64[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m512d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_sqrt_pd
#define _mm512_sqrt_pd(a) simde_mm512_sqrt_pd(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_sqrt_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_sqrt_pd(src, k, a);
#else
return simde_mm512_mask_mov_pd(src, k, simde_mm512_sqrt_pd(a));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_sqrt_pd
#define _mm512_mask_sqrt_pd(src, k, a) simde_mm512_mask_sqrt_pd(src, k, a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_SQRT_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/unpacklo.h | .h | 28,831 | 753 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Hidayat Khan <huk2209@gmail.com>
*/
#if !defined(SIMDE_X86_AVX512_UNPACKLO_H)
#define SIMDE_X86_AVX512_UNPACKLO_H
#include "types.h"
#include "../avx2.h"
#include "mov.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_unpacklo_epi8 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_unpacklo_epi8(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_)
r_.i8 = SIMDE_SHUFFLE_VECTOR_(8, 64, a_.i8, b_.i8,
0, 64, 1, 65, 2, 66, 3, 67,
4, 68, 5, 69, 6, 70, 7, 71,
16, 80, 17, 81, 18, 82, 19, 83,
20, 84, 21, 85, 22, 86, 23, 87,
32, 96, 33, 97, 34, 98, 35, 99,
36, 100, 37, 101, 38, 102, 39, 103,
48, 112, 49, 113, 50, 114, 51, 115,
52, 116, 53, 117, 54, 118, 55, 119);
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
r_.m256i[0] = simde_mm256_unpacklo_epi8(a_.m256i[0], b_.m256i[0]);
r_.m256i[1] = simde_mm256_unpacklo_epi8(a_.m256i[1], b_.m256i[1]);
#else
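/* i + ~(~i | 7) is i + (i & ~7): it maps output pair i to the i-th element
 * of the low half of its 128-bit lane. The 16- and 32-bit variants below
 * use the same idiom with masks 3 and 1. */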
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0]) / 2) ; i++) {
r_.i8[2 * i] = a_.i8[i + ~(~i | 7)];
r_.i8[2 * i + 1] = b_.i8[i + ~(~i | 7)];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_unpacklo_epi8
#define _mm512_unpacklo_epi8(a, b) simde_mm512_unpacklo_epi8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_unpacklo_epi8(simde__m512i src, simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_unpacklo_epi8(src, k, a, b);
#else
return simde_mm512_mask_mov_epi8(src, k, simde_mm512_unpacklo_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_unpacklo_epi8
#define _mm512_mask_unpacklo_epi8(src, k, a, b) simde_mm512_mask_unpacklo_epi8(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_unpacklo_epi8(simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_unpacklo_epi8(k, a, b);
#else
return simde_mm512_maskz_mov_epi8(k, simde_mm512_unpacklo_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_unpacklo_epi8
#define _mm512_maskz_unpacklo_epi8(k, a, b) simde_mm512_maskz_unpacklo_epi8(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_unpacklo_epi8(simde__m256i src, simde__mmask32 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_unpacklo_epi8(src, k, a, b);
#else
return simde_mm256_mask_mov_epi8(src, k, simde_mm256_unpacklo_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_unpacklo_epi8
#define _mm256_mask_unpacklo_epi8(src, k, a, b) simde_mm256_mask_unpacklo_epi8(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_unpacklo_epi8(simde__mmask32 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_unpacklo_epi8(k, a, b);
#else
return simde_mm256_maskz_mov_epi8(k, simde_mm256_unpacklo_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_unpacklo_epi8
#define _mm256_maskz_unpacklo_epi8(k, a, b) simde_mm256_maskz_unpacklo_epi8(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_unpacklo_epi8(simde__m128i src, simde__mmask16 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_unpacklo_epi8(src, k, a, b);
#else
return simde_mm_mask_mov_epi8(src, k, simde_mm_unpacklo_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_unpacklo_epi8
#define _mm_mask_unpacklo_epi8(src, k, a, b) simde_mm_mask_unpacklo_epi8(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_unpacklo_epi8(simde__mmask16 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_unpacklo_epi8(k, a, b);
#else
return simde_mm_maskz_mov_epi8(k, simde_mm_unpacklo_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_unpacklo_epi8
#define _mm_maskz_unpacklo_epi8(k, a, b) simde_mm_maskz_unpacklo_epi8(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_unpacklo_epi16 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_unpacklo_epi16(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_)
r_.i16 = SIMDE_SHUFFLE_VECTOR_(16, 64, a_.i16, b_.i16,
0, 32, 1, 33, 2, 34, 3, 35, 8, 40, 9, 41, 10, 42, 11, 43,
16, 48, 17, 49, 18, 50, 19, 51, 24, 56, 25, 57, 26, 58, 27, 59);
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
r_.m256i[0] = simde_mm256_unpacklo_epi16(a_.m256i[0], b_.m256i[0]);
r_.m256i[1] = simde_mm256_unpacklo_epi16(a_.m256i[1], b_.m256i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0]) / 2) ; i++) {
r_.i16[2 * i] = a_.i16[i + ~(~i | 3)];
r_.i16[2 * i + 1] = b_.i16[i + ~(~i | 3)];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_unpacklo_epi16
#define _mm512_unpacklo_epi16(a, b) simde_mm512_unpacklo_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_unpacklo_epi16(simde__m512i src, simde__mmask32 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_unpacklo_epi16(src, k, a, b);
#else
return simde_mm512_mask_mov_epi16(src, k, simde_mm512_unpacklo_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_unpacklo_epi16
#define _mm512_mask_unpacklo_epi16(src, k, a, b) simde_mm512_mask_unpacklo_epi16(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_unpacklo_epi16(simde__mmask32 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_unpacklo_epi16(k, a, b);
#else
return simde_mm512_maskz_mov_epi16(k, simde_mm512_unpacklo_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_unpacklo_epi16
#define _mm512_maskz_unpacklo_epi16(k, a, b) simde_mm512_maskz_unpacklo_epi16(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_unpacklo_epi16(simde__m256i src, simde__mmask16 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_unpacklo_epi16(src, k, a, b);
#else
return simde_mm256_mask_mov_epi16(src, k, simde_mm256_unpacklo_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_unpacklo_epi16
#define _mm256_mask_unpacklo_epi16(src, k, a, b) simde_mm256_mask_unpacklo_epi16(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_unpacklo_epi16(simde__mmask16 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_unpacklo_epi16(k, a, b);
#else
return simde_mm256_maskz_mov_epi16(k, simde_mm256_unpacklo_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_unpacklo_epi16
#define _mm256_maskz_unpacklo_epi16(k, a, b) simde_mm256_maskz_unpacklo_epi16(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_unpacklo_epi16(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_unpacklo_epi16(src, k, a, b);
#else
return simde_mm_mask_mov_epi16(src, k, simde_mm_unpacklo_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_unpacklo_epi16
#define _mm_mask_unpacklo_epi16(src, k, a, b) simde_mm_mask_unpacklo_epi16(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_unpacklo_epi16(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_unpacklo_epi16(k, a, b);
#else
return simde_mm_maskz_mov_epi16(k, simde_mm_unpacklo_epi16(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_unpacklo_epi16
#define _mm_maskz_unpacklo_epi16(k, a, b) simde_mm_maskz_unpacklo_epi16(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_unpacklo_epi32 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_unpacklo_epi32(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_)
r_.i32 = SIMDE_SHUFFLE_VECTOR_(32, 64, a_.i32, b_.i32,
0, 16, 1, 17, 4, 20, 5, 21,
8, 24, 9, 25, 12, 28, 13, 29);
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
r_.m256i[0] = simde_mm256_unpacklo_epi32(a_.m256i[0], b_.m256i[0]);
r_.m256i[1] = simde_mm256_unpacklo_epi32(a_.m256i[1], b_.m256i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0]) / 2) ; i++) {
r_.i32[2 * i] = a_.i32[i + ~(~i | 1)];
r_.i32[2 * i + 1] = b_.i32[i + ~(~i | 1)];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_unpacklo_epi32
#define _mm512_unpacklo_epi32(a, b) simde_mm512_unpacklo_epi32(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_unpacklo_epi32(simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_unpacklo_epi32(src, k, a, b);
#else
return simde_mm512_mask_mov_epi32(src, k, simde_mm512_unpacklo_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_unpacklo_epi32
#define _mm512_mask_unpacklo_epi32(src, k, a, b) simde_mm512_mask_unpacklo_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_unpacklo_epi32(simde__mmask16 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_unpacklo_epi32(k, a, b);
#else
return simde_mm512_maskz_mov_epi32(k, simde_mm512_unpacklo_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_unpacklo_epi32
#define _mm512_maskz_unpacklo_epi32(k, a, b) simde_mm512_maskz_unpacklo_epi32(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_unpacklo_epi32(simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_unpacklo_epi32(src, k, a, b);
#else
return simde_mm256_mask_mov_epi32(src, k, simde_mm256_unpacklo_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_unpacklo_epi32
#define _mm256_mask_unpacklo_epi32(src, k, a, b) simde_mm256_mask_unpacklo_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_unpacklo_epi32(simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_unpacklo_epi32(k, a, b);
#else
return simde_mm256_maskz_mov_epi32(k, simde_mm256_unpacklo_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_unpacklo_epi32
#define _mm256_maskz_unpacklo_epi32(k, a, b) simde_mm256_maskz_unpacklo_epi32(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_unpacklo_epi32(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_unpacklo_epi32(src, k, a, b);
#else
return simde_mm_mask_mov_epi32(src, k, simde_mm_unpacklo_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_unpacklo_epi32
#define _mm_mask_unpacklo_epi32(src, k, a, b) simde_mm_mask_unpacklo_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_unpacklo_epi32(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_unpacklo_epi32(k, a, b);
#else
return simde_mm_maskz_mov_epi32(k, simde_mm_unpacklo_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_unpacklo_epi32
#define _mm_maskz_unpacklo_epi32(k, a, b) simde_mm_maskz_unpacklo_epi32(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_unpacklo_epi64 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_unpacklo_epi64(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_)
r_.i64 = SIMDE_SHUFFLE_VECTOR_(64, 64, a_.i64, b_.i64, 0, 8, 2, 10, 4, 12, 6, 14);
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
r_.m256i[0] = simde_mm256_unpacklo_epi64(a_.m256i[0], b_.m256i[0]);
r_.m256i[1] = simde_mm256_unpacklo_epi64(a_.m256i[1], b_.m256i[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0]) / 2) ; i++) {
r_.i64[2 * i] = a_.i64[2 * i];
r_.i64[2 * i + 1] = b_.i64[2 * i];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_unpacklo_epi64
#define _mm512_unpacklo_epi64(a, b) simde_mm512_unpacklo_epi64(a, b)
#endif
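/* Worked example (hypothetical lane values): unpacklo_epi64 interleaves the
 * low 64-bit element of each 128-bit lane, so with a = {a0..a7} and
 * b = {b0..b7} (eight 64-bit lanes each):
 *
 *   simde_mm512_unpacklo_epi64(a, b) == {a0,b0, a2,b2, a4,b4, a6,b6}
 *
 * which matches the shuffle indices 0,8,2,10,4,12,6,14 above. */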
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_unpacklo_epi64(simde__m512i src, simde__mmask8 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_unpacklo_epi64(src, k, a, b);
#else
return simde_mm512_mask_mov_epi64(src, k, simde_mm512_unpacklo_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_unpacklo_epi64
#define _mm512_mask_unpacklo_epi64(src, k, a, b) simde_mm512_mask_unpacklo_epi64(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_unpacklo_epi64(simde__mmask8 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_unpacklo_epi64(k, a, b);
#else
return simde_mm512_maskz_mov_epi64(k, simde_mm512_unpacklo_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_unpacklo_epi64
#define _mm512_maskz_unpacklo_epi64(k, a, b) simde_mm512_maskz_unpacklo_epi64(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_unpacklo_epi64(simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_unpacklo_epi64(src, k, a, b);
#else
return simde_mm256_mask_mov_epi64(src, k, simde_mm256_unpacklo_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_unpacklo_epi64
#define _mm256_mask_unpacklo_epi64(src, k, a, b) simde_mm256_mask_unpacklo_epi64(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_unpacklo_epi64(simde__mmask8 k, simde__m256i a, simde__m256i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_unpacklo_epi64(k, a, b);
#else
return simde_mm256_maskz_mov_epi64(k, simde_mm256_unpacklo_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_unpacklo_epi64
#define _mm256_maskz_unpacklo_epi64(k, a, b) simde_mm256_maskz_unpacklo_epi64(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_unpacklo_epi64(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_unpacklo_epi64(src, k, a, b);
#else
return simde_mm_mask_mov_epi64(src, k, simde_mm_unpacklo_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_unpacklo_epi64
#define _mm_mask_unpacklo_epi64(src, k, a, b) simde_mm_mask_unpacklo_epi64(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_unpacklo_epi64(simde__mmask8 k, simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_unpacklo_epi64(k, a, b);
#else
return simde_mm_maskz_mov_epi64(k, simde_mm_unpacklo_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_unpacklo_epi64
#define _mm_maskz_unpacklo_epi64(k, a, b) simde_mm_maskz_unpacklo_epi64(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_unpacklo_ps (simde__m512 a, simde__m512 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_unpacklo_ps(a, b);
#else
simde__m512_private
r_,
a_ = simde__m512_to_private(a),
b_ = simde__m512_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 64, a_.f32, b_.f32,
0, 16, 1, 17, 4, 20, 5, 21,
8, 24, 9, 25, 12, 28, 13, 29);
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
r_.m256[0] = simde_mm256_unpacklo_ps(a_.m256[0], b_.m256[0]);
r_.m256[1] = simde_mm256_unpacklo_ps(a_.m256[1], b_.m256[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0]) / 2) ; i++) {
/* i + ~(~i | 1) == i + (i & ~1) (De Morgan), i.e. 0,1,4,5,8,9,12,13 for
 * i = 0..7: the lane-local low-half source index within each 128-bit lane. */
r_.f32[2 * i] = a_.f32[i + ~(~i | 1)];
r_.f32[2 * i + 1] = b_.f32[i + ~(~i | 1)];
}
#endif
return simde__m512_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_unpacklo_ps
#define _mm512_unpacklo_ps(a, b) simde_mm512_unpacklo_ps(a, b)
#endif
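/* Worked example (hypothetical lane values): within each 128-bit lane the low
 * two floats of a and b are interleaved, so with a = {a0..a15}, b = {b0..b15}:
 *
 *   simde_mm512_unpacklo_ps(a, b) ==
 *     {a0,b0,a1,b1, a4,b4,a5,b5, a8,b8,a9,b9, a12,b12,a13,b13} */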
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_unpacklo_ps(simde__m512 src, simde__mmask16 k, simde__m512 a, simde__m512 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_unpacklo_ps(src, k, a, b);
#else
return simde_mm512_mask_mov_ps(src, k, simde_mm512_unpacklo_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_unpacklo_ps
#define _mm512_mask_unpacklo_ps(src, k, a, b) simde_mm512_mask_unpacklo_ps(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_maskz_unpacklo_ps(simde__mmask16 k, simde__m512 a, simde__m512 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_unpacklo_ps(k, a, b);
#else
return simde_mm512_maskz_mov_ps(k, simde_mm512_unpacklo_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_unpacklo_ps
#define _mm512_maskz_unpacklo_ps(k, a, b) simde_mm512_maskz_unpacklo_ps(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_mask_unpacklo_ps(simde__m256 src, simde__mmask8 k, simde__m256 a, simde__m256 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_unpacklo_ps(src, k, a, b);
#else
return simde_mm256_mask_mov_ps(src, k, simde_mm256_unpacklo_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_unpacklo_ps
#define _mm256_mask_unpacklo_ps(src, k, a, b) simde_mm256_mask_unpacklo_ps(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_maskz_unpacklo_ps(simde__mmask8 k, simde__m256 a, simde__m256 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_unpacklo_ps(k, a, b);
#else
return simde_mm256_maskz_mov_ps(k, simde_mm256_unpacklo_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_unpacklo_ps
#define _mm256_maskz_unpacklo_ps(k, a, b) simde_mm256_maskz_unpacklo_ps(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mask_unpacklo_ps(simde__m128 src, simde__mmask8 k, simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_unpacklo_ps(src, k, a, b);
#else
return simde_mm_mask_mov_ps(src, k, simde_mm_unpacklo_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_unpacklo_ps
#define _mm_mask_unpacklo_ps(src, k, a, b) simde_mm_mask_unpacklo_ps(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_maskz_unpacklo_ps(simde__mmask8 k, simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_unpacklo_ps(k, a, b);
#else
return simde_mm_maskz_mov_ps(k, simde_mm_unpacklo_ps(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_unpacklo_ps
#define _mm_maskz_unpacklo_ps(k, a, b) simde_mm_maskz_unpacklo_ps(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_unpacklo_pd (simde__m512d a, simde__m512d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_unpacklo_pd(a, b);
#else
simde__m512d_private
r_,
a_ = simde__m512d_to_private(a),
b_ = simde__m512d_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_)
r_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 64, a_.f64, b_.f64, 0, 8, 2, 10, 4, 12, 6, 14);
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
r_.m256d[0] = simde_mm256_unpacklo_pd(a_.m256d[0], b_.m256d[0]);
r_.m256d[1] = simde_mm256_unpacklo_pd(a_.m256d[1], b_.m256d[1]);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0]) / 2) ; i++) {
r_.f64[2 * i] = a_.f64[2 * i];
r_.f64[2 * i + 1] = b_.f64[2 * i];
}
#endif
return simde__m512d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_unpacklo_pd
#define _mm512_unpacklo_pd(a, b) simde_mm512_unpacklo_pd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_unpacklo_pd(simde__m512d src, simde__mmask8 k, simde__m512d a, simde__m512d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_unpacklo_pd(src, k, a, b);
#else
return simde_mm512_mask_mov_pd(src, k, simde_mm512_unpacklo_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_unpacklo_pd
#define _mm512_mask_unpacklo_pd(src, k, a, b) simde_mm512_mask_unpacklo_pd(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_maskz_unpacklo_pd(simde__mmask8 k, simde__m512d a, simde__m512d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_unpacklo_pd(k, a, b);
#else
return simde_mm512_maskz_mov_pd(k, simde_mm512_unpacklo_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_unpacklo_pd
#define _mm512_maskz_unpacklo_pd(k, a, b) simde_mm512_maskz_unpacklo_pd(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_mask_unpacklo_pd(simde__m256d src, simde__mmask8 k, simde__m256d a, simde__m256d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_mask_unpacklo_pd(src, k, a, b);
#else
return simde_mm256_mask_mov_pd(src, k, simde_mm256_unpacklo_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_mask_unpacklo_pd
#define _mm256_mask_unpacklo_pd(src, k, a, b) simde_mm256_mask_unpacklo_pd(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_maskz_unpacklo_pd(simde__mmask8 k, simde__m256d a, simde__m256d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm256_maskz_unpacklo_pd(k, a, b);
#else
return simde_mm256_maskz_mov_pd(k, simde_mm256_unpacklo_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_maskz_unpacklo_pd
#define _mm256_maskz_unpacklo_pd(k, a, b) simde_mm256_maskz_unpacklo_pd(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_mask_unpacklo_pd(simde__m128d src, simde__mmask8 k, simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_mask_unpacklo_pd(src, k, a, b);
#else
return simde_mm_mask_mov_pd(src, k, simde_mm_unpacklo_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_unpacklo_pd
#define _mm_mask_unpacklo_pd(src, k, a, b) simde_mm_mask_unpacklo_pd(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_maskz_unpacklo_pd(simde__mmask8 k, simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_maskz_unpacklo_pd(k, a, b);
#else
return simde_mm_maskz_mov_pd(k, simde_mm_unpacklo_pd(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_unpacklo_pd
#define _mm_maskz_unpacklo_pd(k, a, b) simde_mm_maskz_unpacklo_pd(k, a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_UNPACKLO_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/srl.h | .h | 6,928 | 217 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Hidayat Khan <huk2209@gmail.com>
*/
#if !defined(SIMDE_X86_AVX512_SRL_H)
#define SIMDE_X86_AVX512_SRL_H
#include "types.h"
#include "../avx2.h"
#include "mov.h"
#include "setzero.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_srl_epi16 (simde__m512i a, simde__m128i count) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_srl_epi16(a, count);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_srl_epi16(a_.m256i[i], count);
}
#else
simde__m128i_private
count_ = simde__m128i_to_private(count);
if (HEDLEY_STATIC_CAST(uint64_t, count_.i64[0]) > 15)
return simde_mm512_setzero_si512();
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.u16 = a_.u16 >> count_.i64[0];
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.u16[i] = a_.u16[i] >> count_.i64[0];
}
#endif
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_srl_epi16
#define _mm512_srl_epi16(a, count) simde_mm512_srl_epi16(a, count)
#endif
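/* Minimal usage sketch (hypothetical values; assumes the usual simde
 * cvtsi64/set1 helpers from sse2.h and set1.h): every element is shifted
 * right by the low 64 bits of `count`, and shift amounts above the element
 * width zero the whole vector, matching the hardware behaviour:
 *
 *   simde__m128i c4 = simde_mm_cvtsi64_si128(4);
 *   simde__m512i v  = simde_mm512_set1_epi16(0x00F0);
 *   v = simde_mm512_srl_epi16(v, c4);   // every lane is now 0x000F
 */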
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_srl_epi32 (simde__m512i a, simde__m128i count) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_srl_epi32(a, count);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_srl_epi32(a_.m256i[i], count);
}
#else
simde__m128i_private
count_ = simde__m128i_to_private(count);
if (HEDLEY_STATIC_CAST(uint64_t, count_.i64[0]) > 31)
return simde_mm512_setzero_si512();
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.u32 = a_.u32 >> count_.i64[0];
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.u32[i] = a_.u32[i] >> count_.i64[0];
}
#endif
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_srl_epi32
#define _mm512_srl_epi32(a, count) simde_mm512_srl_epi32(a, count)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_srl_epi32(simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_srl_epi32(src, k, a, b);
#else
return simde_mm512_mask_mov_epi32(src, k, simde_mm512_srl_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_srl_epi32
#define _mm512_mask_srl_epi32(src, k, a, b) simde_mm512_mask_srl_epi32(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_srl_epi32(simde__mmask16 k, simde__m512i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_srl_epi32(k, a, b);
#else
return simde_mm512_maskz_mov_epi32(k, simde_mm512_srl_epi32(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_srl_epi32
#define _mm512_maskz_srl_epi32(k, a, b) simde_mm512_maskz_srl_epi32(k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_srl_epi64 (simde__m512i a, simde__m128i count) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_srl_epi64(a, count);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_srl_epi64(a_.m256i[i], count);
}
#else
simde__m128i_private
count_ = simde__m128i_to_private(count);
if (HEDLEY_STATIC_CAST(uint64_t, count_.i64[0]) > 63)
return simde_mm512_setzero_si512();
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.u64 = a_.u64 >> count_.i64[0];
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
r_.u64[i] = a_.u64[i] >> count_.i64[0];
}
#endif
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_srl_epi64
#define _mm512_srl_epi64(a, count) simde_mm512_srl_epi64(a, count)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_srl_epi64(simde__m512i src, simde__mmask8 k, simde__m512i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_mask_srl_epi64(src, k, a, b);
#else
return simde_mm512_mask_mov_epi64(src, k, simde_mm512_srl_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_srl_epi64
#define _mm512_mask_srl_epi64(src, k, a, b) simde_mm512_mask_srl_epi64(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_srl_epi64(simde__mmask8 k, simde__m512i a, simde__m128i b) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_maskz_srl_epi64(k, a, b);
#else
return simde_mm512_maskz_mov_epi64(k, simde_mm512_srl_epi64(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_srl_epi64
#define _mm512_maskz_srl_epi64(k, a, b) simde_mm512_maskz_srl_epi64(k, a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_SRL_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/setzero.h | .h | 3,283 | 106 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Himanshi Mathur <himanshi18037@iiitd.ac.in>
* 2020 Hidayat Khan <huk2209@gmail.com>
* 2020 Christopher Moore <moore@free.fr>
*/
#if !defined(SIMDE_X86_AVX512_SETZERO_H)
#define SIMDE_X86_AVX512_SETZERO_H
#include "types.h"
#include "cast.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_setzero_si512(void) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_setzero_si512();
#else
simde__m512i r;
simde_memset(&r, 0, sizeof(r));
return r;
#endif
}
#define simde_mm512_setzero_epi32() simde_mm512_setzero_si512()
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_setzero_si512
#define _mm512_setzero_si512() simde_mm512_setzero_si512()
#undef _mm512_setzero_epi32
#define _mm512_setzero_epi32() simde_mm512_setzero_si512()
#endif
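/* An all-zero byte pattern is bit-identical to _mm512_setzero_si512 on every
 * target, so the memset-based fallback above is exact. Usage sketch:
 *
 *   simde__m512i z = simde_mm512_setzero_si512();
 *   // z compares equal to simde_mm512_set1_epi32(0) in every lane
 */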
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_setzero_ps(void) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_setzero_ps();
#else
return simde_mm512_castsi512_ps(simde_mm512_setzero_si512());
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_setzero_ps
#define _mm512_setzero_ps() simde_mm512_setzero_ps()
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_setzero_pd(void) {
#if defined(SIMDE_X86_AVX512F_NATIVE)
return _mm512_setzero_pd();
#else
return simde_mm512_castsi512_pd(simde_mm512_setzero_si512());
#endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_setzero_pd
#define _mm512_setzero_pd() simde_mm512_setzero_pd()
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512h
simde_mm512_setzero_ph(void) {
#if defined(SIMDE_X86_AVX512FP16_NATIVE)
return _mm512_setzero_ph();
#else
return simde_mm512_castsi512_ph(simde_mm512_setzero_si512());
#endif
}
#if defined(SIMDE_X86_AVX512FP16_ENABLE_NATIVE_ALIASES)
#undef _mm512_setzero_ph
#define _mm512_setzero_ph() simde_mm512_setzero_ph()
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_SETZERO_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/fpclass.h | .h | 3,609 | 100 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2023 Michael R. Crusoe <crusoe@debian.org>
*/
#if !defined(SIMDE_X86_AVX512_FPCLASS_H)
#define SIMDE_X86_AVX512_FPCLASS_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm256_fpclass_ps_mask(simde__m256 a, int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__mmask8 r = 0;
simde__m256_private a_ = simde__m256_to_private(a);
for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) {
r |= simde_math_fpclassf(a_.f32[i], imm8) ? (UINT8_C(1) << i) : 0;
}
return r;
}
#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
# define simde_mm256_fpclass_ps_mask(a, imm8) _mm256_fpclass_ps_mask((a), (imm8))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
# undef _mm256_fpclass_ps_mask
# define _mm256_fpclass_ps_mask(a, imm8) simde_mm256_fpclass_ps_mask((a), (imm8))
#endif
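/* The imm8 category bits follow the hardware VFPCLASS encoding (listed here
 * for reference; see the Intel SDM): 0x01 QNaN, 0x02 +0, 0x04 -0, 0x08 +Inf,
 * 0x10 -Inf, 0x20 denormal, 0x40 negative finite, 0x80 SNaN. Any OR of these
 * bits is a valid imm8, e.g. 0x81 tests for any NaN. */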
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask32
simde_mm512_fpclass_ph_mask(simde__m512h a, int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__mmask32 r = 0;
simde__m512h_private a_ = simde__m512h_to_private(a);
for (size_t i = 0 ; i < (sizeof(a_.f16) / sizeof(a_.f16[0])) ; i++) {
/* 32 half-float lanes: the shift count reaches 31, so the constant must be
 * at least 32 bits wide (UINT8_C(1) would promote to int and overflow). */
r |= simde_fpclasshf(a_.f16[i], imm8) ? (UINT32_C(1) << i) : 0;
}
return r;
}
#if defined(SIMDE_X86_AVX512FP16_NATIVE)
# define simde_mm512_fpclass_ph_mask(a, imm8) _mm512_fpclass_ph_mask((a), (imm8))
#endif
#if defined(SIMDE_X86_AVX512FP16_ENABLE_NATIVE_ALIASES)
# undef _mm512_fpclass_ph_mask
# define _mm512_fpclass_ph_mask(a, imm8) simde_mm512_fpclass_ph_mask((a), (imm8))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm512_fpclass_pd_mask(simde__m512d a, int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__mmask8 r = 0;
simde__m512d_private a_ = simde__m512d_to_private(a);
for (size_t i = 0 ; i < (sizeof(a_.f64) / sizeof(a_.f64[0])) ; i++) {
r |= simde_math_fpclass(a_.f64[i], imm8) ? (UINT8_C(1) << i) : 0;
}
return r;
}
#if defined(SIMDE_X86_AVX512DQ_NATIVE)
# define simde_mm512_fpclass_pd_mask(a, imm8) _mm512_fpclass_pd_mask((a), (imm8))
#endif
#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES)
# undef _mm512_fpclass_pd_mask
# define _mm512_fpclass_pd_mask(a, imm8) simde_mm512_fpclass_pd_mask((a), (imm8))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_FPCLASS_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/fixupimm_round.h | .h | 26,373 | 688 | #if !defined(SIMDE_X86_AVX512_FIXUPIMM_ROUND_H)
#define SIMDE_X86_AVX512_FIXUPIMM_ROUND_H
#include "types.h"
#include "fixupimm.h"
#include "mov.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_fixupimm_round_ps(a, b, c, imm8, sae) _mm512_fixupimm_round_ps(a, b, c, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm512_fixupimm_round_ps(a, b, c, imm8, sae) simde_mm512_fixupimm_ps(a, b, c, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
#define simde_mm512_fixupimm_round_ps(a, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m512 simde_mm512_fixupimm_round_ps_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm512_fixupimm_round_ps_envp; \
int simde_mm512_fixupimm_round_ps_x = feholdexcept(&simde_mm512_fixupimm_round_ps_envp); \
simde_mm512_fixupimm_round_ps_r = simde_mm512_fixupimm_ps(a, b, c, imm8); \
if (HEDLEY_LIKELY(simde_mm512_fixupimm_round_ps_x == 0)) \
fesetenv(&simde_mm512_fixupimm_round_ps_envp); \
} \
else { \
simde_mm512_fixupimm_round_ps_r = simde_mm512_fixupimm_ps(a, b, c, imm8); \
} \
\
simde_mm512_fixupimm_round_ps_r; \
}))
#else
#define simde_mm512_fixupimm_round_ps(a, b, c, imm8, sae) simde_mm512_fixupimm_ps(a, b, c, imm8)
#endif
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_fixupimm_round_ps (simde__m512 a, simde__m512 b, simde__m512i c, int imm8, int sae)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
SIMDE_REQUIRE_CONSTANT(sae) {
simde__m512 r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm512_fixupimm_ps(a, b, c, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm512_fixupimm_ps(a, b, c, imm8);
#endif
}
else {
r = simde_mm512_fixupimm_ps(a, b, c, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_fixupimm_round_ps
#define _mm512_fixupimm_round_ps(a, b, c, imm8, sae) simde_mm512_fixupimm_round_ps(a, b, c, imm8, sae)
#endif
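/* Pattern used throughout this file: SIMDE_MM_FROUND_NO_EXC ("suppress all
 * exceptions") has no direct portable equivalent, so when it is requested the
 * fallback saves the floating-point environment with feholdexcept(), runs the
 * plain fixupimm operation, and restores the environment with fesetenv(),
 * which discards any exception flags raised in between. Without <fenv.h> (or
 * with SIMDE_FAST_EXCEPTIONS) the sae argument is simply ignored. */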
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_mask_fixupimm_round_ps(a, k, b, c, imm8, sae) _mm512_mask_fixupimm_round_ps(a, k, b, c, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm512_mask_fixupimm_round_ps(a, k, b, c, imm8, sae) simde_mm512_mask_fixupimm_ps(a, k, b, c, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
#define simde_mm512_mask_fixupimm_round_ps(a, k, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m512 simde_mm512_mask_fixupimm_round_ps_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm512_mask_fixupimm_round_ps_envp; \
int simde_mm512_mask_fixupimm_round_ps_x = feholdexcept(&simde_mm512_mask_fixupimm_round_ps_envp); \
simde_mm512_mask_fixupimm_round_ps_r = simde_mm512_mask_fixupimm_ps(a, k, b, c, imm8); \
if (HEDLEY_LIKELY(simde_mm512_mask_fixupimm_round_ps_x == 0)) \
fesetenv(&simde_mm512_mask_fixupimm_round_ps_envp); \
} \
else { \
simde_mm512_mask_fixupimm_round_ps_r = simde_mm512_mask_fixupimm_ps(a, k, b, c, imm8); \
} \
\
simde_mm512_mask_fixupimm_round_ps_r; \
}))
#else
#define simde_mm512_mask_fixupimm_round_ps(a, k, b, c, imm8, sae) simde_mm512_mask_fixupimm_ps(a, k, b, c, imm8)
#endif
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_fixupimm_round_ps (simde__m512 a, simde__mmask16 k, simde__m512 b, simde__m512i c, int imm8, int sae)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
SIMDE_REQUIRE_CONSTANT(sae) {
simde__m512 r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm512_mask_fixupimm_ps(a, k, b, c, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm512_mask_fixupimm_ps(a, k, b, c, imm8);
#endif
}
else {
r = simde_mm512_mask_fixupimm_ps(a, k, b, c, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_fixupimm_round_ps
#define _mm512_mask_fixupimm_round_ps(a, k, b, c, imm8, sae) simde_mm512_mask_fixupimm_round_ps(a, k, b, c, imm8, sae)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_maskz_fixupimm_round_ps(k, a, b, c, imm8, sae) _mm512_maskz_fixupimm_round_ps(k, a, b, c, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm512_maskz_fixupimm_round_ps(k, a, b, c, imm8, sae) simde_mm512_maskz_fixupimm_ps(k, a, b, c, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
#define simde_mm512_maskz_fixupimm_round_ps(k, a, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m512 simde_mm512_maskz_fixupimm_round_ps_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm512_maskz_fixupimm_round_ps_envp; \
int simde_mm512_maskz_fixupimm_round_ps_x = feholdexcept(&simde_mm512_maskz_fixupimm_round_ps_envp); \
simde_mm512_maskz_fixupimm_round_ps_r = simde_mm512_maskz_fixupimm_ps(k, a, b, c, imm8); \
if (HEDLEY_LIKELY(simde_mm512_maskz_fixupimm_round_ps_x == 0)) \
fesetenv(&simde_mm512_maskz_fixupimm_round_ps_envp); \
} \
else { \
simde_mm512_maskz_fixupimm_round_ps_r = simde_mm512_maskz_fixupimm_ps(k, a, b, c, imm8); \
} \
\
simde_mm512_maskz_fixupimm_round_ps_r; \
}))
#else
#define simde_mm512_maskz_fixupimm_round_ps(k, a, b, c, imm8, sae) simde_mm512_maskz_fixupimm_ps(k, a, b, c, imm8)
#endif
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_maskz_fixupimm_round_ps (simde__mmask16 k, simde__m512 a, simde__m512 b, simde__m512i c, int imm8, int sae)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
SIMDE_REQUIRE_CONSTANT(sae) {
simde__m512 r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm512_maskz_fixupimm_ps(k, a, b, c, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm512_maskz_fixupimm_ps(k, a, b, c, imm8);
#endif
}
else {
r = simde_mm512_maskz_fixupimm_ps(k, a, b, c, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_fixupimm_round_ps
#define _mm512_maskz_fixupimm_round_ps(k, a, b, c, imm8, sae) simde_mm512_maskz_fixupimm_round_ps(k, a, b, c, imm8, sae)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_fixupimm_round_pd(a, b, c, imm8, sae) _mm512_fixupimm_round_pd(a, b, c, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm512_fixupimm_round_pd(a, b, c, imm8, sae) simde_mm512_fixupimm_pd(a, b, c, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
#define simde_mm512_fixupimm_round_pd(a, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m512d simde_mm512_fixupimm_round_pd_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm512_fixupimm_round_pd_envp; \
int simde_mm512_fixupimm_round_pd_x = feholdexcept(&simde_mm512_fixupimm_round_pd_envp); \
simde_mm512_fixupimm_round_pd_r = simde_mm512_fixupimm_pd(a, b, c, imm8); \
if (HEDLEY_LIKELY(simde_mm512_fixupimm_round_pd_x == 0)) \
fesetenv(&simde_mm512_fixupimm_round_pd_envp); \
} \
else { \
simde_mm512_fixupimm_round_pd_r = simde_mm512_fixupimm_pd(a, b, c, imm8); \
} \
\
simde_mm512_fixupimm_round_pd_r; \
}))
#else
#define simde_mm512_fixupimm_round_pd(a, b, c, imm8, sae) simde_mm512_fixupimm_pd(a, b, c, imm8)
#endif
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_fixupimm_round_pd (simde__m512d a, simde__m512d b, simde__m512i c, int imm8, int sae)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
SIMDE_REQUIRE_CONSTANT(sae) {
simde__m512d r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm512_fixupimm_pd(a, b, c, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm512_fixupimm_pd(a, b, c, imm8);
#endif
}
else {
r = simde_mm512_fixupimm_pd(a, b, c, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_fixupimm_round_pd
#define _mm512_fixupimm_round_pd(a, b, c, imm8, sae) simde_mm512_fixupimm_round_pd(a, b, c, imm8, sae)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_mask_fixupimm_round_pd(a, k, b, c, imm8, sae) _mm512_mask_fixupimm_round_pd(a, k, b, c, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm512_mask_fixupimm_round_pd(a, k, b, c, imm8, sae) simde_mm512_mask_fixupimm_pd(a, k, b, c, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
#define simde_mm512_mask_fixupimm_round_pd(a, k, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m512d simde_mm512_mask_fixupimm_round_pd_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm512_mask_fixupimm_round_pd_envp; \
int simde_mm512_mask_fixupimm_round_pd_x = feholdexcept(&simde_mm512_mask_fixupimm_round_pd_envp); \
simde_mm512_mask_fixupimm_round_pd_r = simde_mm512_mask_fixupimm_pd(a, k, b, c, imm8); \
if (HEDLEY_LIKELY(simde_mm512_mask_fixupimm_round_pd_x == 0)) \
fesetenv(&simde_mm512_mask_fixupimm_round_pd_envp); \
} \
else { \
simde_mm512_mask_fixupimm_round_pd_r = simde_mm512_mask_fixupimm_pd(a, k, b, c, imm8); \
} \
\
simde_mm512_mask_fixupimm_round_pd_r; \
}))
#else
#define simde_mm512_mask_fixupimm_round_pd(a, k, b, c, imm8, sae) simde_mm512_mask_fixupimm_pd(a, k, b, c, imm8)
#endif
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_fixupimm_round_pd (simde__m512d a, simde__mmask8 k, simde__m512d b, simde__m512i c, int imm8, int sae)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
SIMDE_REQUIRE_CONSTANT(sae) {
simde__m512d r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm512_mask_fixupimm_pd(a, k, b, c, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm512_mask_fixupimm_pd(a, k, b, c, imm8);
#endif
}
else {
r = simde_mm512_mask_fixupimm_pd(a, k, b, c, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_fixupimm_round_pd
#define _mm512_mask_fixupimm_round_pd(a, k, b, c, imm8, sae) simde_mm512_mask_fixupimm_round_pd(a, k, b, c, imm8, sae)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_maskz_fixupimm_round_pd(k, a, b, c, imm8, sae) _mm512_maskz_fixupimm_round_pd(k, a, b, c, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm512_maskz_fixupimm_round_pd(k, a, b, c, imm8, sae) simde_mm512_maskz_fixupimm_pd(k, a, b, c, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
#define simde_mm512_maskz_fixupimm_round_pd(k, a, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m512d simde_mm512_maskz_fixupimm_round_pd_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm512_maskz_fixupimm_round_pd_envp; \
int simde_mm512_maskz_fixupimm_round_pd_x = feholdexcept(&simde_mm512_maskz_fixupimm_round_pd_envp); \
simde_mm512_maskz_fixupimm_round_pd_r = simde_mm512_maskz_fixupimm_pd(k, a, b, c, imm8); \
if (HEDLEY_LIKELY(simde_mm512_maskz_fixupimm_round_pd_x == 0)) \
fesetenv(&simde_mm512_maskz_fixupimm_round_pd_envp); \
} \
else { \
simde_mm512_maskz_fixupimm_round_pd_r = simde_mm512_maskz_fixupimm_pd(k, a, b, c, imm8); \
} \
\
simde_mm512_maskz_fixupimm_round_pd_r; \
}))
#else
#define simde_mm512_maskz_fixupimm_round_pd(k, a, b, c, imm8, sae) simde_mm512_maskz_fixupimm_pd(k, a, b, c, imm8)
#endif
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_maskz_fixupimm_round_pd (simde__mmask8 k, simde__m512d a, simde__m512d b, simde__m512i c, int imm8, int sae)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
SIMDE_REQUIRE_CONSTANT(sae) {
simde__m512d r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm512_maskz_fixupimm_pd(k, a, b, c, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm512_maskz_fixupimm_pd(k, a, b, c, imm8);
#endif
}
else {
r = simde_mm512_maskz_fixupimm_pd(k, a, b, c, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_fixupimm_round_pd
#define _mm512_maskz_fixupimm_round_pd(k, a, b, c, imm8, sae) simde_mm512_maskz_fixupimm_round_pd(k, a, b, c, imm8, sae)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm_fixupimm_round_ss(a, b, c, imm8, sae) _mm_fixupimm_round_ss(a, b, c, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm_fixupimm_round_ss(a, b, c, imm8, sae) simde_mm_fixupimm_ss(a, b, c, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
#define simde_mm_fixupimm_round_ss(a, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m128 simde_mm_fixupimm_round_ss_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm_fixupimm_round_ss_envp; \
int simde_mm_fixupimm_round_ss_x = feholdexcept(&simde_mm_fixupimm_round_ss_envp); \
simde_mm_fixupimm_round_ss_r = simde_mm_fixupimm_ss(a, b, c, imm8); \
if (HEDLEY_LIKELY(simde_mm_fixupimm_round_ss_x == 0)) \
fesetenv(&simde_mm_fixupimm_round_ss_envp); \
} \
else { \
simde_mm_fixupimm_round_ss_r = simde_mm_fixupimm_ss(a, b, c, imm8); \
} \
\
simde_mm_fixupimm_round_ss_r; \
}))
#else
#define simde_mm_fixupimm_round_ss(a, b, c, imm8, sae) simde_mm_fixupimm_ss(a, b, c, imm8)
#endif
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_fixupimm_round_ss (simde__m128 a, simde__m128 b, simde__m128i c, int imm8, int sae)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15)
SIMDE_REQUIRE_CONSTANT(sae) {
simde__m128 r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm_fixupimm_ss(a, b, c, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm_fixupimm_ss(a, b, c, imm8);
#endif
}
else {
r = simde_mm_fixupimm_ss(a, b, c, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_fixupimm_round_ss
#define _mm_fixupimm_round_ss(a, b, c, imm8, sae) simde_mm_fixupimm_round_ss(a, b, c, imm8, sae)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm_mask_fixupimm_round_ss(a, k, b, c, imm8, sae) _mm_mask_fixupimm_round_ss(a, k, b, c, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm_mask_fixupimm_round_ss(a, k, b, c, imm8, sae) simde_mm_mask_fixupimm_ss(a, k, b, c, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
#define simde_mm_mask_fixupimm_round_ss(a, k, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m128 simde_mm_mask_fixupimm_round_ss_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm_mask_fixupimm_round_ss_envp; \
int simde_mm_mask_fixupimm_round_ss_x = feholdexcept(&simde_mm_mask_fixupimm_round_ss_envp); \
simde_mm_mask_fixupimm_round_ss_r = simde_mm_mask_fixupimm_ss(a, k, b, c, imm8); \
if (HEDLEY_LIKELY(simde_mm_mask_fixupimm_round_ss_x == 0)) \
fesetenv(&simde_mm_mask_fixupimm_round_ss_envp); \
} \
else { \
simde_mm_mask_fixupimm_round_ss_r = simde_mm_mask_fixupimm_ss(a, k, b, c, imm8); \
} \
\
simde_mm_mask_fixupimm_round_ss_r; \
}))
#else
#define simde_mm_mask_fixupimm_round_ss(a, k, b, c, imm8, sae) simde_mm_mask_fixupimm_ss(a, k, b, c, imm8)
#endif
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mask_fixupimm_round_ss (simde__m128 a, simde__mmask8 k, simde__m128 b, simde__m128i c, int imm8, int sae)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15)
SIMDE_REQUIRE_CONSTANT(sae) {
simde__m128 r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm_mask_fixupimm_ss(a, k, b, c, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm_mask_fixupimm_ss(a, k, b, c, imm8);
#endif
}
else {
r = simde_mm_mask_fixupimm_ss(a, k, b, c, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_fixupimm_round_ss
#define _mm_mask_fixupimm_round_ss(a, k, b, c, imm8, sae) simde_mm_mask_fixupimm_round_ss(a, k, b, c, imm8, sae)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm_maskz_fixupimm_round_ss(k, a, b, c, imm8, sae) _mm_maskz_fixupimm_round_ss(k, a, b, c, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm_maskz_fixupimm_round_ss(k, a, b, c, imm8, sae) simde_mm_maskz_fixupimm_ss(k, a, b, c, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
#define simde_mm_maskz_fixupimm_round_ss(k, a, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m128 simde_mm_maskz_fixupimm_round_ss_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm_maskz_fixupimm_round_ss_envp; \
int simde_mm_maskz_fixupimm_round_ss_x = feholdexcept(&simde_mm_maskz_fixupimm_round_ss_envp); \
simde_mm_maskz_fixupimm_round_ss_r = simde_mm_maskz_fixupimm_ss(k, a, b, c, imm8); \
if (HEDLEY_LIKELY(simde_mm_maskz_fixupimm_round_ss_x == 0)) \
fesetenv(&simde_mm_maskz_fixupimm_round_ss_envp); \
} \
else { \
simde_mm_maskz_fixupimm_round_ss_r = simde_mm_maskz_fixupimm_ss(k, a, b, c, imm8); \
} \
\
simde_mm_maskz_fixupimm_round_ss_r; \
}))
#else
#define simde_mm_maskz_fixupimm_round_ss(k, a, b, c, imm8, sae) simde_mm_maskz_fixupimm_ss(k, a, b, c, imm8)
#endif
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_maskz_fixupimm_round_ss (simde__mmask8 k, simde__m128 a, simde__m128 b, simde__m128i c, int imm8, int sae)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15)
SIMDE_REQUIRE_CONSTANT(sae) {
simde__m128 r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm_maskz_fixupimm_ss(k, a, b, c, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm_maskz_fixupimm_ss(k, a, b, c, imm8);
#endif
}
else {
r = simde_mm_maskz_fixupimm_ss(k, a, b, c, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_fixupimm_round_ss
#define _mm_maskz_fixupimm_round_ss(k, a, b, c, imm8, sae) simde_mm_maskz_fixupimm_round_ss(k, a, b, c, imm8, sae)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm_fixupimm_round_sd(a, b, c, imm8, sae) _mm_fixupimm_round_sd(a, b, c, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm_fixupimm_round_sd(a, b, c, imm8, sae) simde_mm_fixupimm_sd(a, b, c, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
#define simde_mm_fixupimm_round_sd(a, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m128d simde_mm_fixupimm_round_sd_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm_fixupimm_round_sd_envp; \
int simde_mm_fixupimm_round_sd_x = feholdexcept(&simde_mm_fixupimm_round_sd_envp); \
simde_mm_fixupimm_round_sd_r = simde_mm_fixupimm_sd(a, b, c, imm8); \
if (HEDLEY_LIKELY(simde_mm_fixupimm_round_sd_x == 0)) \
fesetenv(&simde_mm_fixupimm_round_sd_envp); \
} \
else { \
simde_mm_fixupimm_round_sd_r = simde_mm_fixupimm_sd(a, b, c, imm8); \
} \
\
simde_mm_fixupimm_round_sd_r; \
}))
#else
#define simde_mm_fixupimm_round_sd(a, b, c, imm8, sae) simde_mm_fixupimm_sd(a, b, c, imm8)
#endif
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_fixupimm_round_sd (simde__m128d a, simde__m128d b, simde__m128i c, int imm8, int sae)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15)
SIMDE_REQUIRE_CONSTANT(sae) {
simde__m128d r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm_fixupimm_sd(a, b, c, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm_fixupimm_sd(a, b, c, imm8);
#endif
}
else {
r = simde_mm_fixupimm_sd(a, b, c, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_fixupimm_round_sd
#define _mm_fixupimm_round_sd(a, b, c, imm8, sae) simde_mm_fixupimm_round_sd(a, b, c, imm8, sae)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm_mask_fixupimm_round_sd(a, k, b, c, imm8, sae) _mm_mask_fixupimm_round_sd(a, k, b, c, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm_mask_fixupimm_round_sd(a, k, b, c, imm8, sae) simde_mm_mask_fixupimm_sd(a, k, b, c, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
#define simde_mm_mask_fixupimm_round_sd(a, k, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m128d simde_mm_mask_fixupimm_round_sd_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm_mask_fixupimm_round_sd_envp; \
int simde_mm_mask_fixupimm_round_sd_x = feholdexcept(&simde_mm_mask_fixupimm_round_sd_envp); \
simde_mm_mask_fixupimm_round_sd_r = simde_mm_mask_fixupimm_sd(a, k, b, c, imm8); \
if (HEDLEY_LIKELY(simde_mm_mask_fixupimm_round_sd_x == 0)) \
fesetenv(&simde_mm_mask_fixupimm_round_sd_envp); \
} \
else { \
simde_mm_mask_fixupimm_round_sd_r = simde_mm_mask_fixupimm_sd(a, k, b, c, imm8); \
} \
\
simde_mm_mask_fixupimm_round_sd_r; \
}))
#else
#define simde_mm_mask_fixupimm_round_sd(a, k, b, c, imm8, sae) simde_mm_mask_fixupimm_sd(a, k, b, c, imm8)
#endif
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_mask_fixupimm_round_sd (simde__m128d a, simde__mmask8 k, simde__m128d b, simde__m128i c, int imm8, int sae)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15)
SIMDE_REQUIRE_CONSTANT(sae) {
simde__m128d r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm_mask_fixupimm_sd(a, k, b, c, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm_mask_fixupimm_sd(a, k, b, c, imm8);
#endif
}
else {
r = simde_mm_mask_fixupimm_sd(a, k, b, c, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_mask_fixupimm_round_sd
#define _mm_mask_fixupimm_round_sd(a, k, b, c, imm8, sae) simde_mm_mask_fixupimm_round_sd(a, k, b, c, imm8, sae)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm_maskz_fixupimm_round_sd(k, a, b, c, imm8, sae) _mm_maskz_fixupimm_round_sd(k, a, b, c, imm8, sae)
#elif defined(SIMDE_FAST_EXCEPTIONS)
#define simde_mm_maskz_fixupimm_round_sd(k, a, b, c, imm8, sae) simde_mm_maskz_fixupimm_sd(k, a, b, c, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
#if defined(SIMDE_HAVE_FENV_H)
#define simde_mm_maskz_fixupimm_round_sd(k, a, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \
simde__m128d simde_mm_maskz_fixupimm_round_sd_r; \
\
if (sae & SIMDE_MM_FROUND_NO_EXC) { \
fenv_t simde_mm_maskz_fixupimm_round_sd_envp; \
int simde_mm_maskz_fixupimm_round_sd_x = feholdexcept(&simde_mm_maskz_fixupimm_round_sd_envp); \
simde_mm_maskz_fixupimm_round_sd_r = simde_mm_maskz_fixupimm_sd(k, a, b, c, imm8); \
if (HEDLEY_LIKELY(simde_mm_maskz_fixupimm_round_sd_x == 0)) \
fesetenv(&simde_mm_maskz_fixupimm_round_sd_envp); \
} \
else { \
simde_mm_maskz_fixupimm_round_sd_r = simde_mm_maskz_fixupimm_sd(k, a, b, c, imm8); \
} \
\
simde_mm_maskz_fixupimm_round_sd_r; \
}))
#else
#define simde_mm_maskz_fixupimm_round_sd(k, a, b, c, imm8, sae) simde_mm_maskz_fixupimm_sd(k, a, b, c, imm8)
#endif
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_maskz_fixupimm_round_sd (simde__mmask8 k, simde__m128d a, simde__m128d b, simde__m128i c, int imm8, int sae)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15)
SIMDE_REQUIRE_CONSTANT(sae) {
simde__m128d r;
if (sae & SIMDE_MM_FROUND_NO_EXC) {
#if defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = simde_mm_maskz_fixupimm_sd(k, a, b, c, imm8);
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = simde_mm_maskz_fixupimm_sd(k, a, b, c, imm8);
#endif
}
else {
r = simde_mm_maskz_fixupimm_sd(k, a, b, c, imm8);
}
return r;
}
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm_maskz_fixupimm_round_sd
#define _mm_maskz_fixupimm_round_sd(k, a, b, c, imm8, sae) simde_mm_maskz_fixupimm_round_sd(k, a, b, c, imm8, sae)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_FIXUPIMM_ROUND_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/shuffle.h | .h | 16,258 | 362 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Christopher Moore <moore@free.fr>
* 2023 Michael R. Crusoe <crusoe@debian.org>
*/
#if !defined(SIMDE_X86_AVX512_SHUFFLE_H)
#define SIMDE_X86_AVX512_SHUFFLE_H
#include "types.h"
#include "../avx2.h"
#include "mov.h"
#include "extract.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_shuffle_epi8 (simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_shuffle_epi8(a, b);
#else
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(a_.m256i) / sizeof(a_.m256i[0])) ; i++) {
r_.m256i[i] = simde_mm256_shuffle_epi8(a_.m256i[i], b_.m256i[i]);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = (b_.i8[i] & 0x80) ? 0 : a_.i8[(b_.i8[i] & 0x0f) + (i & 0x30)];
}
#endif
return simde__m512i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_shuffle_epi8
#define _mm512_shuffle_epi8(a, b) simde_mm512_shuffle_epi8(a, b)
#endif
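/* Worked example (hypothetical values): each output byte is selected from the
 * same 16-byte lane of `a` using the low nibble of the corresponding byte of
 * `b`; a set high bit in b zeroes the byte:
 *
 *   b.i8[i] == 0x03  ->  r.i8[i] = a.i8[(i & 0x30) + 3]
 *   b.i8[i] == 0x80  ->  r.i8[i] = 0
 */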
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_shuffle_epi8 (simde__m512i src, simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_mask_shuffle_epi8(src, k, a, b);
#else
return simde_mm512_mask_mov_epi8(src, k, simde_mm512_shuffle_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_mask_shuffle_epi8
#define _mm512_mask_shuffle_epi8(src, k, a, b) simde_mm512_mask_shuffle_epi8(src, k, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_shuffle_epi8 (simde__mmask64 k, simde__m512i a, simde__m512i b) {
#if defined(SIMDE_X86_AVX512BW_NATIVE)
return _mm512_maskz_shuffle_epi8(k, a, b);
#else
return simde_mm512_maskz_mov_epi8(k, simde_mm512_shuffle_epi8(a, b));
#endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_maskz_shuffle_epi8
#define _mm512_maskz_shuffle_epi8(k, a, b) simde_mm512_maskz_shuffle_epi8(k, a, b)
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE)
# define simde_mm512_shuffle_epi32(a, imm8) _mm512_shuffle_epi32((a), (imm8))
#elif defined(SIMDE_STATEMENT_EXPR_)
# define simde_mm512_shuffle_epi32(a, imm8) SIMDE_STATEMENT_EXPR_(({ \
simde__m512i_private simde_mm512_shuffle_epi32_r_, \
simde_mm512_shuffle_epi32_a_ = simde__m512i_to_private((a)); \
simde_mm512_shuffle_epi32_r_.m128i[0] = simde_mm_shuffle_epi32( \
simde_mm512_shuffle_epi32_a_.m128i[0], (imm8)); \
simde_mm512_shuffle_epi32_r_.m128i[1] = simde_mm_shuffle_epi32( \
simde_mm512_shuffle_epi32_a_.m128i[1], (imm8)); \
simde_mm512_shuffle_epi32_r_.m128i[2] = simde_mm_shuffle_epi32( \
simde_mm512_shuffle_epi32_a_.m128i[2], (imm8)); \
simde_mm512_shuffle_epi32_r_.m128i[3] = simde_mm_shuffle_epi32( \
simde_mm512_shuffle_epi32_a_.m128i[3], (imm8)); \
simde__m512i_from_private(simde_mm512_shuffle_epi32_r_); \
}))
#else
# define simde_mm512_shuffle_epi32(a, imm8) \
simde_x_mm512_set_m128i( \
simde_mm_shuffle_epi32(simde_mm512_extracti32x4_epi32(a, 3), (imm8)), \
simde_mm_shuffle_epi32(simde_mm512_extracti32x4_epi32(a, 2), (imm8)), \
simde_mm_shuffle_epi32(simde_mm512_extracti32x4_epi32(a, 1), (imm8)), \
simde_mm_shuffle_epi32(simde_mm512_extracti32x4_epi32(a, 0), (imm8)))
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_shuffle_epi32
#define _mm512_shuffle_epi32(a, imm8) simde_mm512_shuffle_epi32((a), (imm8))
#endif
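/* Worked example: imm8 selects, two bits at a time, which of the four 32-bit
 * elements of each 128-bit lane lands in each output slot. For instance
 * imm8 == 0x1B (0b00011011) reverses every lane:
 *
 *   {x0,x1,x2,x3, ...}  ->  {x3,x2,x1,x0, ...}
 */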
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_shuffle_i32x4 (simde__m256i a, simde__m256i b, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a),
b_ = simde__m256i_to_private(b);
r_.m128i[0] = a_.m128i[ imm8 & 1];
r_.m128i[1] = b_.m128i[(imm8 >> 1) & 1];
return simde__m256i_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
#define simde_mm256_shuffle_i32x4(a, b, imm8) _mm256_shuffle_i32x4(a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
#undef _mm256_shuffle_i32x4
#define _mm256_shuffle_i32x4(a, b, imm8) simde_mm256_shuffle_i32x4(a, b, imm8)
#endif
#define simde_mm256_maskz_shuffle_i32x4(k, a, b, imm8) simde_mm256_maskz_mov_epi32(k, simde_mm256_shuffle_i32x4(a, b, imm8))
#define simde_mm256_mask_shuffle_i32x4(src, k, a, b, imm8) simde_mm256_mask_mov_epi32(src, k, simde_mm256_shuffle_i32x4(a, b, imm8))
#define simde_mm256_shuffle_f32x4(a, b, imm8) simde_mm256_castsi256_ps(simde_mm256_shuffle_i32x4(simde_mm256_castps_si256(a), simde_mm256_castps_si256(b), imm8))
#define simde_mm256_maskz_shuffle_f32x4(k, a, b, imm8) simde_mm256_maskz_mov_ps(k, simde_mm256_shuffle_f32x4(a, b, imm8))
#define simde_mm256_mask_shuffle_f32x4(src, k, a, b, imm8) simde_mm256_mask_mov_ps(src, k, simde_mm256_shuffle_f32x4(a, b, imm8))
#define simde_mm256_shuffle_i64x2(a, b, imm8) simde_mm256_shuffle_i32x4(a, b, imm8)
#define simde_mm256_maskz_shuffle_i64x2(k, a, b, imm8) simde_mm256_maskz_mov_epi64(k, simde_mm256_shuffle_i64x2(a, b, imm8))
#define simde_mm256_mask_shuffle_i64x2(src, k, a, b, imm8) simde_mm256_mask_mov_epi64(src, k, simde_mm256_shuffle_i64x2(a, b, imm8))
#define simde_mm256_shuffle_f64x2(a, b, imm8) simde_mm256_castsi256_pd(simde_mm256_shuffle_i64x2(simde_mm256_castpd_si256(a), simde_mm256_castpd_si256(b), imm8))
#define simde_mm256_maskz_shuffle_f64x2(k, a, b, imm8) simde_mm256_maskz_mov_pd(k, simde_mm256_shuffle_f64x2(a, b, imm8))
#define simde_mm256_mask_shuffle_f64x2(src, k, a, b, imm8) simde_mm256_mask_mov_pd(src, k, simde_mm256_shuffle_f64x2(a, b, imm8))
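/* The 256-bit variant picks one 128-bit half from a (imm8 bit 0) and one from
 * b (imm8 bit 1). The f32x4/i64x2/f64x2 forms above are the same data
 * movement reinterpreted through casts, which is why shuffle_i64x2 can simply
 * alias shuffle_i32x4: both select whole 128-bit chunks. */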
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_shuffle_i32x4 (simde__m512i a, simde__m512i b, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m512i_private
r_,
a_ = simde__m512i_to_private(a),
b_ = simde__m512i_to_private(b);
r_.m128i[0] = a_.m128i[ imm8 & 3];
r_.m128i[1] = a_.m128i[(imm8 >> 2) & 3];
r_.m128i[2] = b_.m128i[(imm8 >> 4) & 3];
r_.m128i[3] = b_.m128i[(imm8 >> 6) & 3];
return simde__m512i_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_shuffle_i32x4(a, b, imm8) _mm512_shuffle_i32x4(a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_shuffle_i32x4
#define _mm512_shuffle_i32x4(a, b, imm8) simde_mm512_shuffle_i32x4(a, b, imm8)
#endif
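/* Worked example (hypothetical values): with imm8 == 0xE4 (0b11100100) the
 * identity selection {0,1,2,3} is applied, giving {a.lane0, a.lane1, b.lane2,
 * b.lane3}; imm8 == 0x00 broadcasts a.lane0 into the low two slots and
 * b.lane0 into the high two. */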
#define simde_mm512_maskz_shuffle_i32x4(k, a, b, imm8) simde_mm512_maskz_mov_epi32(k, simde_mm512_shuffle_i32x4(a, b, imm8))
#define simde_mm512_mask_shuffle_i32x4(src, k, a, b, imm8) simde_mm512_mask_mov_epi32(src, k, simde_mm512_shuffle_i32x4(a, b, imm8))
#define simde_mm512_shuffle_f32x4(a, b, imm8) simde_mm512_castsi512_ps(simde_mm512_shuffle_i32x4(simde_mm512_castps_si512(a), simde_mm512_castps_si512(b), imm8))
#define simde_mm512_maskz_shuffle_f32x4(k, a, b, imm8) simde_mm512_maskz_mov_ps(k, simde_mm512_shuffle_f32x4(a, b, imm8))
#define simde_mm512_mask_shuffle_f32x4(src, k, a, b, imm8) simde_mm512_mask_mov_ps(src, k, simde_mm512_shuffle_f32x4(a, b, imm8))
#define simde_mm512_shuffle_i64x2(a, b, imm8) simde_mm512_shuffle_i32x4(a, b, imm8)
#define simde_mm512_maskz_shuffle_i64x2(k, a, b, imm8) simde_mm512_maskz_mov_epi64(k, simde_mm512_shuffle_i64x2(a, b, imm8))
#define simde_mm512_mask_shuffle_i64x2(src, k, a, b, imm8) simde_mm512_mask_mov_epi64(src, k, simde_mm512_shuffle_i64x2(a, b, imm8))
#define simde_mm512_shuffle_f64x2(a, b, imm8) simde_mm512_castsi512_pd(simde_mm512_shuffle_i64x2(simde_mm512_castpd_si512(a), simde_mm512_castpd_si512(b), imm8))
#define simde_mm512_maskz_shuffle_f64x2(k, a, b, imm8) simde_mm512_maskz_mov_pd(k, simde_mm512_shuffle_f64x2(a, b, imm8))
#define simde_mm512_mask_shuffle_f64x2(src, k, a, b, imm8) simde_mm512_mask_mov_pd(src, k, simde_mm512_shuffle_f64x2(a, b, imm8))
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_shuffle_ps(a, b, imm8) _mm512_shuffle_ps(a, b, imm8)
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) && defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm512_shuffle_ps(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \
simde__m512_private \
simde_mm512_shuffle_ps_a_ = simde__m512_to_private(a), \
simde_mm512_shuffle_ps_b_ = simde__m512_to_private(b); \
\
simde_mm512_shuffle_ps_a_.m256[0] = simde_mm256_shuffle_ps(simde_mm512_shuffle_ps_a_.m256[0], simde_mm512_shuffle_ps_b_.m256[0], imm8); \
simde_mm512_shuffle_ps_a_.m256[1] = simde_mm256_shuffle_ps(simde_mm512_shuffle_ps_a_.m256[1], simde_mm512_shuffle_ps_b_.m256[1], imm8); \
\
simde__m512_from_private(simde_mm512_shuffle_ps_a_); \
}))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm512_shuffle_ps(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \
simde__m512_private \
simde_mm512_shuffle_ps_a_ = simde__m512_to_private(a), \
simde_mm512_shuffle_ps_b_ = simde__m512_to_private(b); \
\
simde_mm512_shuffle_ps_a_.f32 = \
SIMDE_SHUFFLE_VECTOR_( \
32, 64, \
simde_mm512_shuffle_ps_a_.f32, \
simde_mm512_shuffle_ps_b_.f32, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3) + 16, \
(((imm8) >> 6) & 3) + 16, \
(((imm8) ) & 3) + 4, \
(((imm8) >> 2) & 3) + 4, \
(((imm8) >> 4) & 3) + 20, \
(((imm8) >> 6) & 3) + 20, \
(((imm8) ) & 3) + 8, \
(((imm8) >> 2) & 3) + 8, \
(((imm8) >> 4) & 3) + 24, \
(((imm8) >> 6) & 3) + 24, \
(((imm8) ) & 3) + 12, \
(((imm8) >> 2) & 3) + 12, \
(((imm8) >> 4) & 3) + 28, \
(((imm8) >> 6) & 3) + 28 \
); \
\
simde__m512_from_private(simde_mm512_shuffle_ps_a_); \
}))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_shuffle_ps(simde__m512 a, simde__m512 b, int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE (imm8, 0, 255) {
simde__m512_private
r_,
a_ = simde__m512_to_private(a),
b_ = simde__m512_to_private(b);
const size_t halfway = (sizeof(r_.m128_private[0].f32) / sizeof(r_.m128_private[0].f32[0]) / 2);
for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) {
SIMDE_VECTORIZE
for (size_t j = 0 ; j < halfway ; j++) {
r_.m128_private[i].f32[j] = a_.m128_private[i].f32[(imm8 >> (j * 2)) & 3];
r_.m128_private[i].f32[halfway + j] = b_.m128_private[i].f32[(imm8 >> ((halfway + j) * 2)) & 3];
}
}
return simde__m512_from_private(r_);
}
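/* Illustrative note, not part of the upstream header: within each 128-bit
 * lane, imm8 is read as four 2-bit element selectors, with the low two
 * results taken from `a` and the high two from `b`, mirroring
 * _mm_shuffle_ps. For example, imm8 == 0xB1 (0b10110001) selects
 * { a[1], a[0], b[3], b[2] } from the corresponding lane of each input. */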
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_shuffle_ps
#define _mm512_shuffle_ps(a, b, imm8) simde_mm512_shuffle_ps(a, b, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_shuffle_pd(simde__m512d a, simde__m512d b, int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE (imm8, 0, 255) {
simde__m512d_private
r_,
a_ = simde__m512d_to_private(a),
b_ = simde__m512d_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < ((sizeof(r_.f64) / sizeof(r_.f64[0])) / 2) ; i++) {
r_.f64[i * 2] = (imm8 & ( 1 << (i*2) )) ? a_.f64[i * 2 + 1]: a_.f64[i * 2];
r_.f64[i * 2 + 1] = (imm8 & ( 1 << (i*2+1) )) ? b_.f64[i * 2 + 1]: b_.f64[i * 2];
}
return simde__m512d_from_private(r_);
}
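/* Illustrative note, not part of the upstream header: each 128-bit lane i
 * consumes two imm8 bits: bit (2*i) chooses the low or high double of `a`
 * for the even result element, and bit (2*i + 1) chooses the low or high
 * double of `b` for the odd one. imm8 == 0 therefore interleaves the low
 * doubles of every lane: { a0, b0, a2, b2, a4, b4, a6, b6 }. */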
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define simde_mm512_shuffle_pd(a, b, imm8) _mm512_shuffle_pd(a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_shuffle_pd
#define _mm512_shuffle_pd(a, b, imm8) simde_mm512_shuffle_pd(a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512BW_NATIVE)
# define simde_mm512_shufflehi_epi16(a, imm8) _mm512_shufflehi_epi16(a, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
# define simde_mm512_shufflehi_epi16(a, imm8) SIMDE_STATEMENT_EXPR_(({ \
simde__m512i_private simde_mm512_shufflehi_epi16_r_, \
simde_mm512_shufflehi_epi16_a_ = simde__m512i_to_private((a)); \
simde_mm512_shufflehi_epi16_r_.m128i[0] = simde_mm_shufflehi_epi16( \
simde_mm512_shufflehi_epi16_a_.m128i[0], (imm8)); \
simde_mm512_shufflehi_epi16_r_.m128i[1] = simde_mm_shufflehi_epi16( \
simde_mm512_shufflehi_epi16_a_.m128i[1], (imm8)); \
simde_mm512_shufflehi_epi16_r_.m128i[2] = simde_mm_shufflehi_epi16( \
simde_mm512_shufflehi_epi16_a_.m128i[2], (imm8)); \
simde_mm512_shufflehi_epi16_r_.m128i[3] = simde_mm_shufflehi_epi16( \
simde_mm512_shufflehi_epi16_a_.m128i[3], (imm8)); \
simde__m512i_from_private(simde_mm512_shufflehi_epi16_r_); \
}))
#else
# define simde_mm512_shufflehi_epi16(a, imm8) \
simde_x_mm512_set_m128i( \
simde_mm_shufflehi_epi16(simde_mm512_extracti32x4_epi32((a), 3), (imm8)), \
simde_mm_shufflehi_epi16(simde_mm512_extracti32x4_epi32((a), 2), (imm8)), \
simde_mm_shufflehi_epi16(simde_mm512_extracti32x4_epi32((a), 1), (imm8)), \
simde_mm_shufflehi_epi16(simde_mm512_extracti32x4_epi32((a), 0), (imm8)))
#endif
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_shufflehi_epi16
#define _mm512_shufflehi_epi16(a, imm8) simde_mm512_shufflehi_epi16(a, imm8)
#endif
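/* Illustrative note, not part of the upstream header: shufflehi reorders
 * only the upper four 16-bit elements of each 128-bit lane, using the same
 * 2-bit-per-element imm8 encoding as shuffle_epi32, while the lower four
 * elements pass through unchanged; shufflelo (below) is the mirror image
 * for the lower four elements. */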
#if defined(SIMDE_X86_AVX512BW_NATIVE)
# define simde_mm512_shufflelo_epi16(a, imm8) _mm512_shufflelo_epi16(a, imm8)
#elif defined(SIMDE_STATEMENT_EXPR_)
# define simde_mm512_shufflelo_epi16(a, imm8) SIMDE_STATEMENT_EXPR_(({ \
simde__m512i_private simde_mm512_shufflelo_epi16_r_, \
simde_mm512_shufflelo_epi16_a_ = simde__m512i_to_private((a)); \
simde_mm512_shufflelo_epi16_r_.m128i[0] = simde_mm_shufflelo_epi16( \
simde_mm512_shufflelo_epi16_a_.m128i[0], (imm8)); \
simde_mm512_shufflelo_epi16_r_.m128i[1] = simde_mm_shufflelo_epi16( \
simde_mm512_shufflelo_epi16_a_.m128i[1], (imm8)); \
simde_mm512_shufflelo_epi16_r_.m128i[2] = simde_mm_shufflelo_epi16( \
simde_mm512_shufflelo_epi16_a_.m128i[2], (imm8)); \
simde_mm512_shufflelo_epi16_r_.m128i[3] = simde_mm_shufflelo_epi16( \
simde_mm512_shufflelo_epi16_a_.m128i[3], (imm8)); \
simde__m512i_from_private(simde_mm512_shufflelo_epi16_r_); \
}))
#else
# define simde_mm512_shufflelo_epi16(a, imm8) \
simde_x_mm512_set_m128i( \
simde_mm_shufflelo_epi16(simde_mm512_extracti32x4_epi32((a), 3), (imm8)), \
simde_mm_shufflelo_epi16(simde_mm512_extracti32x4_epi32((a), 2), (imm8)), \
simde_mm_shufflelo_epi16(simde_mm512_extracti32x4_epi32((a), 1), (imm8)), \
simde_mm_shufflelo_epi16(simde_mm512_extracti32x4_epi32((a), 0), (imm8)))
#endif
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
#undef _mm512_shufflelo_epi16
#define _mm512_shufflelo_epi16(a, imm8) simde_mm512_shufflelo_epi16(a, imm8)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_SHUFFLE_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/set4.h | .h | 3,705 | 141 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Hidayat Khan <huk2209@gmail.com>
*/
#if !defined(SIMDE_X86_AVX512_SET4_H)
#define SIMDE_X86_AVX512_SET4_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_set4_epi32 (int32_t d, int32_t c, int32_t b, int32_t a) {
simde__m512i_private r_;
r_.i32[ 0] = a;
r_.i32[ 1] = b;
r_.i32[ 2] = c;
r_.i32[ 3] = d;
r_.i32[ 4] = a;
r_.i32[ 5] = b;
r_.i32[ 6] = c;
r_.i32[ 7] = d;
r_.i32[ 8] = a;
r_.i32[ 9] = b;
r_.i32[10] = c;
r_.i32[11] = d;
r_.i32[12] = a;
r_.i32[13] = b;
r_.i32[14] = c;
r_.i32[15] = d;
return simde__m512i_from_private(r_);
}
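/* Illustrative note, not part of the upstream header: despite the reversed
 * parameter order, `a` lands in the lowest element and the (a, b, c, d)
 * pattern repeats across the vector, so simde_mm512_set4_epi32(4, 3, 2, 1)
 * produces { 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 } in element
 * order. */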
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_set4_epi32
#define _mm512_set4_epi32(d,c,b,a) simde_mm512_set4_epi32(d,c,b,a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_set4_epi64 (int64_t d, int64_t c, int64_t b, int64_t a) {
simde__m512i_private r_;
r_.i64[0] = a;
r_.i64[1] = b;
r_.i64[2] = c;
r_.i64[3] = d;
r_.i64[4] = a;
r_.i64[5] = b;
r_.i64[6] = c;
r_.i64[7] = d;
return simde__m512i_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_set4_epi64
#define _mm512_set4_epi64(d,c,b,a) simde_mm512_set4_epi64(d,c,b,a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_set4_ps (simde_float32 d, simde_float32 c, simde_float32 b, simde_float32 a) {
simde__m512_private r_;
r_.f32[ 0] = a;
r_.f32[ 1] = b;
r_.f32[ 2] = c;
r_.f32[ 3] = d;
r_.f32[ 4] = a;
r_.f32[ 5] = b;
r_.f32[ 6] = c;
r_.f32[ 7] = d;
r_.f32[ 8] = a;
r_.f32[ 9] = b;
r_.f32[10] = c;
r_.f32[11] = d;
r_.f32[12] = a;
r_.f32[13] = b;
r_.f32[14] = c;
r_.f32[15] = d;
return simde__m512_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_set4_ps
#define _mm512_set4_ps(d,c,b,a) simde_mm512_set4_ps(d,c,b,a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_set4_pd (simde_float64 d, simde_float64 c, simde_float64 b, simde_float64 a) {
simde__m512d_private r_;
r_.f64[0] = a;
r_.f64[1] = b;
r_.f64[2] = c;
r_.f64[3] = d;
r_.f64[4] = a;
r_.f64[5] = b;
r_.f64[6] = c;
r_.f64[7] = d;
return simde__m512d_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
#undef _mm512_set4_pd
#define _mm512_set4_pd(d,c,b,a) simde_mm512_set4_pd(d,c,b,a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_SET4_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/x86/avx512/xorsign.h | .h | 2,426 | 73 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Himanshi Mathur <himanshi18037@iiitd.ac.in>
*/
/* This is a SIMDe extension which is not part of AVX-512. It exists
 * because a lot of numerical methods in SIMDe have algorithms which do
* something like:
*
* float sgn = input < 0 ? -1 : 1;
* ...
* return res * sgn;
*
* Which can be replaced with a much more efficient call to xorsign:
*
* return simde_x_mm512_xorsign_ps(res, input);
*
* While this was originally intended for use in SIMDe, please feel
* free to use it in your code.
*/
#if !defined(SIMDE_X86_AVX512_XORSIGN_H)
#define SIMDE_X86_AVX512_XORSIGN_H
#include "types.h"
#include "mov.h"
#include "and.h"
#include "xor.h"
#include "set1.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_x_mm512_xorsign_ps(simde__m512 dest, simde__m512 src) {
return simde_mm512_xor_ps(simde_mm512_and_ps(simde_mm512_set1_ps(-0.0f), src), dest);
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_x_mm512_xorsign_pd(simde__m512d dest, simde__m512d src) {
return simde_mm512_xor_pd(simde_mm512_and_pd(simde_mm512_set1_pd(-0.0), src), dest);
}
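/* Illustrative sketch, not part of the upstream header; the variable names
 * below are hypothetical. The trick relies on IEEE-754 storing the sign in
 * the top bit: AND-ing `src` with -0.0 isolates that bit, and XOR-ing it
 * into `dest` flips the sign of `dest` exactly when `src` is negative.
 *
 *   simde__m512 res = ...;   // some computed magnitude
 *   simde__m512 in  = ...;   // value whose sign should be applied
 *   simde__m512 out = simde_x_mm512_xorsign_ps(res, in);
 *   // out[i] is res[i] with its sign flipped wherever in[i]'s sign bit is set
 */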
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_AVX512_XORSIGN_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon.h | .h | 5,792 | 219 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_H)
#define SIMDE_ARM_NEON_H
#include "neon/types.h"
#include "neon/aba.h"
#include "neon/abd.h"
#include "neon/abdl.h"
#include "neon/abs.h"
#include "neon/add.h"
#include "neon/addhn.h"
#include "neon/addl.h"
#include "neon/addlv.h"
#include "neon/addl_high.h"
#include "neon/addv.h"
#include "neon/addw.h"
#include "neon/addw_high.h"
#include "neon/and.h"
#include "neon/bcax.h"
#include "neon/bic.h"
#include "neon/bsl.h"
#include "neon/cage.h"
#include "neon/cagt.h"
#include "neon/ceq.h"
#include "neon/ceqz.h"
#include "neon/cge.h"
#include "neon/cgez.h"
#include "neon/cgt.h"
#include "neon/cgtz.h"
#include "neon/cle.h"
#include "neon/clez.h"
#include "neon/cls.h"
#include "neon/clt.h"
#include "neon/cltz.h"
#include "neon/clz.h"
#include "neon/cmla.h"
#include "neon/cmla_rot90.h"
#include "neon/cmla_rot180.h"
#include "neon/cmla_rot270.h"
#include "neon/cnt.h"
#include "neon/cvt.h"
#include "neon/cvtn.h"
#include "neon/combine.h"
#include "neon/create.h"
#include "neon/dot.h"
#include "neon/dot_lane.h"
#include "neon/dup_lane.h"
#include "neon/dup_n.h"
#include "neon/eor.h"
#include "neon/ext.h"
#include "neon/fma.h"
#include "neon/fma_lane.h"
#include "neon/fma_n.h"
#include "neon/get_high.h"
#include "neon/get_lane.h"
#include "neon/get_low.h"
#include "neon/hadd.h"
#include "neon/hsub.h"
#include "neon/ld1.h"
#include "neon/ld1_dup.h"
#include "neon/ld1_lane.h"
#include "neon/ld1_x2.h"
#include "neon/ld1_x3.h"
#include "neon/ld1_x4.h"
#include "neon/ld1q_x2.h"
#include "neon/ld1q_x3.h"
#include "neon/ld1q_x4.h"
#include "neon/ld2.h"
#include "neon/ld3.h"
#include "neon/ld4.h"
#include "neon/ld4_lane.h"
#include "neon/max.h"
#include "neon/maxnm.h"
#include "neon/maxv.h"
#include "neon/min.h"
#include "neon/minnm.h"
#include "neon/minv.h"
#include "neon/mla.h"
#include "neon/mla_lane.h"
#include "neon/mla_n.h"
#include "neon/mlal.h"
#include "neon/mlal_high.h"
#include "neon/mlal_high_n.h"
#include "neon/mlal_lane.h"
#include "neon/mlal_n.h"
#include "neon/mls.h"
#include "neon/mls_n.h"
#include "neon/mlsl.h"
#include "neon/mlsl_high.h"
#include "neon/mlsl_high_n.h"
#include "neon/mlsl_lane.h"
#include "neon/mlsl_n.h"
#include "neon/movl.h"
#include "neon/movl_high.h"
#include "neon/movn.h"
#include "neon/movn_high.h"
#include "neon/mul.h"
#include "neon/mul_lane.h"
#include "neon/mul_n.h"
#include "neon/mull.h"
#include "neon/mull_high.h"
#include "neon/mull_lane.h"
#include "neon/mull_n.h"
#include "neon/mvn.h"
#include "neon/neg.h"
#include "neon/orn.h"
#include "neon/orr.h"
#include "neon/padal.h"
#include "neon/padd.h"
#include "neon/paddl.h"
#include "neon/pmax.h"
#include "neon/pmin.h"
#include "neon/qabs.h"
#include "neon/qadd.h"
#include "neon/qdmulh.h"
#include "neon/qdmulh_lane.h"
#include "neon/qdmulh_n.h"
#include "neon/qdmull.h"
#include "neon/qrdmulh.h"
#include "neon/qrdmulh_lane.h"
#include "neon/qrdmulh_n.h"
#include "neon/qrshrn_n.h"
#include "neon/qrshrun_n.h"
#include "neon/qmovn.h"
#include "neon/qmovun.h"
#include "neon/qmovn_high.h"
#include "neon/qneg.h"
#include "neon/qsub.h"
#include "neon/qshl.h"
#include "neon/qshlu_n.h"
#include "neon/qshrn_n.h"
#include "neon/qshrun_n.h"
#include "neon/qtbl.h"
#include "neon/qtbx.h"
#include "neon/rbit.h"
#include "neon/recpe.h"
#include "neon/recps.h"
#include "neon/reinterpret.h"
#include "neon/rev16.h"
#include "neon/rev32.h"
#include "neon/rev64.h"
#include "neon/rhadd.h"
#include "neon/rnd.h"
#include "neon/rndm.h"
#include "neon/rndi.h"
#include "neon/rndn.h"
#include "neon/rndp.h"
#include "neon/rshl.h"
#include "neon/rshr_n.h"
#include "neon/rshrn_n.h"
#include "neon/rsqrte.h"
#include "neon/rsqrts.h"
#include "neon/rsra_n.h"
#include "neon/set_lane.h"
#include "neon/shl.h"
#include "neon/shl_n.h"
#include "neon/shll_n.h"
#include "neon/shr_n.h"
#include "neon/shrn_n.h"
#include "neon/sqadd.h"
#include "neon/sra_n.h"
#include "neon/sri_n.h"
#include "neon/st1.h"
#include "neon/st1_lane.h"
#include "neon/st2.h"
#include "neon/st2_lane.h"
#include "neon/st3.h"
#include "neon/st3_lane.h"
#include "neon/st4.h"
#include "neon/st4_lane.h"
#include "neon/sub.h"
#include "neon/subhn.h"
#include "neon/subl.h"
#include "neon/subl_high.h"
#include "neon/subw.h"
#include "neon/subw_high.h"
#include "neon/tbl.h"
#include "neon/tbx.h"
#include "neon/trn.h"
#include "neon/trn1.h"
#include "neon/trn2.h"
#include "neon/tst.h"
#include "neon/uqadd.h"
#include "neon/uzp.h"
#include "neon/uzp1.h"
#include "neon/uzp2.h"
#include "neon/xar.h"
#include "neon/zip.h"
#include "neon/zip1.h"
#include "neon/zip2.h"
#endif /* SIMDE_ARM_NEON_H */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/sve.h | .h | 1,606 | 48 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_SVE_H)
#define SIMDE_ARM_SVE_H
#include "sve/types.h"
#include "sve/add.h"
#include "sve/and.h"
#include "sve/cnt.h"
#include "sve/cmplt.h"
#include "sve/dup.h"
#include "sve/ld1.h"
#include "sve/ptest.h"
#include "sve/ptrue.h"
#include "sve/qadd.h"
#include "sve/reinterpret.h"
#include "sve/sel.h"
#include "sve/st1.h"
#include "sve/sub.h"
#include "sve/whilelt.h"
#endif /* SIMDE_ARM_SVE_H */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/sve/cmplt.h | .h | 26,247 | 504 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_SVE_CMPLT_H)
#define SIMDE_ARM_SVE_CMPLT_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svcmplt_s8(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svcmplt_s8(pg, op1, op2);
#else
simde_svbool_t r;
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r = simde_svbool_from_mmask64(_mm512_mask_cmplt_epi8_mask(simde_svbool_to_mmask64(pg), op1.m512i, op2.m512i));
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r = simde_svbool_from_mmask32(_mm256_mask_cmplt_epi8_mask(simde_svbool_to_mmask32(pg), op1.m256i[0], op2.m256i[0]));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon_i8 = vandq_s8(pg.neon_i8, vreinterpretq_s8_u8(vcltq_s8(op1.neon, op2.neon)));
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_and_si128(pg.m128i[i], _mm_cmplt_epi8(op1.m128i[i], op2.m128i[i]));
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec_b8 = vec_and(pg.altivec_b8, vec_cmplt(op1.altivec, op2.altivec));
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec_b8 = pg.altivec_b8 & vec_cmplt(op1.altivec, op2.altivec);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_v128_and(pg.v128, wasm_i8x16_lt(op1.v128, op2.v128));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values_i8 = pg.values_i8 & HEDLEY_REINTERPRET_CAST(__typeof__(r.values_i8), op1.values < op2.values);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values_i8) / sizeof(r.values_i8[0])) ; i++) {
r.values_i8[i] = pg.values_i8[i] & ((op1.values[i] < op2.values[i]) ? ~INT8_C(0) : INT8_C(0));
}
#endif
return r;
#endif
}
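/* Illustrative note, not part of the upstream header: every fallback above
 * computes a full lane-wise `op1 < op2` mask and then ANDs it with the
 * governing predicate `pg`, so inactive lanes always come back false. For
 * example, with pg = { active, inactive, ... }, op1 = { 1, 1, ... } and
 * op2 = { 2, 2, ... }, only lane 0 is set in the result. */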
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svcmplt_s8
#define svcmplt_s8(pg, op1, op2) simde_svcmplt_s8(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svcmplt_s16(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svcmplt_s16(pg, op1, op2);
#else
simde_svbool_t r;
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r = simde_svbool_from_mmask32(_mm512_mask_cmplt_epi16_mask(simde_svbool_to_mmask32(pg), op1.m512i, op2.m512i));
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r = simde_svbool_from_mmask16(_mm256_mask_cmplt_epi16_mask(simde_svbool_to_mmask16(pg), op1.m256i[0], op2.m256i[0]));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon_i16 = vandq_s16(pg.neon_i16, vreinterpretq_s16_u16(vcltq_s16(op1.neon, op2.neon)));
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_and_si128(pg.m128i[i], _mm_cmplt_epi16(op1.m128i[i], op2.m128i[i]));
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec_b16 = vec_and(pg.altivec_b16, vec_cmplt(op1.altivec, op2.altivec));
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec_b16 = pg.altivec_b16 & vec_cmplt(op1.altivec, op2.altivec);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_v128_and(pg.v128, wasm_i16x8_lt(op1.v128, op2.v128));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values_i16 = pg.values_i16 & HEDLEY_REINTERPRET_CAST(__typeof__(r.values_i16), op1.values < op2.values);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values_i16) / sizeof(r.values_i16[0])) ; i++) {
r.values_i16[i] = pg.values_i16[i] & ((op1.values[i] < op2.values[i]) ? ~INT16_C(0) : INT16_C(0));
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svcmplt_s16
#define svcmplt_s16(pg, op1, op2) simde_svcmplt_s16(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svcmplt_s32(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svcmplt_s32(pg, op1, op2);
#else
simde_svbool_t r;
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r = simde_svbool_from_mmask16(_mm512_mask_cmplt_epi32_mask(simde_svbool_to_mmask16(pg), op1.m512i, op2.m512i));
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r = simde_svbool_from_mmask8(_mm256_mask_cmplt_epi32_mask(simde_svbool_to_mmask8(pg), op1.m256i[0], op2.m256i[0]));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon_i32 = vandq_s32(pg.neon_i32, vreinterpretq_s32_u32(vcltq_s32(op1.neon, op2.neon)));
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_and_si128(pg.m128i[i], _mm_cmplt_epi32(op1.m128i[i], op2.m128i[i]));
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec_b32 = vec_and(pg.altivec_b32, vec_cmplt(op1.altivec, op2.altivec));
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec_b32 = pg.altivec_b32 & vec_cmplt(op1.altivec, op2.altivec);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_v128_and(pg.v128, wasm_i32x4_lt(op1.v128, op2.v128));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values_i32 = pg.values_i32 & HEDLEY_REINTERPRET_CAST(__typeof__(r.values_i32), op1.values < op2.values);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values_i32) / sizeof(r.values_i32[0])) ; i++) {
r.values_i32[i] = pg.values_i32[i] & ((op1.values[i] < op2.values[i]) ? ~INT32_C(0) : INT32_C(0));
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svcmplt_s32
#define svcmplt_s32(pg, op1, op2) simde_svcmplt_s32(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svcmplt_s64(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svcmplt_s64(pg, op1, op2);
#else
simde_svbool_t r;
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r = simde_svbool_from_mmask8(_mm512_mask_cmplt_epi64_mask(simde_svbool_to_mmask8(pg), op1.m512i, op2.m512i));
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r = simde_svbool_from_mmask4(_mm256_mask_cmplt_epi64_mask(simde_svbool_to_mmask4(pg), op1.m256i[0], op2.m256i[0]));
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r.neon_i64 = vandq_s64(pg.neon_i64, vreinterpretq_s64_u64(vcltq_s64(op1.neon, op2.neon)));
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r.altivec_b64 = vec_and(pg.altivec_b64, vec_cmplt(op1.altivec, op2.altivec));
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec_b64 = pg.altivec_b64 & vec_cmplt(op1.altivec, op2.altivec);
#elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_WASM_TODO)
r.v128 = wasm_v128_and(pg.v128, wasm_i64x2_lt(op1.v128, op2.v128));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values_i64 = pg.values_i64 & HEDLEY_REINTERPRET_CAST(__typeof__(r.values_i64), op1.values < op2.values);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values_i64) / sizeof(r.values_i64[0])) ; i++) {
r.values_i64[i] = pg.values_i64[i] & ((op1.values[i] < op2.values[i]) ? ~INT64_C(0) : INT64_C(0));
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svcmplt_s64
#define svcmplt_s64(pg, op1, op2) simde_svcmplt_s64(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svcmplt_u8(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svcmplt_u8(pg, op1, op2);
#else
simde_svbool_t r;
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r = simde_svbool_from_mmask64(_mm512_mask_cmplt_epu8_mask(simde_svbool_to_mmask64(pg), op1.m512i, op2.m512i));
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r = simde_svbool_from_mmask32(_mm256_mask_cmplt_epu8_mask(simde_svbool_to_mmask32(pg), op1.m256i[0], op2.m256i[0]));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon_u8 = vandq_u8(pg.neon_u8, vcltq_u8(op1.neon, op2.neon));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec_b8 = vec_and(pg.altivec_b8, vec_cmplt(op1.altivec, op2.altivec));
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec_b8 = pg.altivec_b8 & vec_cmplt(op1.altivec, op2.altivec);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_v128_and(pg.v128, wasm_u8x16_lt(op1.v128, op2.v128));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values_u8 = pg.values_u8 & HEDLEY_REINTERPRET_CAST(__typeof__(r.values_u8), op1.values < op2.values);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values_u8) / sizeof(r.values_u8[0])) ; i++) {
r.values_u8[i] = pg.values_u8[i] & ((op1.values[i] < op2.values[i]) ? ~UINT8_C(0) : UINT8_C(0));
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svcmplt_u8
#define svcmplt_u8(pg, op1, op2) simde_svcmplt_u8(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svcmplt_u16(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svcmplt_u16(pg, op1, op2);
#else
simde_svbool_t r;
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r = simde_svbool_from_mmask32(_mm512_mask_cmplt_epu16_mask(simde_svbool_to_mmask32(pg), op1.m512i, op2.m512i));
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r = simde_svbool_from_mmask16(_mm256_mask_cmplt_epu16_mask(simde_svbool_to_mmask16(pg), op1.m256i[0], op2.m256i[0]));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon_u16 = vandq_u16(pg.neon_u16, vcltq_u16(op1.neon, op2.neon));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec_b16 = vec_and(pg.altivec_b16, vec_cmplt(op1.altivec, op2.altivec));
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec_b16 = pg.altivec_b16 & vec_cmplt(op1.altivec, op2.altivec);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_v128_and(pg.v128, wasm_u16x8_lt(op1.v128, op2.v128));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values_u16 = pg.values_u16 & HEDLEY_REINTERPRET_CAST(__typeof__(r.values_u16), op1.values < op2.values);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values_u16) / sizeof(r.values_u16[0])) ; i++) {
r.values_u16[i] = pg.values_u16[i] & ((op1.values[i] < op2.values[i]) ? ~UINT16_C(0) : UINT16_C(0));
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svcmplt_u16
#define svcmplt_u16(pg, op1, op2) simde_svcmplt_u16(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svcmplt_u32(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svcmplt_u32(pg, op1, op2);
#else
simde_svbool_t r;
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r = simde_svbool_from_mmask16(_mm512_mask_cmplt_epu32_mask(simde_svbool_to_mmask16(pg), op1.m512i, op2.m512i));
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r = simde_svbool_from_mmask8(_mm256_mask_cmplt_epu32_mask(simde_svbool_to_mmask8(pg), op1.m256i[0], op2.m256i[0]));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon_u32 = vandq_u32(pg.neon_u32, vcltq_u32(op1.neon, op2.neon));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec_b32 = vec_and(pg.altivec_b32, vec_cmplt(op1.altivec, op2.altivec));
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec_b32 = pg.altivec_b32 & vec_cmplt(op1.altivec, op2.altivec);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_v128_and(pg.v128, wasm_u32x4_lt(op1.v128, op2.v128));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values_u32 = pg.values_u32 & HEDLEY_REINTERPRET_CAST(__typeof__(r.values_u32), op1.values < op2.values);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values_u32) / sizeof(r.values_u32[0])) ; i++) {
r.values_u32[i] = pg.values_u32[i] & ((op1.values[i] < op2.values[i]) ? ~UINT32_C(0) : UINT32_C(0));
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svcmplt_u32
#define svcmplt_u32(pg, op1, op2) simde_svcmplt_u32(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svcmplt_u64(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svcmplt_u64(pg, op1, op2);
#else
simde_svbool_t r;
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r = simde_svbool_from_mmask8(_mm512_mask_cmplt_epu64_mask(simde_svbool_to_mmask8(pg), op1.m512i, op2.m512i));
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r = simde_svbool_from_mmask4(_mm256_mask_cmplt_epu64_mask(simde_svbool_to_mmask4(pg), op1.m256i[0], op2.m256i[0]));
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r.neon_u64 = vandq_u64(pg.neon_u64, vcltq_u64(op1.neon, op2.neon));
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r.altivec_b64 = vec_and(pg.altivec_b64, vec_cmplt(op1.altivec, op2.altivec));
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec_b64 = pg.altivec_b64 & vec_cmplt(op1.altivec, op2.altivec);
#elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_WASM_TODO)
r.v128 = wasm_v128_and(pg.v128, wasm_u64x2_lt(op1.v128, op2.v128));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values_u64 = pg.values_u64 & HEDLEY_REINTERPRET_CAST(__typeof__(r.values_u64), op1.values < op2.values);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values_u64) / sizeof(r.values_u64[0])) ; i++) {
r.values_u64[i] = pg.values_u64[i] & ((op1.values[i] < op2.values[i]) ? ~UINT64_C(0) : UINT64_C(0));
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svcmplt_u64
#define svcmplt_u64(pg, op1, op2) simde_svcmplt_u64(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svcmplt_f32(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svcmplt_f32(pg, op1, op2);
#else
simde_svbool_t r;
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r = simde_svbool_from_mmask16(_mm512_mask_cmp_ps_mask(simde_svbool_to_mmask16(pg), op1.m512, op2.m512, _CMP_LT_OQ));
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r = simde_svbool_from_mmask8(_mm256_mask_cmp_ps_mask(simde_svbool_to_mmask8(pg), op1.m256[0], op2.m256[0], _CMP_LT_OQ));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon_u32 = vandq_u32(pg.neon_u32, vcltq_f32(op1.neon, op2.neon));
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_castps_si128(_mm_and_ps(_mm_castsi128_ps(pg.m128i[i]), _mm_cmplt_ps(op1.m128[i], op2.m128[i])));
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec_b32 = vec_and(pg.altivec_b32, vec_cmplt(op1.altivec, op2.altivec));
#elif defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r.altivec_b32 = pg.altivec_b32 & vec_cmplt(op1.altivec, op2.altivec);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_v128_and(pg.v128, wasm_f32x4_lt(op1.v128, op2.v128));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values_i32 = pg.values_i32 & HEDLEY_REINTERPRET_CAST(__typeof__(r.values_i32), op1.values < op2.values);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values_i32) / sizeof(r.values_i32[0])) ; i++) {
r.values_i32[i] = pg.values_i32[i] & ((op1.values[i] < op2.values[i]) ? ~INT32_C(0) : INT32_C(0));
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svcmplt_f32
#define svcmplt_f32(pg, op1, op2) simde_svcmplt_f32(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svcmplt_f64(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svcmplt_f64(pg, op1, op2);
#else
simde_svbool_t r;
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r = simde_svbool_from_mmask8(_mm512_mask_cmp_pd_mask(simde_svbool_to_mmask8(pg), op1.m512d, op2.m512d, _CMP_LT_OQ));
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r = simde_svbool_from_mmask4(_mm256_mask_cmp_pd_mask(simde_svbool_to_mmask4(pg), op1.m256d[0], op2.m256d[0], _CMP_LT_OQ));
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r.neon_u64 = vandq_u64(pg.neon_u64, vcltq_f64(op1.neon, op2.neon));
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_castpd_si128(_mm_and_pd(_mm_castsi128_pd(pg.m128i[i]), _mm_cmplt_pd(op1.m128d[i], op2.m128d[i])));
}
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec_b64 = pg.altivec_b64 & vec_cmplt(op1.altivec, op2.altivec);
#elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_WASM_TODO)
r.v128 = wasm_v128_and(pg.v128, wasm_f64x2_lt(op1.v128, op2.v128));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values_i64 = pg.values_i64 & HEDLEY_REINTERPRET_CAST(__typeof__(r.values_i64), op1.values < op2.values);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values_i64) / sizeof(r.values_i64[0])) ; i++) {
r.values_i64[i] = pg.values_i64[i] & ((op1.values[i] < op2.values[i]) ? ~INT64_C(0) : INT64_C(0));
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svcmplt_f64
#define svcmplt_f64(pg, op1, op2) simde_svcmplt_f64(pg, op1, op2)
#endif
#if defined(__cplusplus)
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svcmplt(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { return simde_svcmplt_s8(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svcmplt(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { return simde_svcmplt_s16(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svcmplt(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { return simde_svcmplt_s32(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svcmplt(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { return simde_svcmplt_s64(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svcmplt(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svcmplt_u8(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svcmplt(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svcmplt_u16(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svcmplt(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svcmplt_u32(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svcmplt(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { return simde_svcmplt_u64(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svcmplt(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) { return simde_svcmplt_f32(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svcmplt(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { return simde_svcmplt_f64(pg, op1, op2); }
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
SIMDE_FUNCTION_ATTRIBUTES svbool_t svcmplt(svbool_t pg, svint8_t op1, svint8_t op2) { return svcmplt_s8(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES svbool_t svcmplt(svbool_t pg, svint16_t op1, svint16_t op2) { return svcmplt_s16(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES svbool_t svcmplt(svbool_t pg, svint32_t op1, svint32_t op2) { return svcmplt_s32(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES svbool_t svcmplt(svbool_t pg, svint64_t op1, svint64_t op2) { return svcmplt_s64(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES svbool_t svcmplt(svbool_t pg, svuint8_t op1, svuint8_t op2) { return svcmplt_u8(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES svbool_t svcmplt(svbool_t pg, svuint16_t op1, svuint16_t op2) { return svcmplt_u16(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES svbool_t svcmplt(svbool_t pg, svuint32_t op1, svuint32_t op2) { return svcmplt_u32(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES svbool_t svcmplt(svbool_t pg, svuint64_t op1, svuint64_t op2) { return svcmplt_u64(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES svbool_t svcmplt(svbool_t pg, svfloat32_t op1, svfloat32_t op2) { return svcmplt_f32(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES svbool_t svcmplt(svbool_t pg, svfloat64_t op1, svfloat64_t op2) { return svcmplt_f64(pg, op1, op2); }
#endif
#elif defined(SIMDE_GENERIC_)
#define simde_svcmplt(pg, op1, op2) \
  (SIMDE_GENERIC_((op1), \
    simde_svint8_t: simde_svcmplt_s8, \
    simde_svint16_t: simde_svcmplt_s16, \
    simde_svint32_t: simde_svcmplt_s32, \
    simde_svint64_t: simde_svcmplt_s64, \
    simde_svuint8_t: simde_svcmplt_u8, \
    simde_svuint16_t: simde_svcmplt_u16, \
    simde_svuint32_t: simde_svcmplt_u32, \
    simde_svuint64_t: simde_svcmplt_u64, \
    simde_svfloat32_t: simde_svcmplt_f32, \
    simde_svfloat64_t: simde_svcmplt_f64)(pg, op1, op2))
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#define svcmplt(pg, op1, op2) \
  (SIMDE_GENERIC_((op1), \
    svint8_t: simde_svcmplt_s8, \
    svint16_t: simde_svcmplt_s16, \
    svint32_t: simde_svcmplt_s32, \
    svint64_t: simde_svcmplt_s64, \
    svuint8_t: simde_svcmplt_u8, \
    svuint16_t: simde_svcmplt_u16, \
    svuint32_t: simde_svcmplt_u32, \
    svuint64_t: simde_svcmplt_u64, \
    svfloat32_t: simde_svcmplt_f32, \
    svfloat64_t: simde_svcmplt_f64)(pg, op1, op2))
#endif
#endif
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef svcmplt
#define svcmplt(pg, op1, op2) simde_svcmplt((pg), (op1), (op2))
#endif
HEDLEY_DIAGNOSTIC_POP
#endif /* SIMDE_ARM_SVE_CMPLT_H */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/sve/types.h | .h | 34,178 | 916 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
*/
/* TODO: SVE2 is going to be a bit awkward with this setup. We currently
* either use SVE vectors or assume that the vector length is known at
* compile-time. For CPUs which provide SVE but not SVE2 we're going
* to be getting scalable vectors, so we may need to loop through them.
*
* Currently I'm thinking we'll have a separate function for non-SVE
* types. We can call that function in a loop from an SVE version,
* and we can call it once from a resolver.
*
* Unfortunately this is going to mean a lot of boilerplate for SVE,
* which already has several variants of a lot of functions (*_z, *_m,
* etc.), plus overloaded functions in C++ and generic selectors in C.
*
* Anyways, all this means that we're going to need to always define
* the portable types.
*
* The good news is that at least we don't have to deal with
* to/from_private functions; since the no-SVE versions will only be
* called with non-SVE params. */
#if !defined(SIMDE_ARM_SVE_TYPES_H)
#define SIMDE_ARM_SVE_TYPES_H
#include "../../simde-common.h"
#include "../../simde-f16.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
#if defined(SIMDE_VECTOR_SUBSCRIPT)
#define SIMDE_ARM_SVE_DECLARE_VECTOR(Element_Type, Name, Vector_Size) Element_Type Name SIMDE_VECTOR(Vector_Size)
#else
#define SIMDE_ARM_SVE_DECLARE_VECTOR(Element_Type, Name, Vector_Size) Element_Type Name[(Vector_Size) / sizeof(Element_Type)]
#endif
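/* Illustrative note, not part of the upstream header: when the compiler
 * supports GCC-style vector extensions, this macro declares a true vector
 * type (enabling the SIMDE_VECTOR_SUBSCRIPT_OPS code paths); otherwise it
 * falls back to a plain array occupying the same number of bytes. */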
#if defined(SIMDE_ARM_SVE_NATIVE)
typedef svbool_t simde_svbool_t;
typedef svint8_t simde_svint8_t;
typedef svint16_t simde_svint16_t;
typedef svint32_t simde_svint32_t;
typedef svint64_t simde_svint64_t;
typedef svuint8_t simde_svuint8_t;
typedef svuint16_t simde_svuint16_t;
typedef svuint32_t simde_svuint32_t;
typedef svuint64_t simde_svuint64_t;
#if defined(__ARM_FEATURE_SVE_BF16)
typedef svbfloat16_t simde_svbfloat16_t;
#endif
typedef svfloat16_t simde_svfloat16_t;
typedef svfloat32_t simde_svfloat32_t;
typedef svfloat64_t simde_svfloat64_t;
typedef float32_t simde_float32_t;
typedef float64_t simde_float64_t;
#else
#if SIMDE_NATURAL_VECTOR_SIZE > 0
#define SIMDE_ARM_SVE_VECTOR_SIZE SIMDE_NATURAL_VECTOR_SIZE
#else
#define SIMDE_ARM_SVE_VECTOR_SIZE (128)
#endif
typedef simde_float32 simde_float32_t;
typedef simde_float64 simde_float64_t;
typedef union {
SIMDE_ARM_SVE_DECLARE_VECTOR(int8_t, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8));
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
__m512i m512i;
#endif
#if defined(SIMDE_X86_AVX2_NATIVE)
__m256i m256i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256i)];
#endif
#if defined(SIMDE_X86_SSE2_NATIVE)
__m128i m128i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128i)];
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int8x16_t neon;
#endif
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_svint8_t;
typedef union {
SIMDE_ARM_SVE_DECLARE_VECTOR(int16_t, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8));
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
__m512i m512i;
#endif
#if defined(SIMDE_X86_AVX2_NATIVE)
__m256i m256i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256i)];
#endif
#if defined(SIMDE_X86_SSE2_NATIVE)
__m128i m128i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128i)];
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int16x8_t neon;
#endif
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_svint16_t;
typedef union {
SIMDE_ARM_SVE_DECLARE_VECTOR(int32_t, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8));
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
__m512i m512i;
#endif
#if defined(SIMDE_X86_AVX2_NATIVE)
__m256i m256i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256i)];
#endif
#if defined(SIMDE_X86_SSE2_NATIVE)
__m128i m128i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128i)];
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int32x4_t neon;
#endif
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_svint32_t;
typedef union {
SIMDE_ARM_SVE_DECLARE_VECTOR(int64_t, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8));
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
__m512i m512i;
#endif
#if defined(SIMDE_X86_AVX2_NATIVE)
__m256i m256i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256i)];
#endif
#if defined(SIMDE_X86_SSE2_NATIVE)
__m128i m128i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128i)];
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int64x2_t neon;
#endif
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(signed long long int) altivec;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_svint64_t;
typedef union {
SIMDE_ARM_SVE_DECLARE_VECTOR(uint8_t, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8));
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
__m512i m512i;
#endif
#if defined(SIMDE_X86_AVX2_NATIVE)
__m256i m256i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256i)];
#endif
#if defined(SIMDE_X86_SSE2_NATIVE)
__m128i m128i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128i)];
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint8x16_t neon;
#endif
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_svuint8_t;
typedef union {
SIMDE_ARM_SVE_DECLARE_VECTOR(uint16_t, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8));
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
__m512i m512i;
#endif
#if defined(SIMDE_X86_AVX2_NATIVE)
__m256i m256i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256i)];
#endif
#if defined(SIMDE_X86_SSE2_NATIVE)
__m128i m128i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128i)];
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint16x8_t neon;
#endif
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_svuint16_t;
typedef union {
SIMDE_ARM_SVE_DECLARE_VECTOR(uint32_t, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8));
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
__m512i m512i;
#endif
#if defined(SIMDE_X86_AVX2_NATIVE)
__m256i m256i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256i)];
#endif
#if defined(SIMDE_X86_SSE2_NATIVE)
__m128i m128i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128i)];
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t neon;
#endif
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_svuint32_t;
typedef union {
SIMDE_ARM_SVE_DECLARE_VECTOR(uint64_t, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8));
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
__m512i m512i;
#endif
#if defined(SIMDE_X86_AVX2_NATIVE)
__m256i m256i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256i)];
#endif
#if defined(SIMDE_X86_SSE2_NATIVE)
__m128i m128i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128i)];
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint64x2_t neon;
#endif
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long int) altivec;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_svuint64_t;
typedef union {
SIMDE_ARM_SVE_DECLARE_VECTOR(uint16_t, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8));
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
__m512i m512i;
#endif
#if defined(SIMDE_X86_AVX2_NATIVE)
__m256i m256i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256i)];
#endif
#if defined(SIMDE_X86_SSE2_NATIVE)
__m128i m128i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128i)];
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
float16x8_t neon;
#endif
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_svfloat16_t;
typedef union {
SIMDE_ARM_SVE_DECLARE_VECTOR(uint16_t, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8));
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
__m512i m512i;
#endif
#if defined(SIMDE_X86_AVX2_NATIVE)
__m256i m256i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256i)];
#endif
#if defined(SIMDE_X86_SSE2_NATIVE)
__m128i m128i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128i)];
#endif
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_svbfloat16_t;
typedef union {
SIMDE_ARM_SVE_DECLARE_VECTOR(simde_float32, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8));
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
__m512 m512;
#endif
#if defined(SIMDE_X86_AVX_NATIVE)
__m256 m256[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256)];
#endif
#if defined(SIMDE_X86_SSE_NATIVE)
__m128 m128[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128)];
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t neon;
#endif
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(float) altivec;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_svfloat32_t;
typedef union {
SIMDE_ARM_SVE_DECLARE_VECTOR(simde_float64, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8));
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
__m512d m512d;
#endif
#if defined(SIMDE_X86_AVX2_NATIVE)
__m256d m256d[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256d)];
#endif
#if defined(SIMDE_X86_SSE2_NATIVE)
__m128d m128d[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128d)];
#endif
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
float64x2_t neon;
#endif
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(double) altivec;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_svfloat64_t;
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
typedef struct {
__mmask64 value;
int type;
} simde_svbool_t;
#if defined(__BMI2__)
static const uint64_t simde_arm_sve_mask_bp_lo_ = UINT64_C(0x5555555555555555);
static const uint64_t simde_arm_sve_mask_bp_hi_ = UINT64_C(0xaaaaaaaaaaaaaaaa);
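/* Widening an n-bit predicate mask to 2n bits duplicates each source
* bit into a pair of adjacent bit positions. With BMI2 that is two
* scatters: _pdep deposits the source bits into the even positions
* (the 0x55... pattern) and again into the odd positions (0xaa...),
* and the two results are ORed. Narrowing is the inverse: _pext
* gathers the even and the odd bits separately, and ANDing the two
* keeps a narrow lane only when both bits of the wide pair were set. */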
SIMDE_FUNCTION_ATTRIBUTES
__mmask64
simde_arm_sve_mmask32_to_mmask64(__mmask32 m) {
return HEDLEY_STATIC_CAST(__mmask64,
_pdep_u64(HEDLEY_STATIC_CAST(uint64_t, m), simde_arm_sve_mask_bp_lo_) |
_pdep_u64(HEDLEY_STATIC_CAST(uint64_t, m), simde_arm_sve_mask_bp_hi_));
}
SIMDE_FUNCTION_ATTRIBUTES
__mmask32
simde_arm_sve_mmask16_to_mmask32(__mmask16 m) {
return HEDLEY_STATIC_CAST(__mmask32,
_pdep_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, simde_arm_sve_mask_bp_lo_)) |
_pdep_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, simde_arm_sve_mask_bp_hi_)));
}
SIMDE_FUNCTION_ATTRIBUTES
__mmask16
simde_arm_sve_mmask8_to_mmask16(__mmask8 m) {
return HEDLEY_STATIC_CAST(__mmask16,
_pdep_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, simde_arm_sve_mask_bp_lo_)) |
_pdep_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, simde_arm_sve_mask_bp_hi_)));
}
SIMDE_FUNCTION_ATTRIBUTES
__mmask8
simde_arm_sve_mmask4_to_mmask8(__mmask8 m) {
return HEDLEY_STATIC_CAST(__mmask8,
_pdep_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, simde_arm_sve_mask_bp_lo_)) |
_pdep_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, simde_arm_sve_mask_bp_hi_)));
}
SIMDE_FUNCTION_ATTRIBUTES
__mmask32
simde_arm_sve_mmask64_to_mmask32(__mmask64 m) {
return HEDLEY_STATIC_CAST(__mmask32,
_pext_u64(HEDLEY_STATIC_CAST(uint64_t, m), HEDLEY_STATIC_CAST(uint64_t, simde_arm_sve_mask_bp_lo_)) &
_pext_u64(HEDLEY_STATIC_CAST(uint64_t, m), HEDLEY_STATIC_CAST(uint64_t, simde_arm_sve_mask_bp_hi_)));
}
SIMDE_FUNCTION_ATTRIBUTES
__mmask16
simde_arm_sve_mmask32_to_mmask16(__mmask32 m) {
return HEDLEY_STATIC_CAST(__mmask16,
_pext_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, simde_arm_sve_mask_bp_lo_)) &
_pext_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, simde_arm_sve_mask_bp_hi_)));
}
SIMDE_FUNCTION_ATTRIBUTES
__mmask8
simde_arm_sve_mmask16_to_mmask8(__mmask16 m) {
return HEDLEY_STATIC_CAST(__mmask8,
_pext_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, simde_arm_sve_mask_bp_lo_)) &
_pext_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, simde_arm_sve_mask_bp_hi_)));
}
SIMDE_FUNCTION_ATTRIBUTES
__mmask8
simde_arm_sve_mmask8_to_mmask4(__mmask8 m) {
return HEDLEY_STATIC_CAST(__mmask8,
_pext_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, simde_arm_sve_mask_bp_lo_)) &
_pext_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, simde_arm_sve_mask_bp_hi_)));
}
#else
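/* Portable fallback: the classic shift-and-mask ladder for spreading
* bits apart (the same step used to build Morton codes). Each round
* doubles the gap between consecutive source bits; the even copy 'e'
* and the odd copy 'o' are then merged as (e | (o << 1)). Narrowing
* runs the ladder in the opposite direction with right shifts to
* squeeze alternating bits back together. */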
SIMDE_FUNCTION_ATTRIBUTES
__mmask64
simde_arm_sve_mmask32_to_mmask64(__mmask32 m) {
uint64_t e = HEDLEY_STATIC_CAST(uint64_t, m);
uint64_t o = HEDLEY_STATIC_CAST(uint64_t, m);
e = (e | (e << 16)) & UINT64_C(0x0000ffff0000ffff);
e = (e | (e << 8)) & UINT64_C(0x00ff00ff00ff00ff);
e = (e | (e << 4)) & UINT64_C(0x0f0f0f0f0f0f0f0f);
e = (e | (e << 2)) & UINT64_C(0x3333333333333333);
e = (e | (e << 1)) & UINT64_C(0x5555555555555555);
o = (o | (o << 16)) & UINT64_C(0x0000ffff0000ffff);
o = (o | (o << 8)) & UINT64_C(0x00ff00ff00ff00ff);
o = (o | (o << 4)) & UINT64_C(0x0f0f0f0f0f0f0f0f);
o = (o | (o << 2)) & UINT64_C(0x3333333333333333);
o = (o | (o << 1)) & UINT64_C(0x5555555555555555);
return HEDLEY_STATIC_CAST(__mmask64, e | (o << 1));
}
SIMDE_FUNCTION_ATTRIBUTES
__mmask32
simde_arm_sve_mmask16_to_mmask32(__mmask16 m) {
uint32_t e = HEDLEY_STATIC_CAST(uint32_t, m);
uint32_t o = HEDLEY_STATIC_CAST(uint32_t, m);
e = (e | (e << 8)) & UINT32_C(0x00FF00FF);
e = (e | (e << 4)) & UINT32_C(0x0F0F0F0F);
e = (e | (e << 2)) & UINT32_C(0x33333333);
e = (e | (e << 1)) & UINT32_C(0x55555555);
o = (o | (o << 8)) & UINT32_C(0x00FF00FF);
o = (o | (o << 4)) & UINT32_C(0x0F0F0F0F);
o = (o | (o << 2)) & UINT32_C(0x33333333);
o = (o | (o << 1)) & UINT32_C(0x55555555);
return HEDLEY_STATIC_CAST(__mmask32, e | (o << 1));
}
SIMDE_FUNCTION_ATTRIBUTES
__mmask16
simde_arm_sve_mmask8_to_mmask16(__mmask8 m) {
uint16_t e = HEDLEY_STATIC_CAST(uint16_t, m);
uint16_t o = HEDLEY_STATIC_CAST(uint16_t, m);
e = (e | (e << 4)) & UINT16_C(0x0f0f);
e = (e | (e << 2)) & UINT16_C(0x3333);
e = (e | (e << 1)) & UINT16_C(0x5555);
o = (o | (o << 4)) & UINT16_C(0x0f0f);
o = (o | (o << 2)) & UINT16_C(0x3333);
o = (o | (o << 1)) & UINT16_C(0x5555);
return HEDLEY_STATIC_CAST(uint16_t, e | (o << 1));
}
SIMDE_FUNCTION_ATTRIBUTES
__mmask8
simde_arm_sve_mmask4_to_mmask8(__mmask8 m) {
uint8_t e = HEDLEY_STATIC_CAST(uint8_t, m);
uint8_t o = HEDLEY_STATIC_CAST(uint8_t, m);
e = (e | (e << 2)) & UINT8_C(0x33);
e = (e | (e << 1)) & UINT8_C(0x55);
o = (o | (o << 2)) & UINT8_C(0x33);
o = (o | (o << 1)) & UINT8_C(0x55);
return HEDLEY_STATIC_CAST(uint8_t, e | (o << 1));
}
SIMDE_FUNCTION_ATTRIBUTES
__mmask32
simde_arm_sve_mmask64_to_mmask32(__mmask64 m) {
uint64_t l = (HEDLEY_STATIC_CAST(uint64_t, m) ) & UINT64_C(0x5555555555555555);
l = (l | (l >> 1)) & UINT64_C(0x3333333333333333);
l = (l | (l >> 2)) & UINT64_C(0x0f0f0f0f0f0f0f0f);
l = (l | (l >> 4)) & UINT64_C(0x00ff00ff00ff00ff);
l = (l | (l >> 8)) & UINT64_C(0x0000ffff0000ffff);
uint64_t h = (HEDLEY_STATIC_CAST(uint64_t, m) >> 1) & UINT64_C(0x5555555555555555);
h = (h | (h >> 1)) & UINT64_C(0x3333333333333333);
h = (h | (h >> 2)) & UINT64_C(0x0f0f0f0f0f0f0f0f);
h = (h | (h >> 4)) & UINT64_C(0x00ff00ff00ff00ff);
h = (h | (h >> 8)) & UINT64_C(0x0000ffff0000ffff);
return HEDLEY_STATIC_CAST(uint32_t, l & h);
}
SIMDE_FUNCTION_ATTRIBUTES
__mmask16
simde_arm_sve_mmask32_to_mmask16(__mmask32 m) {
uint32_t l = (HEDLEY_STATIC_CAST(uint32_t, m) ) & UINT32_C(0x55555555);
l = (l | (l >> 1)) & UINT32_C(0x33333333);
l = (l | (l >> 2)) & UINT32_C(0x0f0f0f0f);
l = (l | (l >> 4)) & UINT32_C(0x00ff00ff);
l = (l | (l >> 8)) & UINT32_C(0x0000ffff);
uint32_t h = (HEDLEY_STATIC_CAST(uint32_t, m) >> 1) & UINT32_C(0x55555555);
h = (h | (h >> 1)) & UINT32_C(0x33333333);
h = (h | (h >> 2)) & UINT32_C(0x0f0f0f0f);
h = (h | (h >> 4)) & UINT32_C(0x00ff00ff);
h = (h | (h >> 8)) & UINT32_C(0x0000ffff);
return HEDLEY_STATIC_CAST(uint16_t, l & h);
}
SIMDE_FUNCTION_ATTRIBUTES
__mmask8
simde_arm_sve_mmask16_to_mmask8(__mmask16 m) {
uint16_t l = (HEDLEY_STATIC_CAST(uint16_t, m) ) & UINT16_C(0x5555);
l = (l | (l >> 1)) & UINT16_C(0x3333);
l = (l | (l >> 2)) & UINT16_C(0x0f0f);
l = (l | (l >> 4)) & UINT16_C(0x00ff);
uint16_t h = (HEDLEY_STATIC_CAST(uint16_t, m) >> 1) & UINT16_C(0x5555);
h = (h | (h >> 1)) & UINT16_C(0x3333);
h = (h | (h >> 2)) & UINT16_C(0x0f0f);
h = (h | (h >> 4)) & UINT16_C(0x00ff);
return HEDLEY_STATIC_CAST(uint8_t, l & h);
}
SIMDE_FUNCTION_ATTRIBUTES
__mmask8
simde_arm_sve_mmask8_to_mmask4(__mmask8 m) {
uint8_t l = (HEDLEY_STATIC_CAST(uint8_t, m) ) & UINT8_C(0x55);
l = (l | (l >> 1)) & UINT8_C(0x33);
l = (l | (l >> 2)) & UINT8_C(0x0f);
uint8_t h = (HEDLEY_STATIC_CAST(uint8_t, m) >> 1) & UINT8_C(0x55);
h = (h | (h >> 1)) & UINT8_C(0x33);
h = (h | (h >> 2)) & UINT8_C(0x0f);
return HEDLEY_STATIC_CAST(uint8_t, l & h);
}
#endif
typedef enum {
SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK64,
SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK32,
SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK16,
SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK8,
#if SIMDE_ARM_SVE_VECTOR_SIZE < 512
SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK4,
#endif
} simde_svbool_mmask_type;
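/* An AVX-512 simde_svbool_t stores the raw __mmask* bits together with
* a tag recording the element width that produced them. The helpers
* below convert between widths on demand, and their switch statements
* use deliberate fall-through so that crossing several widths is just
* a chain of single-step widenings or narrowings. */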
SIMDE_FUNCTION_ATTRIBUTES HEDLEY_CONST
simde_svbool_t
simde_svbool_from_mmask64(__mmask64 mi) {
simde_svbool_t b;
b.value = HEDLEY_STATIC_CAST(__mmask64, mi);
b.type = SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK64;
return b;
}
SIMDE_FUNCTION_ATTRIBUTES HEDLEY_CONST
simde_svbool_t
simde_svbool_from_mmask32(__mmask32 mi) {
simde_svbool_t b;
b.value = HEDLEY_STATIC_CAST(__mmask64, mi);
b.type = SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK32;
return b;
}
SIMDE_FUNCTION_ATTRIBUTES HEDLEY_CONST
simde_svbool_t
simde_svbool_from_mmask16(__mmask16 mi) {
simde_svbool_t b;
b.value = HEDLEY_STATIC_CAST(__mmask64, mi);
b.type = SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK16;
return b;
}
SIMDE_FUNCTION_ATTRIBUTES HEDLEY_CONST
simde_svbool_t
simde_svbool_from_mmask8(__mmask8 mi) {
simde_svbool_t b;
b.value = HEDLEY_STATIC_CAST(__mmask64, mi);
b.type = SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK8;
return b;
}
#if SIMDE_ARM_SVE_VECTOR_SIZE < 512
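/* Below 512-bit vectors a predicate over 64-bit elements needs fewer
* than eight mask bits, so a separate "mmask4" width (carried in the
* low bits of an __mmask8) has to be tracked as well. */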
SIMDE_FUNCTION_ATTRIBUTES HEDLEY_CONST
simde_svbool_t
simde_svbool_from_mmask4(__mmask8 mi) {
simde_svbool_t b;
b.value = HEDLEY_STATIC_CAST(__mmask64, mi);
b.type = SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK4;
return b;
}
SIMDE_FUNCTION_ATTRIBUTES HEDLEY_CONST
__mmask8
simde_svbool_to_mmask4(simde_svbool_t b) {
__mmask64 tmp = b.value;
switch (b.type) {
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK64:
tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask64_to_mmask32(HEDLEY_STATIC_CAST(__mmask64, tmp)));
HEDLEY_FALL_THROUGH;
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK32:
tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask32_to_mmask16(HEDLEY_STATIC_CAST(__mmask32, tmp)));
HEDLEY_FALL_THROUGH;
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK16:
tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask16_to_mmask8(HEDLEY_STATIC_CAST(__mmask16, tmp)));
HEDLEY_FALL_THROUGH;
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK8:
tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask8_to_mmask4(HEDLEY_STATIC_CAST(__mmask8, tmp)));
}
return HEDLEY_STATIC_CAST(__mmask8, tmp);
}
#endif
SIMDE_FUNCTION_ATTRIBUTES HEDLEY_CONST
__mmask8
simde_svbool_to_mmask8(simde_svbool_t b) {
__mmask64 tmp = b.value;
switch (b.type) {
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK64:
tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask64_to_mmask32(HEDLEY_STATIC_CAST(__mmask64, tmp)));
HEDLEY_FALL_THROUGH;
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK32:
tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask32_to_mmask16(HEDLEY_STATIC_CAST(__mmask32, tmp)));
HEDLEY_FALL_THROUGH;
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK16:
tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask16_to_mmask8(HEDLEY_STATIC_CAST(__mmask16, tmp)));
HEDLEY_FALL_THROUGH;
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK8:
break;
#if SIMDE_ARM_SVE_VECTOR_SIZE < 512
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK4:
tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask4_to_mmask8(HEDLEY_STATIC_CAST(__mmask8, tmp)));
#endif
}
return HEDLEY_STATIC_CAST(__mmask8, tmp);
}
SIMDE_FUNCTION_ATTRIBUTES HEDLEY_CONST
__mmask16
simde_svbool_to_mmask16(simde_svbool_t b) {
__mmask64 tmp = b.value;
switch (b.type) {
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK64:
tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask64_to_mmask32(HEDLEY_STATIC_CAST(__mmask64, tmp)));
HEDLEY_FALL_THROUGH;
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK32:
tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask32_to_mmask16(HEDLEY_STATIC_CAST(__mmask32, tmp)));
HEDLEY_FALL_THROUGH;
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK16:
break;
#if SIMDE_ARM_SVE_VECTOR_SIZE < 512
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK4:
tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask4_to_mmask8(HEDLEY_STATIC_CAST(__mmask8, tmp)));
HEDLEY_FALL_THROUGH;
#endif
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK8:
tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask8_to_mmask16(HEDLEY_STATIC_CAST(__mmask8, tmp)));
}
return HEDLEY_STATIC_CAST(__mmask16, tmp);
}
SIMDE_FUNCTION_ATTRIBUTES HEDLEY_CONST
__mmask32
simde_svbool_to_mmask32(simde_svbool_t b) {
__mmask64 tmp = b.value;
switch (b.type) {
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK64:
tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask64_to_mmask32(HEDLEY_STATIC_CAST(__mmask64, tmp)));
HEDLEY_FALL_THROUGH;
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK32:
break;
#if SIMDE_ARM_SVE_VECTOR_SIZE < 512
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK4:
tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask4_to_mmask8(HEDLEY_STATIC_CAST(__mmask8, tmp)));
HEDLEY_FALL_THROUGH;
#endif
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK8:
tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask8_to_mmask16(HEDLEY_STATIC_CAST(__mmask8, tmp)));
HEDLEY_FALL_THROUGH;
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK16:
tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask16_to_mmask32(HEDLEY_STATIC_CAST(__mmask16, tmp)));
}
return HEDLEY_STATIC_CAST(__mmask32, tmp);
}
SIMDE_FUNCTION_ATTRIBUTES HEDLEY_CONST
__mmask64
simde_svbool_to_mmask64(simde_svbool_t b) {
__mmask64 tmp = b.value;
switch (b.type) {
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK64:
break;
#if SIMDE_ARM_SVE_VECTOR_SIZE < 512
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK4:
tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask4_to_mmask8(HEDLEY_STATIC_CAST(__mmask8, tmp)));
HEDLEY_FALL_THROUGH;
#endif
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK8:
tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask8_to_mmask16(HEDLEY_STATIC_CAST(__mmask8, tmp)));
HEDLEY_FALL_THROUGH;
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK16:
tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask16_to_mmask32(HEDLEY_STATIC_CAST(__mmask16, tmp)));
HEDLEY_FALL_THROUGH;
case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK32:
tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask32_to_mmask64(HEDLEY_STATIC_CAST(__mmask32, tmp)));
}
return HEDLEY_STATIC_CAST(__mmask64, tmp);
}
/* TODO: we're going to need svbool_to/from_svint* functions
* for when we can't implement a function using AVX-512. */
#else
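/* Without AVX-512 mask registers, simde_svbool_t falls back to a
* full-width vector of per-lane masks (all-ones or all-zeros in each
* element), viewable at any element width through the members below. */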
typedef union {
SIMDE_ARM_SVE_DECLARE_VECTOR( int8_t, values_i8, (SIMDE_ARM_SVE_VECTOR_SIZE / 8));
SIMDE_ARM_SVE_DECLARE_VECTOR( int16_t, values_i16, (SIMDE_ARM_SVE_VECTOR_SIZE / 8));
SIMDE_ARM_SVE_DECLARE_VECTOR( int32_t, values_i32, (SIMDE_ARM_SVE_VECTOR_SIZE / 8));
SIMDE_ARM_SVE_DECLARE_VECTOR( int64_t, values_i64, (SIMDE_ARM_SVE_VECTOR_SIZE / 8));
SIMDE_ARM_SVE_DECLARE_VECTOR( uint8_t, values_u8, (SIMDE_ARM_SVE_VECTOR_SIZE / 8));
SIMDE_ARM_SVE_DECLARE_VECTOR(uint16_t, values_u16, (SIMDE_ARM_SVE_VECTOR_SIZE / 8));
SIMDE_ARM_SVE_DECLARE_VECTOR(uint32_t, values_u32, (SIMDE_ARM_SVE_VECTOR_SIZE / 8));
SIMDE_ARM_SVE_DECLARE_VECTOR(uint64_t, values_u64, (SIMDE_ARM_SVE_VECTOR_SIZE / 8));
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
__m512i m512i;
#endif
#if defined(SIMDE_X86_AVX2_NATIVE)
__m256i m256i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256i)];
#endif
#if defined(SIMDE_X86_SSE2_NATIVE)
__m128i m128i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128i)];
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int8x16_t neon_i8;
int16x8_t neon_i16;
int32x4_t neon_i32;
int64x2_t neon_i64;
uint8x16_t neon_u8;
uint16x8_t neon_u16;
uint32x4_t neon_u32;
uint64x2_t neon_u64;
#endif
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL char) altivec_b8;
SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL short) altivec_b16;
SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL int) altivec_b32;
#endif
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL long long) altivec_b64;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_svbool_t;
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svbool_to_svint8, simde_svint8_t, simde_svbool_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svbool_from_svint8, simde_svbool_t, simde_svint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svbool_to_svint16, simde_svint16_t, simde_svbool_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_svbool_from_svint16, simde_svbool_t, simde_svint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svbool_to_svint32, simde_svint32_t, simde_svbool_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_svbool_from_svint32, simde_svbool_t, simde_svint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svbool_to_svint64, simde_svint64_t, simde_svbool_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_svbool_from_svint64, simde_svbool_t, simde_svint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svbool_to_svuint8, simde_svuint8_t, simde_svbool_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_svbool_from_svuint8, simde_svbool_t, simde_svuint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svbool_to_svuint16, simde_svuint16_t, simde_svbool_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_svbool_from_svuint16, simde_svbool_t, simde_svuint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svbool_to_svuint32, simde_svuint32_t, simde_svbool_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_svbool_from_svuint32, simde_svbool_t, simde_svuint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svbool_to_svuint64, simde_svuint64_t, simde_svbool_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_svbool_from_svuint64, simde_svbool_t, simde_svuint64_t)
#endif
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
typedef simde_svbool_t svbool_t;
typedef simde_svint8_t svint8_t;
typedef simde_svint16_t svint16_t;
typedef simde_svint32_t svint32_t;
typedef simde_svint64_t svint64_t;
typedef simde_svuint8_t svuint8_t;
typedef simde_svuint16_t svuint16_t;
typedef simde_svuint32_t svuint32_t;
typedef simde_svuint64_t svuint64_t;
typedef simde_svfloat16_t svfloat16_t;
typedef simde_svbfloat16_t svbfloat16_t;
typedef simde_svfloat32_t svfloat32_t;
typedef simde_svfloat64_t svfloat64_t;
#endif
#endif
#if !defined(SIMDE_ARM_SVE_DEFAULT_UNDEFINED_SUFFIX)
#define SIMDE_ARM_SVE_DEFAULT_UNDEFINED_SUFFIX z
#endif
#define SIMDE_ARM_SVE_UNDEFINED_SYMBOL(name) HEDLEY_CONCAT3(name, _, SIMDE_ARM_SVE_DEFAULT_UNDEFINED_SUFFIX)
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
/* These are going to be used pretty much everywhere since they are
* used to create the loops SVE requires. Since we want to support
* only including the files you need instead of just using sve.h,
* it's helpful to pull these in here. While this file is called
* arm/sve/types.h, it might be better to think of it more as
* arm/sve/common.h. */
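/* As an illustration only (this example is not part of the original
* header; the function name and buffers are hypothetical), the
* size-agnostic loop idiom the includes below enable looks like:
*
*   void copy_i32(int32_t *dst, const int32_t *src, int64_t n) {
*     for (int64_t i = 0 ; i < n ; i += HEDLEY_STATIC_CAST(int64_t, simde_svcntw())) {
*       simde_svbool_t pg = simde_svwhilelt_b32_s64(i, n);
*       simde_svint32_t v = simde_svld1_s32(pg, &src[i]);
*       simde_svst1_s32(pg, &dst[i], v);
*     }
*   }
*
* simde_svcntw() reports the number of 32-bit lanes per vector,
* simde_svwhilelt_b32_s64() builds a predicate covering the elements
* still remaining, and the predicated load/store leave everything at
* or past n untouched. */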
#include "cnt.h"
#include "ld1.h"
#include "ptest.h"
#include "ptrue.h"
#include "st1.h"
#include "whilelt.h"
#endif /* SIMDE_ARM_SVE_TYPES_H */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/sve/whilelt.h | .h | 34,501 | 844 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_SVE_WHILELT_H)
#define SIMDE_ARM_SVE_WHILELT_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
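/* svwhilelt_bN(op1, op2) produces a predicate whose lane i is active
* while (op1 + i) < op2: the first (op2 - op1) lanes are set, clamped
* to the vector length. The AVX-512 paths below build this directly
* in a mask register: start with all bits set, then shift out
* (width - remaining) bits so exactly 'remaining' low bits survive. */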
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svwhilelt_b8_s32(int32_t op1, int32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svwhilelt_b8_s32(op1, op2);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask64(HEDLEY_STATIC_CAST(__mmask64, 0));
int_fast32_t remaining = (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1));
__mmask64 r = ~HEDLEY_STATIC_CAST(__mmask64, 0);
if (HEDLEY_UNLIKELY(remaining < 64)) {
r >>= 64 - remaining;
}
return simde_svbool_from_mmask64(r);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask32(HEDLEY_STATIC_CAST(__mmask32, 0));
int_fast32_t remaining = (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1));
__mmask32 r = HEDLEY_STATIC_CAST(__mmask32, ~UINT32_C(0));
if (HEDLEY_UNLIKELY(remaining < 32)) {
r >>= 32 - remaining;
}
return simde_svbool_from_mmask32(r);
#else
simde_svint8_t r;
int_fast32_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1));
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntb()) ; i++) {
r.values[i] = (remaining-- > 0) ? ~UINT8_C(0) : UINT8_C(0);
}
return simde_svbool_from_svint8(r);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svwhilelt_b8_s32
#define svwhilelt_b8_s32(op1, op2) simde_svwhilelt_b8_s32(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svwhilelt_b16_s32(int32_t op1, int32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svwhilelt_b16_s32(op1, op2);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask32(HEDLEY_STATIC_CAST(__mmask32, 0));
int_fast32_t remaining = (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1));
__mmask32 r = HEDLEY_STATIC_CAST(__mmask32, ~UINT32_C(0));
if (HEDLEY_UNLIKELY(remaining < 32)) {
r >>= 32 - remaining;
}
return simde_svbool_from_mmask32(r);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask16(HEDLEY_STATIC_CAST(__mmask16, 0));
int_fast32_t remaining = (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1));
__mmask16 r = HEDLEY_STATIC_CAST(__mmask16, ~UINT16_C(0));
if (HEDLEY_UNLIKELY(remaining < 16)) {
r >>= 16 - remaining;
}
return simde_svbool_from_mmask16(r);
#else
simde_svint16_t r;
int_fast32_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1));
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcnth()) ; i++) {
r.values[i] = (remaining-- > 0) ? ~UINT16_C(0) : UINT16_C(0);
}
return simde_svbool_from_svint16(r);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svwhilelt_b16_s32
#define svwhilelt_b16_s32(op1, op2) simde_svwhilelt_b16_s32(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svwhilelt_b32_s32(int32_t op1, int32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svwhilelt_b32_s32(op1, op2);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask16(HEDLEY_STATIC_CAST(__mmask16, 0));
int_fast32_t remaining = (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1));
__mmask16 r = HEDLEY_STATIC_CAST(__mmask16, ~UINT16_C(0));
if (HEDLEY_UNLIKELY(remaining < 16)) {
r >>= 16 - remaining;
}
return simde_svbool_from_mmask16(r);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask8(HEDLEY_STATIC_CAST(__mmask8, 0));
int_fast32_t remaining = (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1));
__mmask8 r = HEDLEY_STATIC_CAST(__mmask8, ~UINT8_C(0));
if (HEDLEY_UNLIKELY(remaining < 8)) {
r >>= 8 - remaining;
}
return simde_svbool_from_mmask8(r);
#else
simde_svint32_t r;
int_fast32_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1));
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntw()) ; i++) {
r.values[i] = (remaining-- > 0) ? ~INT32_C(0) : INT32_C(0);
}
return simde_svbool_from_svint32(r);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svwhilelt_b32_s32
#define svwhilelt_b32_s32(op1, op2) simde_svwhilelt_b32_s32(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svwhilelt_b64_s32(int32_t op1, int32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svwhilelt_b64_s32(op1, op2);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask8(HEDLEY_STATIC_CAST(__mmask8, 0));
int_fast32_t remaining = (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1));
__mmask8 r = HEDLEY_STATIC_CAST(__mmask8, ~UINT8_C(0));
if (HEDLEY_UNLIKELY(remaining < 8)) {
r >>= 8 - remaining;
}
return simde_svbool_from_mmask8(r);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask4(HEDLEY_STATIC_CAST(__mmask8, 0));
int_fast32_t remaining = (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1));
__mmask8 r = HEDLEY_STATIC_CAST(__mmask8, 0x0f);
if (HEDLEY_UNLIKELY(remaining < 4)) {
r >>= 4 - remaining;
}
return simde_svbool_from_mmask4(r);
#else
simde_svint64_t r;
int_fast32_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1));
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntd()) ; i++) {
r.values[i] = (remaining-- > 0) ? ~INT64_C(0) : INT64_C(0);
}
return simde_svbool_from_svint64(r);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svwhilelt_b64_s32
#define svwhilelt_b64_s32(op1, op2) simde_svwhilelt_b64_s32(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svwhilelt_b8_s64(int64_t op1, int64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svwhilelt_b8_s64(op1, op2);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask64(HEDLEY_STATIC_CAST(__mmask64, 0));
int_fast64_t remaining = (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1));
__mmask64 r = ~HEDLEY_STATIC_CAST(__mmask64, 0);
if (HEDLEY_UNLIKELY(remaining < 64)) {
r >>= 64 - remaining;
}
return simde_svbool_from_mmask64(r);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask32(HEDLEY_STATIC_CAST(__mmask32, 0));
int_fast64_t remaining = (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1));
__mmask32 r = HEDLEY_STATIC_CAST(__mmask32, ~UINT32_C(0));
if (HEDLEY_UNLIKELY(remaining < 32)) {
r >>= 32 - remaining;
}
return simde_svbool_from_mmask32(r);
#else
simde_svint8_t r;
int_fast64_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1));
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntb()) ; i++) {
r.values[i] = (remaining-- > 0) ? ~UINT8_C(0) : UINT8_C(0);
}
return simde_svbool_from_svint8(r);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svwhilelt_b8_s64
#define svwhilelt_b8_s64(op1, op2) simde_svwhilelt_b8_s64(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svwhilelt_b16_s64(int64_t op1, int64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svwhilelt_b16_s64(op1, op2);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask32(HEDLEY_STATIC_CAST(__mmask32, 0));
int_fast64_t remaining = (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1));
__mmask32 r = HEDLEY_STATIC_CAST(__mmask32, ~UINT32_C(0));
if (HEDLEY_UNLIKELY(remaining < 32)) {
r >>= 32 - remaining;
}
return simde_svbool_from_mmask32(r);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask16(HEDLEY_STATIC_CAST(__mmask16, 0));
int_fast64_t remaining = (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1));
__mmask16 r = HEDLEY_STATIC_CAST(__mmask16, ~UINT16_C(0));
if (HEDLEY_UNLIKELY(remaining < 16)) {
r >>= 16 - remaining;
}
return simde_svbool_from_mmask16(r);
#else
simde_svint16_t r;
int_fast64_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1));
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcnth()) ; i++) {
r.values[i] = (remaining-- > 0) ? ~UINT16_C(0) : UINT16_C(0);
}
return simde_svbool_from_svint16(r);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svwhilelt_b16_s64
#define svwhilelt_b16_s64(op1, op2) simde_svwhilelt_b16_s64(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svwhilelt_b32_s64(int64_t op1, int64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svwhilelt_b32_s64(op1, op2);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask16(HEDLEY_STATIC_CAST(__mmask16, 0));
int_fast64_t remaining = (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1));
__mmask16 r = HEDLEY_STATIC_CAST(__mmask16, ~UINT16_C(0));
if (HEDLEY_UNLIKELY(remaining < 16)) {
r >>= 16 - remaining;
}
return simde_svbool_from_mmask16(r);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask8(HEDLEY_STATIC_CAST(__mmask8, 0));
int_fast64_t remaining = (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1));
__mmask8 r = HEDLEY_STATIC_CAST(__mmask8, ~UINT8_C(0));
if (HEDLEY_UNLIKELY(remaining < 8)) {
r >>= 8 - remaining;
}
return simde_svbool_from_mmask8(r);
#else
simde_svint32_t r;
int_fast64_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1));
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntw()) ; i++) {
r.values[i] = (remaining-- > 0) ? ~INT32_C(0) : INT32_C(0);
}
return simde_svbool_from_svint32(r);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svwhilelt_b32_s64
#define svwhilelt_b32_s64(op1, op2) simde_svwhilelt_b32_s64(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svwhilelt_b64_s64(int64_t op1, int64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svwhilelt_b64_s64(op1, op2);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask8(HEDLEY_STATIC_CAST(__mmask8, 0));
int_fast64_t remaining = (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1));
__mmask8 r = HEDLEY_STATIC_CAST(__mmask8, ~UINT8_C(0));
if (HEDLEY_UNLIKELY(remaining < 8)) {
r >>= 8 - remaining;
}
return simde_svbool_from_mmask8(r);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask4(HEDLEY_STATIC_CAST(__mmask8, 0));
int_fast64_t remaining = (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1));
__mmask8 r = HEDLEY_STATIC_CAST(__mmask8, 0x0f);
if (HEDLEY_UNLIKELY(remaining < 4)) {
r >>= 4 - remaining;
}
return simde_svbool_from_mmask4(r);
#else
simde_svint64_t r;
int_fast64_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1));
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntd()) ; i++) {
r.values[i] = (remaining-- > 0) ? ~INT64_C(0) : INT64_C(0);
}
return simde_svbool_from_svint64(r);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svwhilelt_b64_s64
#define svwhilelt_b64_s64(op1, op2) simde_svwhilelt_b64_s64(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svwhilelt_b8_u32(uint32_t op1, uint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svwhilelt_b8_u32(op1, op2);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask64(HEDLEY_STATIC_CAST(__mmask64, 0));
uint_fast32_t remaining = (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1));
__mmask64 r = ~HEDLEY_STATIC_CAST(__mmask64, 0);
if (HEDLEY_UNLIKELY(remaining < 64)) {
r >>= 64 - remaining;
}
return simde_svbool_from_mmask64(r);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask32(HEDLEY_STATIC_CAST(__mmask32, 0));
uint_fast32_t remaining = (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1));
__mmask32 r = HEDLEY_STATIC_CAST(__mmask32, ~UINT32_C(0));
if (HEDLEY_UNLIKELY(remaining < 32)) {
r >>= 32 - remaining;
}
return simde_svbool_from_mmask32(r);
#else
simde_svint8_t r;
uint_fast32_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1));
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntb()) ; i++) {
r.values[i] = (remaining-- > 0) ? ~UINT8_C(0) : UINT8_C(0);
}
return simde_svbool_from_svint8(r);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svwhilelt_b8_u32
#define svwhilelt_b8_u32(op1, op2) simde_svwhilelt_b8_u32(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svwhilelt_b16_u32(uint32_t op1, uint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svwhilelt_b16_u32(op1, op2);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask32(HEDLEY_STATIC_CAST(__mmask32, 0));
uint_fast32_t remaining = (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1));
__mmask32 r = HEDLEY_STATIC_CAST(__mmask32, ~UINT32_C(0));
if (HEDLEY_UNLIKELY(remaining < 32)) {
r >>= 32 - remaining;
}
return simde_svbool_from_mmask32(r);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask16(HEDLEY_STATIC_CAST(__mmask16, 0));
uint_fast32_t remaining = (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1));
__mmask16 r = HEDLEY_STATIC_CAST(__mmask16, ~UINT16_C(0));
if (HEDLEY_UNLIKELY(remaining < 16)) {
r >>= 16 - remaining;
}
return simde_svbool_from_mmask16(r);
#else
simde_svint16_t r;
uint_fast32_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1));
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcnth()) ; i++) {
r.values[i] = (remaining-- > 0) ? ~UINT16_C(0) : UINT16_C(0);
}
return simde_svbool_from_svint16(r);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svwhilelt_b16_u32
#define svwhilelt_b16_u32(op1, op2) simde_svwhilelt_b16_u32(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svwhilelt_b32_u32(uint32_t op1, uint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svwhilelt_b32_u32(op1, op2);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask16(HEDLEY_STATIC_CAST(__mmask16, 0));
uint_fast32_t remaining = (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1));
__mmask16 r = HEDLEY_STATIC_CAST(__mmask16, ~UINT16_C(0));
if (HEDLEY_UNLIKELY(remaining < 16)) {
r >>= 16 - remaining;
}
return simde_svbool_from_mmask16(r);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask8(HEDLEY_STATIC_CAST(__mmask8, 0));
uint_fast32_t remaining = (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1));
__mmask8 r = HEDLEY_STATIC_CAST(__mmask8, ~UINT8_C(0));
if (HEDLEY_UNLIKELY(remaining < 8)) {
r >>= 8 - remaining;
}
return simde_svbool_from_mmask8(r);
#else
simde_svuint32_t r;
uint_fast32_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1));
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntw()) ; i++) {
r.values[i] = (remaining-- > 0) ? ~UINT32_C(0) : UINT32_C(0);
}
return simde_svbool_from_svuint32(r);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svwhilelt_b32_u32
#define svwhilelt_b32_u32(op1, op2) simde_svwhilelt_b32_u32(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svwhilelt_b64_u32(uint32_t op1, uint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svwhilelt_b64_u32(op1, op2);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask8(HEDLEY_STATIC_CAST(__mmask8, 0));
uint_fast32_t remaining = (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1));
__mmask8 r = HEDLEY_STATIC_CAST(__mmask8, ~UINT8_C(0));
if (HEDLEY_UNLIKELY(remaining < 8)) {
r >>= 8 - remaining;
}
return simde_svbool_from_mmask8(r);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask4(HEDLEY_STATIC_CAST(__mmask8, 0));
uint_fast32_t remaining = (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1));
__mmask8 r = HEDLEY_STATIC_CAST(__mmask8, 0x0f);
if (HEDLEY_UNLIKELY(remaining < 4)) {
r >>= 4 - remaining;
}
return simde_svbool_from_mmask4(r);
#else
simde_svint64_t r;
uint_fast32_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1));
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntd()) ; i++) {
r.values[i] = (remaining-- > 0) ? ~INT64_C(0) : INT64_C(0);
}
return simde_svbool_from_svint64(r);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svwhilelt_b64_u32
#define svwhilelt_b64_u32(op1, op2) simde_svwhilelt_b64_u32(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svwhilelt_b8_u64(uint64_t op1, uint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svwhilelt_b8_u64(op1, op2);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask64(HEDLEY_STATIC_CAST(__mmask64, 0));
uint_fast64_t remaining = (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1));
__mmask64 r = ~HEDLEY_STATIC_CAST(__mmask64, 0);
if (HEDLEY_UNLIKELY(remaining < 64)) {
r >>= 64 - remaining;
}
return simde_svbool_from_mmask64(r);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask32(HEDLEY_STATIC_CAST(__mmask32, 0));
uint_fast64_t remaining = (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1));
__mmask32 r = HEDLEY_STATIC_CAST(__mmask32, ~UINT32_C(0));
if (HEDLEY_UNLIKELY(remaining < 32)) {
r >>= 32 - remaining;
}
return simde_svbool_from_mmask32(r);
#else
simde_svint8_t r;
uint_fast64_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1));
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntb()) ; i++) {
r.values[i] = (remaining-- > 0) ? ~UINT8_C(0) : UINT8_C(0);
}
return simde_svbool_from_svint8(r);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svwhilelt_b8_u64
#define svwhilelt_b8_u64(op1, op2) simde_svwhilelt_b8_u64(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svwhilelt_b16_u64(uint64_t op1, uint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svwhilelt_b16_u64(op1, op2);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask32(HEDLEY_STATIC_CAST(__mmask32, 0));
uint_fast64_t remaining = (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1));
__mmask32 r = HEDLEY_STATIC_CAST(__mmask32, ~UINT32_C(0));
if (HEDLEY_UNLIKELY(remaining < 32)) {
r >>= 32 - remaining;
}
return simde_svbool_from_mmask32(r);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask16(HEDLEY_STATIC_CAST(__mmask16, 0));
uint_fast64_t remaining = (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1));
__mmask16 r = HEDLEY_STATIC_CAST(__mmask16, ~UINT16_C(0));
if (HEDLEY_UNLIKELY(remaining < 16)) {
r >>= 16 - remaining;
}
return simde_svbool_from_mmask16(r);
#else
simde_svint16_t r;
uint_fast64_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1));
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcnth()) ; i++) {
r.values[i] = (remaining-- > 0) ? ~UINT16_C(0) : UINT16_C(0);
}
return simde_svbool_from_svint16(r);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svwhilelt_b16_u64
#define svwhilelt_b16_u64(op1, op2) simde_svwhilelt_b16_u64(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svwhilelt_b32_u64(uint64_t op1, uint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svwhilelt_b32_u64(op1, op2);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask16(HEDLEY_STATIC_CAST(__mmask16, 0));
uint_fast64_t remaining = (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1));
__mmask16 r = HEDLEY_STATIC_CAST(__mmask16, ~UINT16_C(0));
if (HEDLEY_UNLIKELY(remaining < 16)) {
r >>= 16 - remaining;
}
return simde_svbool_from_mmask16(r);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask8(HEDLEY_STATIC_CAST(__mmask8, 0));
uint_fast64_t remaining = (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1));
__mmask8 r = HEDLEY_STATIC_CAST(__mmask8, ~UINT8_C(0));
if (HEDLEY_UNLIKELY(remaining < 8)) {
r >>= 8 - remaining;
}
return simde_svbool_from_mmask8(r);
#else
simde_svuint32_t r;
uint_fast64_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1));
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntw()) ; i++) {
r.values[i] = (remaining-- > 0) ? ~UINT32_C(0) : UINT32_C(0);
}
return simde_svbool_from_svuint32(r);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svwhilelt_b32_u64
#define svwhilelt_b32_u64(op1, op2) simde_svwhilelt_b32_u64(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svwhilelt_b64_u64(uint64_t op1, uint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svwhilelt_b64_u64(op1, op2);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask8(HEDLEY_STATIC_CAST(__mmask8, 0));
uint_fast64_t remaining = (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1));
__mmask8 r = HEDLEY_STATIC_CAST(__mmask8, ~UINT8_C(0));
if (HEDLEY_UNLIKELY(remaining < 8)) {
r >>= 8 - remaining;
}
return simde_svbool_from_mmask8(r);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_UNLIKELY(op1 >= op2))
return simde_svbool_from_mmask4(HEDLEY_STATIC_CAST(__mmask8, 0));
uint_fast64_t remaining = (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1));
__mmask8 r = HEDLEY_STATIC_CAST(__mmask8, 0x0f);
if (HEDLEY_UNLIKELY(remaining < 4)) {
r >>= 4 - remaining;
}
return simde_svbool_from_mmask4(r);
#else
simde_svint64_t r;
uint_fast64_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1));
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntd()) ; i++) {
r.values[i] = (remaining-- > 0) ? ~INT64_C(0) : INT64_C(0);
}
return simde_svbool_from_svint64(r);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svwhilelt_b64_u64
#define svwhilelt_b64_u64(op1, op2) simde_svwhilelt_b64_u64(op1, op2)
#endif
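/* Type-generic entry points: C++ translation units get real overloads,
* while C11 and later dispatch through _Generic (via SIMDE_GENERIC_)
* to the matching _s32/_u32/_s64/_u64 suffix. */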
#if defined(__cplusplus)
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b8 ( int32_t op1, int32_t op2) { return simde_svwhilelt_b8_s32(op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b8 ( int64_t op1, int64_t op2) { return simde_svwhilelt_b8_s64(op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b8 (uint32_t op1, uint32_t op2) { return simde_svwhilelt_b8_u32(op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b8 (uint64_t op1, uint64_t op2) { return simde_svwhilelt_b8_u64(op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b16( int32_t op1, int32_t op2) { return simde_svwhilelt_b16_s32(op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b16( int64_t op1, int64_t op2) { return simde_svwhilelt_b16_s64(op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b16(uint32_t op1, uint32_t op2) { return simde_svwhilelt_b16_u32(op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b16(uint64_t op1, uint64_t op2) { return simde_svwhilelt_b16_u64(op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b32( int32_t op1, int32_t op2) { return simde_svwhilelt_b32_s32(op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b32( int64_t op1, int64_t op2) { return simde_svwhilelt_b32_s64(op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b32(uint32_t op1, uint32_t op2) { return simde_svwhilelt_b32_u32(op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b32(uint64_t op1, uint64_t op2) { return simde_svwhilelt_b32_u64(op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b64( int32_t op1, int32_t op2) { return simde_svwhilelt_b64_s32(op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b64( int64_t op1, int64_t op2) { return simde_svwhilelt_b64_s64(op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b64(uint32_t op1, uint32_t op2) { return simde_svwhilelt_b64_u32(op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b64(uint64_t op1, uint64_t op2) { return simde_svwhilelt_b64_u64(op1, op2); }
#elif defined(SIMDE_GENERIC_)
#define simde_svwhilelt_b8(op1, op2) \
(SIMDE_GENERIC_((op1), \
int32_t: simde_svwhilelt_b8_s32, \
uint32_t: simde_svwhilelt_b8_u32, \
int64_t: simde_svwhilelt_b8_s64, \
uint64_t: simde_svwhilelt_b8_u64)((op1), (op2)))
#define simde_svwhilelt_b16(op1, op2) \
(SIMDE_GENERIC_((op1), \
int32_t: simde_svwhilelt_b16_s32, \
uint32_t: simde_svwhilelt_b16_u32, \
int64_t: simde_svwhilelt_b16_s64, \
uint64_t: simde_svwhilelt_b16_u64)((op1), (op2)))
#define simde_svwhilelt_b32(op1, op2) \
(SIMDE_GENERIC_((op1), \
int32_t: simde_svwhilelt_b32_s32, \
uint32_t: simde_svwhilelt_b32_u32, \
int64_t: simde_svwhilelt_b32_s64, \
uint64_t: simde_svwhilelt_b32_u64)((op1), (op2)))
#define simde_svwhilelt_b64(op1, op2) \
(SIMDE_GENERIC_((op1), \
int32_t: simde_svwhilelt_b64_s32, \
uint32_t: simde_svwhilelt_b64_u32, \
int64_t: simde_svwhilelt_b64_s64, \
uint64_t: simde_svwhilelt_b64_u64)((op1), (op2)))
#endif
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef svwhilelt_b8
#undef svwhilelt_b16
#undef svwhilelt_b32
#undef svwhilelt_b64
#define svwhilelt_b8(op1, op2) simde_svwhilelt_b8((op1), (op2))
#define svwhilelt_b16(op1, op2) simde_svwhilelt_b16((op1), (op2))
#define svwhilelt_b32(op1, op2) simde_svwhilelt_b32((op1), (op2))
#define svwhilelt_b64(op1, op2) simde_svwhilelt_b64((op1), (op2))
#endif
HEDLEY_DIAGNOSTIC_POP
#endif /* SIMDE_ARM_SVE_WHILELT_H */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/sve/reinterpret.h | .h | 66,380 | 755 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_SVE_REINTERPRET_H)
#define SIMDE_ARM_SVE_REINTERPRET_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
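/* The svreinterpret_*_* family reuses a vector's bits as another
* element type of the same overall size; no lane values are converted
* or moved. With native SVE each wrapper forwards straight to the
* corresponding intrinsic. */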
#if defined(SIMDE_ARM_SVE_NATIVE)
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8_s16( simde_svint16_t op) { return svreinterpret_s8_s16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8_s32( simde_svint32_t op) { return svreinterpret_s8_s32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8_s64( simde_svint64_t op) { return svreinterpret_s8_s64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8_u8( simde_svuint8_t op) { return svreinterpret_s8_u8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8_u16( simde_svuint16_t op) { return svreinterpret_s8_u16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8_u32( simde_svuint32_t op) { return svreinterpret_s8_u32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8_u64( simde_svuint64_t op) { return svreinterpret_s8_u64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8_f16( simde_svfloat16_t op) { return svreinterpret_s8_f16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8_f32( simde_svfloat32_t op) { return svreinterpret_s8_f32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8_f64( simde_svfloat64_t op) { return svreinterpret_s8_f64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16_s8( simde_svint8_t op) { return svreinterpret_s16_s8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16_s32( simde_svint32_t op) { return svreinterpret_s16_s32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16_s64( simde_svint64_t op) { return svreinterpret_s16_s64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16_u8( simde_svuint8_t op) { return svreinterpret_s16_u8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16_u16( simde_svuint16_t op) { return svreinterpret_s16_u16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16_u32( simde_svuint32_t op) { return svreinterpret_s16_u32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16_u64( simde_svuint64_t op) { return svreinterpret_s16_u64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16_f16( simde_svfloat16_t op) { return svreinterpret_s16_f16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16_f32( simde_svfloat32_t op) { return svreinterpret_s16_f32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16_f64( simde_svfloat64_t op) { return svreinterpret_s16_f64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32_s8( simde_svint8_t op) { return svreinterpret_s32_s8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32_s16( simde_svint16_t op) { return svreinterpret_s32_s16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32_s64( simde_svint64_t op) { return svreinterpret_s32_s64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32_u8( simde_svuint8_t op) { return svreinterpret_s32_u8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32_u16( simde_svuint16_t op) { return svreinterpret_s32_u16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32_u32( simde_svuint32_t op) { return svreinterpret_s32_u32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32_u64( simde_svuint64_t op) { return svreinterpret_s32_u64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32_f16( simde_svfloat16_t op) { return svreinterpret_s32_f16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32_f32( simde_svfloat32_t op) { return svreinterpret_s32_f32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32_f64( simde_svfloat64_t op) { return svreinterpret_s32_f64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64_s8( simde_svint8_t op) { return svreinterpret_s64_s8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64_s16( simde_svint16_t op) { return svreinterpret_s64_s16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64_s32( simde_svint32_t op) { return svreinterpret_s64_s32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64_u8( simde_svuint8_t op) { return svreinterpret_s64_u8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64_u16( simde_svuint16_t op) { return svreinterpret_s64_u16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64_u32( simde_svuint32_t op) { return svreinterpret_s64_u32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64_u64( simde_svuint64_t op) { return svreinterpret_s64_u64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64_f16( simde_svfloat16_t op) { return svreinterpret_s64_f16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64_f32( simde_svfloat32_t op) { return svreinterpret_s64_f32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64_f64( simde_svfloat64_t op) { return svreinterpret_s64_f64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8_s8( simde_svint8_t op) { return svreinterpret_u8_s8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8_s16( simde_svint16_t op) { return svreinterpret_u8_s16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8_s32( simde_svint32_t op) { return svreinterpret_u8_s32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8_s64( simde_svint64_t op) { return svreinterpret_u8_s64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8_u16( simde_svuint16_t op) { return svreinterpret_u8_u16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8_u32( simde_svuint32_t op) { return svreinterpret_u8_u32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8_u64( simde_svuint64_t op) { return svreinterpret_u8_u64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8_f16( simde_svfloat16_t op) { return svreinterpret_u8_f16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8_f32( simde_svfloat32_t op) { return svreinterpret_u8_f32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8_f64( simde_svfloat64_t op) { return svreinterpret_u8_f64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16_s8( simde_svint8_t op) { return svreinterpret_u16_s8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16_s16( simde_svint16_t op) { return svreinterpret_u16_s16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16_s32( simde_svint32_t op) { return svreinterpret_u16_s32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16_s64( simde_svint64_t op) { return svreinterpret_u16_s64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16_u8( simde_svuint8_t op) { return svreinterpret_u16_u8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16_u32( simde_svuint32_t op) { return svreinterpret_u16_u32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16_u64( simde_svuint64_t op) { return svreinterpret_u16_u64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16_f16( simde_svfloat16_t op) { return svreinterpret_u16_f16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16_f32( simde_svfloat32_t op) { return svreinterpret_u16_f32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16_f64( simde_svfloat64_t op) { return svreinterpret_u16_f64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32_s8( simde_svint8_t op) { return svreinterpret_u32_s8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32_s16( simde_svint16_t op) { return svreinterpret_u32_s16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32_s32( simde_svint32_t op) { return svreinterpret_u32_s32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32_s64( simde_svint64_t op) { return svreinterpret_u32_s64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32_u8( simde_svuint8_t op) { return svreinterpret_u32_u8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32_u16( simde_svuint16_t op) { return svreinterpret_u32_u16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32_u64( simde_svuint64_t op) { return svreinterpret_u32_u64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32_f16( simde_svfloat16_t op) { return svreinterpret_u32_f16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32_f32( simde_svfloat32_t op) { return svreinterpret_u32_f32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32_f64( simde_svfloat64_t op) { return svreinterpret_u32_f64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64_s8( simde_svint8_t op) { return svreinterpret_u64_s8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64_s16( simde_svint16_t op) { return svreinterpret_u64_s16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64_s32( simde_svint32_t op) { return svreinterpret_u64_s32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64_s64( simde_svint64_t op) { return svreinterpret_u64_s64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64_u8( simde_svuint8_t op) { return svreinterpret_u64_u8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64_u16( simde_svuint16_t op) { return svreinterpret_u64_u16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64_u32( simde_svuint32_t op) { return svreinterpret_u64_u32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64_f16( simde_svfloat16_t op) { return svreinterpret_u64_f16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64_f32( simde_svfloat32_t op) { return svreinterpret_u64_f32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64_f64( simde_svfloat64_t op) { return svreinterpret_u64_f64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16_s8( simde_svint8_t op) { return svreinterpret_f16_s8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16_s16( simde_svint16_t op) { return svreinterpret_f16_s16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16_s32( simde_svint32_t op) { return svreinterpret_f16_s32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16_s64( simde_svint64_t op) { return svreinterpret_f16_s64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16_u8( simde_svuint8_t op) { return svreinterpret_f16_u8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16_u16( simde_svuint16_t op) { return svreinterpret_f16_u16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16_u32( simde_svuint32_t op) { return svreinterpret_f16_u32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16_u64( simde_svuint64_t op) { return svreinterpret_f16_u64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16_f32( simde_svfloat32_t op) { return svreinterpret_f16_f32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16_f64( simde_svfloat64_t op) { return svreinterpret_f16_f64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32_s8( simde_svint8_t op) { return svreinterpret_f32_s8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32_s16( simde_svint16_t op) { return svreinterpret_f32_s16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32_s32( simde_svint32_t op) { return svreinterpret_f32_s32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32_s64( simde_svint64_t op) { return svreinterpret_f32_s64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32_u8( simde_svuint8_t op) { return svreinterpret_f32_u8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32_u16( simde_svuint16_t op) { return svreinterpret_f32_u16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32_u32( simde_svuint32_t op) { return svreinterpret_f32_u32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32_u64( simde_svuint64_t op) { return svreinterpret_f32_u64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32_f16( simde_svfloat16_t op) { return svreinterpret_f32_f16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32_f64( simde_svfloat64_t op) { return svreinterpret_f32_f64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64_s8( simde_svint8_t op) { return svreinterpret_f64_s8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64_s16( simde_svint16_t op) { return svreinterpret_f64_s16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64_s32( simde_svint32_t op) { return svreinterpret_f64_s32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64_s64( simde_svint64_t op) { return svreinterpret_f64_s64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64_u8( simde_svuint8_t op) { return svreinterpret_f64_u8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64_u16( simde_svuint16_t op) { return svreinterpret_f64_u16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64_u32( simde_svuint32_t op) { return svreinterpret_f64_u32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64_u64( simde_svuint64_t op) { return svreinterpret_f64_u64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64_f16( simde_svfloat16_t op) { return svreinterpret_f64_f16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64_f32( simde_svfloat32_t op) { return svreinterpret_f64_f32(op); }
#else
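/* No native SVE: each reinterpret below is generated by
 * SIMDE_DEFINE_CONVERSION_FUNCTION_, which is defined earlier in this
 * header and is not shown here. As an illustrative sketch only -- the
 * real expansion lives with the macro definition -- such a function
 * presumably just copies the raw bits between representations:
 *
 *   simde_svint8_t
 *   simde_svreinterpret_s8_s16(simde_svint16_t op) {
 *     simde_svint8_t r;
 *     simde_memcpy(&r, &op, sizeof(r));   // bit-level reinterpretation
 *     return r;
 *   }
 */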
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s8_s16, simde_svint8_t, simde_svint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s8_s32, simde_svint8_t, simde_svint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s8_s64, simde_svint8_t, simde_svint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s8_u8, simde_svint8_t, simde_svuint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s8_u16, simde_svint8_t, simde_svuint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s8_u32, simde_svint8_t, simde_svuint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s8_u64, simde_svint8_t, simde_svuint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s8_f16, simde_svint8_t, simde_svfloat16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s8_f32, simde_svint8_t, simde_svfloat32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s8_f64, simde_svint8_t, simde_svfloat64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s16_s8, simde_svint16_t, simde_svint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s16_s32, simde_svint16_t, simde_svint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s16_s64, simde_svint16_t, simde_svint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s16_u8, simde_svint16_t, simde_svuint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s16_u16, simde_svint16_t, simde_svuint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s16_u32, simde_svint16_t, simde_svuint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s16_u64, simde_svint16_t, simde_svuint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s16_f16, simde_svint16_t, simde_svfloat16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s16_f32, simde_svint16_t, simde_svfloat32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s16_f64, simde_svint16_t, simde_svfloat64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s32_s8, simde_svint32_t, simde_svint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s32_s16, simde_svint32_t, simde_svint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s32_s64, simde_svint32_t, simde_svint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s32_u8, simde_svint32_t, simde_svuint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s32_u16, simde_svint32_t, simde_svuint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s32_u32, simde_svint32_t, simde_svuint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s32_u64, simde_svint32_t, simde_svuint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s32_f16, simde_svint32_t, simde_svfloat16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s32_f32, simde_svint32_t, simde_svfloat32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s32_f64, simde_svint32_t, simde_svfloat64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s64_s8, simde_svint64_t, simde_svint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s64_s16, simde_svint64_t, simde_svint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s64_s32, simde_svint64_t, simde_svint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s64_u8, simde_svint64_t, simde_svuint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s64_u16, simde_svint64_t, simde_svuint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s64_u32, simde_svint64_t, simde_svuint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s64_u64, simde_svint64_t, simde_svuint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s64_f16, simde_svint64_t, simde_svfloat16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s64_f32, simde_svint64_t, simde_svfloat32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s64_f64, simde_svint64_t, simde_svfloat64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u8_s8, simde_svuint8_t, simde_svint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u8_s16, simde_svuint8_t, simde_svint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u8_s32, simde_svuint8_t, simde_svint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u8_s64, simde_svuint8_t, simde_svint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u8_u16, simde_svuint8_t, simde_svuint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u8_u32, simde_svuint8_t, simde_svuint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u8_u64, simde_svuint8_t, simde_svuint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u8_f16, simde_svuint8_t, simde_svfloat16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u8_f32, simde_svuint8_t, simde_svfloat32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u8_f64, simde_svuint8_t, simde_svfloat64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u16_s8, simde_svuint16_t, simde_svint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u16_s16, simde_svuint16_t, simde_svint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u16_s32, simde_svuint16_t, simde_svint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u16_s64, simde_svuint16_t, simde_svint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u16_u8, simde_svuint16_t, simde_svuint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u16_u32, simde_svuint16_t, simde_svuint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u16_u64, simde_svuint16_t, simde_svuint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u16_f16, simde_svuint16_t, simde_svfloat16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u16_f32, simde_svuint16_t, simde_svfloat32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u16_f64, simde_svuint16_t, simde_svfloat64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u32_s8, simde_svuint32_t, simde_svint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u32_s16, simde_svuint32_t, simde_svint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u32_s32, simde_svuint32_t, simde_svint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u32_s64, simde_svuint32_t, simde_svint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u32_u8, simde_svuint32_t, simde_svuint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u32_u16, simde_svuint32_t, simde_svuint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u32_u64, simde_svuint32_t, simde_svuint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u32_f16, simde_svuint32_t, simde_svfloat16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u32_f32, simde_svuint32_t, simde_svfloat32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u32_f64, simde_svuint32_t, simde_svfloat64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u64_s8, simde_svuint64_t, simde_svint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u64_s16, simde_svuint64_t, simde_svint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u64_s32, simde_svuint64_t, simde_svint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u64_s64, simde_svuint64_t, simde_svint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u64_u8, simde_svuint64_t, simde_svuint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u64_u16, simde_svuint64_t, simde_svuint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u64_u32, simde_svuint64_t, simde_svuint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u64_f16, simde_svuint64_t, simde_svfloat16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u64_f32, simde_svuint64_t, simde_svfloat32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u64_f64, simde_svuint64_t, simde_svfloat64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f16_s8, simde_svfloat16_t, simde_svint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f16_s16, simde_svfloat16_t, simde_svint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f16_s32, simde_svfloat16_t, simde_svint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f16_s64, simde_svfloat16_t, simde_svint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f16_u8, simde_svfloat16_t, simde_svuint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f16_u16, simde_svfloat16_t, simde_svuint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f16_u32, simde_svfloat16_t, simde_svuint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f16_u64, simde_svfloat16_t, simde_svuint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f16_f32, simde_svfloat16_t, simde_svfloat32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f16_f64, simde_svfloat16_t, simde_svfloat64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f32_s8, simde_svfloat32_t, simde_svint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f32_s16, simde_svfloat32_t, simde_svint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f32_s32, simde_svfloat32_t, simde_svint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f32_s64, simde_svfloat32_t, simde_svint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f32_u8, simde_svfloat32_t, simde_svuint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f32_u16, simde_svfloat32_t, simde_svuint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f32_u32, simde_svfloat32_t, simde_svuint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f32_u64, simde_svfloat32_t, simde_svuint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f32_f16, simde_svfloat32_t, simde_svfloat16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f32_f64, simde_svfloat32_t, simde_svfloat64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f64_s8, simde_svfloat64_t, simde_svint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f64_s16, simde_svfloat64_t, simde_svint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f64_s32, simde_svfloat64_t, simde_svint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f64_s64, simde_svfloat64_t, simde_svint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f64_u8, simde_svfloat64_t, simde_svuint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f64_u16, simde_svfloat64_t, simde_svuint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f64_u32, simde_svfloat64_t, simde_svuint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f64_u64, simde_svfloat64_t, simde_svuint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f64_f16, simde_svfloat64_t, simde_svfloat16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f64_f32, simde_svfloat64_t, simde_svfloat32_t)
#endif
#if defined(__cplusplus)
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8( simde_svint16_t op) { return simde_svreinterpret_s8_s16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8( simde_svint32_t op) { return simde_svreinterpret_s8_s32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8( simde_svint64_t op) { return simde_svreinterpret_s8_s64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8( simde_svuint8_t op) { return simde_svreinterpret_s8_u8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8( simde_svuint16_t op) { return simde_svreinterpret_s8_u16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8( simde_svuint32_t op) { return simde_svreinterpret_s8_u32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8( simde_svuint64_t op) { return simde_svreinterpret_s8_u64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8( simde_svfloat16_t op) { return simde_svreinterpret_s8_f16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8( simde_svfloat32_t op) { return simde_svreinterpret_s8_f32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8( simde_svfloat64_t op) { return simde_svreinterpret_s8_f64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16( simde_svint8_t op) { return simde_svreinterpret_s16_s8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16( simde_svint32_t op) { return simde_svreinterpret_s16_s32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16( simde_svint64_t op) { return simde_svreinterpret_s16_s64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16( simde_svuint8_t op) { return simde_svreinterpret_s16_u8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16( simde_svuint16_t op) { return simde_svreinterpret_s16_u16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16( simde_svuint32_t op) { return simde_svreinterpret_s16_u32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16( simde_svuint64_t op) { return simde_svreinterpret_s16_u64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16( simde_svfloat16_t op) { return simde_svreinterpret_s16_f16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16( simde_svfloat32_t op) { return simde_svreinterpret_s16_f32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16( simde_svfloat64_t op) { return simde_svreinterpret_s16_f64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32( simde_svint8_t op) { return simde_svreinterpret_s32_s8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32( simde_svint16_t op) { return simde_svreinterpret_s32_s16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32( simde_svint64_t op) { return simde_svreinterpret_s32_s64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32( simde_svuint8_t op) { return simde_svreinterpret_s32_u8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32( simde_svuint16_t op) { return simde_svreinterpret_s32_u16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32( simde_svuint32_t op) { return simde_svreinterpret_s32_u32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32( simde_svuint64_t op) { return simde_svreinterpret_s32_u64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32( simde_svfloat16_t op) { return simde_svreinterpret_s32_f16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32( simde_svfloat32_t op) { return simde_svreinterpret_s32_f32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32( simde_svfloat64_t op) { return simde_svreinterpret_s32_f64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64( simde_svint8_t op) { return simde_svreinterpret_s64_s8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64( simde_svint16_t op) { return simde_svreinterpret_s64_s16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64( simde_svint32_t op) { return simde_svreinterpret_s64_s32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64( simde_svuint8_t op) { return simde_svreinterpret_s64_u8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64( simde_svuint16_t op) { return simde_svreinterpret_s64_u16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64( simde_svuint32_t op) { return simde_svreinterpret_s64_u32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64( simde_svuint64_t op) { return simde_svreinterpret_s64_u64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64( simde_svfloat16_t op) { return simde_svreinterpret_s64_f16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64( simde_svfloat32_t op) { return simde_svreinterpret_s64_f32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64( simde_svfloat64_t op) { return simde_svreinterpret_s64_f64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8( simde_svint8_t op) { return simde_svreinterpret_u8_s8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8( simde_svint16_t op) { return simde_svreinterpret_u8_s16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8( simde_svint32_t op) { return simde_svreinterpret_u8_s32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8( simde_svint64_t op) { return simde_svreinterpret_u8_s64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8( simde_svuint16_t op) { return simde_svreinterpret_u8_u16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8( simde_svuint32_t op) { return simde_svreinterpret_u8_u32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8( simde_svuint64_t op) { return simde_svreinterpret_u8_u64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8( simde_svfloat16_t op) { return simde_svreinterpret_u8_f16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8( simde_svfloat32_t op) { return simde_svreinterpret_u8_f32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8( simde_svfloat64_t op) { return simde_svreinterpret_u8_f64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16( simde_svint8_t op) { return simde_svreinterpret_u16_s8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16( simde_svint16_t op) { return simde_svreinterpret_u16_s16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16( simde_svint32_t op) { return simde_svreinterpret_u16_s32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16( simde_svint64_t op) { return simde_svreinterpret_u16_s64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16( simde_svuint8_t op) { return simde_svreinterpret_u16_u8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16( simde_svuint32_t op) { return simde_svreinterpret_u16_u32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16( simde_svuint64_t op) { return simde_svreinterpret_u16_u64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16( simde_svfloat16_t op) { return simde_svreinterpret_u16_f16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16( simde_svfloat32_t op) { return simde_svreinterpret_u16_f32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16( simde_svfloat64_t op) { return simde_svreinterpret_u16_f64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32( simde_svint8_t op) { return simde_svreinterpret_u32_s8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32( simde_svint16_t op) { return simde_svreinterpret_u32_s16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32( simde_svint32_t op) { return simde_svreinterpret_u32_s32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32( simde_svint64_t op) { return simde_svreinterpret_u32_s64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32( simde_svuint8_t op) { return simde_svreinterpret_u32_u8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32( simde_svuint16_t op) { return simde_svreinterpret_u32_u16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32( simde_svuint64_t op) { return simde_svreinterpret_u32_u64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32( simde_svfloat16_t op) { return simde_svreinterpret_u32_f16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32( simde_svfloat32_t op) { return simde_svreinterpret_u32_f32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32( simde_svfloat64_t op) { return simde_svreinterpret_u32_f64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64( simde_svint8_t op) { return simde_svreinterpret_u64_s8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64( simde_svint16_t op) { return simde_svreinterpret_u64_s16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64( simde_svint32_t op) { return simde_svreinterpret_u64_s32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64( simde_svint64_t op) { return simde_svreinterpret_u64_s64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64( simde_svuint8_t op) { return simde_svreinterpret_u64_u8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64( simde_svuint16_t op) { return simde_svreinterpret_u64_u16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64( simde_svuint32_t op) { return simde_svreinterpret_u64_u32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64( simde_svfloat16_t op) { return simde_svreinterpret_u64_f16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64( simde_svfloat32_t op) { return simde_svreinterpret_u64_f32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64( simde_svfloat64_t op) { return simde_svreinterpret_u64_f64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16( simde_svint8_t op) { return simde_svreinterpret_f16_s8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16( simde_svint16_t op) { return simde_svreinterpret_f16_s16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16( simde_svint32_t op) { return simde_svreinterpret_f16_s32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16( simde_svint64_t op) { return simde_svreinterpret_f16_s64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16( simde_svuint8_t op) { return simde_svreinterpret_f16_u8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16( simde_svuint16_t op) { return simde_svreinterpret_f16_u16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16( simde_svuint32_t op) { return simde_svreinterpret_f16_u32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16( simde_svuint64_t op) { return simde_svreinterpret_f16_u64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16( simde_svfloat32_t op) { return simde_svreinterpret_f16_f32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16( simde_svfloat64_t op) { return simde_svreinterpret_f16_f64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32( simde_svint8_t op) { return simde_svreinterpret_f32_s8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32( simde_svint16_t op) { return simde_svreinterpret_f32_s16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32( simde_svint32_t op) { return simde_svreinterpret_f32_s32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32( simde_svint64_t op) { return simde_svreinterpret_f32_s64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32( simde_svuint8_t op) { return simde_svreinterpret_f32_u8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32( simde_svuint16_t op) { return simde_svreinterpret_f32_u16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32( simde_svuint32_t op) { return simde_svreinterpret_f32_u32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32( simde_svuint64_t op) { return simde_svreinterpret_f32_u64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32( simde_svfloat16_t op) { return simde_svreinterpret_f32_f16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32( simde_svfloat64_t op) { return simde_svreinterpret_f32_f64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64( simde_svint8_t op) { return simde_svreinterpret_f64_s8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64( simde_svint16_t op) { return simde_svreinterpret_f64_s16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64( simde_svint32_t op) { return simde_svreinterpret_f64_s32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64( simde_svint64_t op) { return simde_svreinterpret_f64_s64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64( simde_svuint8_t op) { return simde_svreinterpret_f64_u8(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64( simde_svuint16_t op) { return simde_svreinterpret_f64_u16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64( simde_svuint32_t op) { return simde_svreinterpret_f64_u32(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64( simde_svuint64_t op) { return simde_svreinterpret_f64_u64(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64( simde_svfloat16_t op) { return simde_svreinterpret_f64_f16(op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64( simde_svfloat32_t op) { return simde_svreinterpret_f64_f32(op); }
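/* Illustrative C++ usage (not part of the upstream header): the overload
 * set above lets the destination-only spelling select the conversion
 * from the argument's type, e.g.
 *
 *   simde_svfloat32_t f = ... ;
 *   simde_svuint32_t  u = simde_svreinterpret_u32(f);  // resolves to ..._u32_f32
 */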
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s8, svint8_t, svint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s8, svint8_t, svint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s8, svint8_t, svint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s8, svint8_t, svuint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s8, svint8_t, svuint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s8, svint8_t, svuint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s8, svint8_t, svuint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s8, svint8_t, svfloat16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s8, svint8_t, svfloat32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s8, svint8_t, svfloat64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s16, svint16_t, svint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s16, svint16_t, svint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s16, svint16_t, svint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s16, svint16_t, svuint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s16, svint16_t, svuint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s16, svint16_t, svuint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s16, svint16_t, svuint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s16, svint16_t, svfloat16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s16, svint16_t, svfloat32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s16, svint16_t, svfloat64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s32, svint32_t, svint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s32, svint32_t, svint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s32, svint32_t, svint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s32, svint32_t, svuint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s32, svint32_t, svuint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s32, svint32_t, svuint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s32, svint32_t, svuint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s32, svint32_t, svfloat16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s32, svint32_t, svfloat32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s32, svint32_t, svfloat64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s64, svint64_t, svint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s64, svint64_t, svint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s64, svint64_t, svint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s64, svint64_t, svuint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s64, svint64_t, svuint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s64, svint64_t, svuint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s64, svint64_t, svuint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s64, svint64_t, svfloat16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s64, svint64_t, svfloat32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s64, svint64_t, svfloat64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u8, svuint8_t, svint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u8, svuint8_t, svint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u8, svuint8_t, svint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u8, svuint8_t, svint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u8, svuint8_t, svuint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u8, svuint8_t, svuint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u8, svuint8_t, svuint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u8, svuint8_t, svfloat16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u8, svuint8_t, svfloat32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u8, svuint8_t, svfloat64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u16, svuint16_t, svint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u16, svuint16_t, svint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u16, svuint16_t, svint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u16, svuint16_t, svint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u16, svuint16_t, svuint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u16, svuint16_t, svuint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u16, svuint16_t, svuint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u16, svuint16_t, svfloat16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u16, svuint16_t, svfloat32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u16, svuint16_t, svfloat64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u32, svuint32_t, svint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u32, svuint32_t, svint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u32, svuint32_t, svint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u32, svuint32_t, svint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u32, svuint32_t, svuint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u32, svuint32_t, svuint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u32, svuint32_t, svuint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u32, svuint32_t, svfloat16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u32, svuint32_t, svfloat32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u32, svuint32_t, svfloat64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u64, svuint64_t, svint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u64, svuint64_t, svint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u64, svuint64_t, svint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u64, svuint64_t, svint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u64, svuint64_t, svuint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u64, svuint64_t, svuint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u64, svuint64_t, svuint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u64, svuint64_t, svfloat16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u64, svuint64_t, svfloat32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u64, svuint64_t, svfloat64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f16, svfloat16_t, svint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f16, svfloat16_t, svint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f16, svfloat16_t, svint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f16, svfloat16_t, svint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f16, svfloat16_t, svuint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f16, svfloat16_t, svuint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f16, svfloat16_t, svuint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f16, svfloat16_t, svuint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f16, svfloat16_t, svfloat32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f16, svfloat16_t, svfloat64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f32, svfloat32_t, svint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f32, svfloat32_t, svint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f32, svfloat32_t, svint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f32, svfloat32_t, svint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f32, svfloat32_t, svuint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f32, svfloat32_t, svuint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f32, svfloat32_t, svuint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f32, svfloat32_t, svuint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f32, svfloat32_t, svfloat16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f32, svfloat32_t, svfloat64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f64, svfloat64_t, svint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f64, svfloat64_t, svint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f64, svfloat64_t, svint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f64, svfloat64_t, svint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f64, svfloat64_t, svuint8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f64, svfloat64_t, svuint16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f64, svfloat64_t, svuint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f64, svfloat64_t, svuint64_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f64, svfloat64_t, svfloat16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f64, svfloat64_t, svfloat32_t)
#endif /* defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) */
#elif defined(SIMDE_GENERIC_)
#define simde_svreinterpret_s8(op) \
(_Generic((op), \
simde_svint16_t: simde_svreinterpret_s8_s16, \
simde_svint32_t: simde_svreinterpret_s8_s32, \
simde_svint64_t: simde_svreinterpret_s8_s64, \
simde_svuint8_t: simde_svreinterpret_s8_u8, \
simde_svuint16_t: simde_svreinterpret_s8_u16, \
simde_svuint32_t: simde_svreinterpret_s8_u32, \
simde_svuint64_t: simde_svreinterpret_s8_u64, \
simde_svfloat16_t: simde_svreinterpret_s8_f16, \
simde_svfloat32_t: simde_svreinterpret_s8_f32, \
simde_svfloat64_t: simde_svreinterpret_s8_f64)(op))
#define simde_svreinterpret_s16(op) \
(_Generic((op), \
simde_svint8_t: simde_svreinterpret_s16_s8, \
simde_svint32_t: simde_svreinterpret_s16_s32, \
simde_svint64_t: simde_svreinterpret_s16_s64, \
simde_svuint8_t: simde_svreinterpret_s16_u8, \
simde_svuint16_t: simde_svreinterpret_s16_u16, \
simde_svuint32_t: simde_svreinterpret_s16_u32, \
simde_svuint64_t: simde_svreinterpret_s16_u64, \
simde_svfloat16_t: simde_svreinterpret_s16_f16, \
simde_svfloat32_t: simde_svreinterpret_s16_f32, \
simde_svfloat64_t: simde_svreinterpret_s16_f64)(op))
#define simde_svreinterpret_s32(op) \
(_Generic((op), \
simde_svint8_t: simde_svreinterpret_s32_s8, \
simde_svint16_t: simde_svreinterpret_s32_s16, \
simde_svint64_t: simde_svreinterpret_s32_s64, \
simde_svuint8_t: simde_svreinterpret_s32_u8, \
simde_svuint16_t: simde_svreinterpret_s32_u16, \
simde_svuint32_t: simde_svreinterpret_s32_u32, \
simde_svuint64_t: simde_svreinterpret_s32_u64, \
simde_svfloat16_t: simde_svreinterpret_s32_f16, \
simde_svfloat32_t: simde_svreinterpret_s32_f32, \
simde_svfloat64_t: simde_svreinterpret_s32_f64)(op))
#define simde_svreinterpret_s64(op) \
(_Generic((op), \
simde_svint8_t: simde_svreinterpret_s64_s8, \
simde_svint16_t: simde_svreinterpret_s64_s16, \
simde_svint32_t: simde_svreinterpret_s64_s32, \
simde_svuint8_t: simde_svreinterpret_s64_u8, \
simde_svuint16_t: simde_svreinterpret_s64_u16, \
simde_svuint32_t: simde_svreinterpret_s64_u32, \
simde_svuint64_t: simde_svreinterpret_s64_u64, \
simde_svfloat16_t: simde_svreinterpret_s64_f16, \
simde_svfloat32_t: simde_svreinterpret_s64_f32, \
simde_svfloat64_t: simde_svreinterpret_s64_f64)(op))
#define simde_svreinterpret_u8(op) \
(_Generic((op), \
simde_svint8_t: simde_svreinterpret_u8_s8, \
simde_svint16_t: simde_svreinterpret_u8_s16, \
simde_svint32_t: simde_svreinterpret_u8_s32, \
simde_svint64_t: simde_svreinterpret_u8_s64, \
simde_svuint16_t: simde_svreinterpret_u8_u16, \
simde_svuint32_t: simde_svreinterpret_u8_u32, \
simde_svuint64_t: simde_svreinterpret_u8_u64, \
simde_svfloat16_t: simde_svreinterpret_u8_f16, \
simde_svfloat32_t: simde_svreinterpret_u8_f32, \
simde_svfloat64_t: simde_svreinterpret_u8_f64)(op))
#define simde_svreinterpret_u16(op) \
(_Generic((op), \
simde_svint8_t: simde_svreinterpret_u16_s8, \
simde_svint16_t: simde_svreinterpret_u16_s16, \
simde_svint32_t: simde_svreinterpret_u16_s32, \
simde_svint64_t: simde_svreinterpret_u16_s64, \
simde_svuint8_t: simde_svreinterpret_u16_u8, \
simde_svuint32_t: simde_svreinterpret_u16_u32, \
simde_svuint64_t: simde_svreinterpret_u16_u64, \
simde_svfloat16_t: simde_svreinterpret_u16_f16, \
simde_svfloat32_t: simde_svreinterpret_u16_f32, \
simde_svfloat64_t: simde_svreinterpret_u16_f64)(op))
#define simde_svreinterpret_u32(op) \
(_Generic((op), \
simde_svint8_t: simde_svreinterpret_u32_s8, \
simde_svint16_t: simde_svreinterpret_u32_s16, \
simde_svint32_t: simde_svreinterpret_u32_s32, \
simde_svint64_t: simde_svreinterpret_u32_s64, \
simde_svuint8_t: simde_svreinterpret_u32_u8, \
simde_svuint16_t: simde_svreinterpret_u32_u16, \
simde_svuint64_t: simde_svreinterpret_u32_u64, \
simde_svfloat16_t: simde_svreinterpret_u32_f16, \
simde_svfloat32_t: simde_svreinterpret_u32_f32, \
simde_svfloat64_t: simde_svreinterpret_u32_f64)(op))
#define simde_svreinterpret_u64(op) \
(_Generic((op), \
simde_svint8_t: simde_svreinterpret_u64_s8, \
simde_svint16_t: simde_svreinterpret_u64_s16, \
simde_svint32_t: simde_svreinterpret_u64_s32, \
simde_svint64_t: simde_svreinterpret_u64_s64, \
simde_svuint8_t: simde_svreinterpret_u64_u8, \
simde_svuint16_t: simde_svreinterpret_u64_u16, \
simde_svuint32_t: simde_svreinterpret_u64_u32, \
simde_svfloat16_t: simde_svreinterpret_u64_f16, \
simde_svfloat32_t: simde_svreinterpret_u64_f32, \
simde_svfloat64_t: simde_svreinterpret_u64_f64)(op))
#define simde_svreinterpret_f16(op) \
(_Generic((op), \
simde_svint8_t: simde_svreinterpret_f16_s8, \
simde_svint16_t: simde_svreinterpret_f16_s16, \
simde_svint32_t: simde_svreinterpret_f16_s32, \
simde_svint64_t: simde_svreinterpret_f16_s64, \
simde_svuint8_t: simde_svreinterpret_f16_u8, \
simde_svuint16_t: simde_svreinterpret_f16_u16, \
simde_svuint32_t: simde_svreinterpret_f16_u32, \
simde_svuint64_t: simde_svreinterpret_f16_u64, \
simde_svfloat32_t: simde_svreinterpret_f16_f32, \
simde_svfloat64_t: simde_svreinterpret_f16_f64)(op))
#define simde_svreinterpret_f32(op) \
(_Generic((op), \
simde_svint8_t: simde_svreinterpret_f32_s8, \
simde_svint16_t: simde_svreinterpret_f32_s16, \
simde_svint32_t: simde_svreinterpret_f32_s32, \
simde_svint64_t: simde_svreinterpret_f32_s64, \
simde_svuint8_t: simde_svreinterpret_f32_u8, \
simde_svuint16_t: simde_svreinterpret_f32_u16, \
simde_svuint32_t: simde_svreinterpret_f32_u32, \
simde_svuint64_t: simde_svreinterpret_f32_u64, \
simde_svfloat16_t: simde_svreinterpret_f32_f16, \
simde_svfloat64_t: simde_svreinterpret_f32_f64)(op))
#define simde_svreinterpret_f64(op) \
(_Generic((op), \
simde_svint8_t: simde_svreinterpret_f64_s8, \
simde_svint16_t: simde_svreinterpret_f64_s16, \
simde_svint32_t: simde_svreinterpret_f64_s32, \
simde_svint64_t: simde_svreinterpret_f64_s64, \
simde_svuint8_t: simde_svreinterpret_f64_u8, \
simde_svuint16_t: simde_svreinterpret_f64_u16, \
simde_svuint32_t: simde_svreinterpret_f64_u32, \
simde_svuint64_t: simde_svreinterpret_f64_u64, \
simde_svfloat16_t: simde_svreinterpret_f64_f16, \
simde_svfloat32_t: simde_svreinterpret_f64_f32)(op))
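/* Illustrative C11 usage (not part of the upstream header): each macro's
 * name matches its _Generic dispatch targets, so selection happens on the
 * argument's static type, e.g.
 *
 *   simde_svint32_t   i = ... ;
 *   simde_svfloat32_t f = simde_svreinterpret_f32(i);
 *
 * expands to simde_svreinterpret_f32_s32(i). */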
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#define svreinterpret_s8(op) \
(_Generic((op), \
svint16_t: svreinterpret_s8_s16, \
svint32_t: svreinterpret_s8_s32, \
svint64_t: svreinterpret_s8_s64, \
svuint8_t: svreinterpret_s8_u8, \
svuint16_t: svreinterpret_s8_u16, \
svuint32_t: svreinterpret_s8_u32, \
svuint64_t: svreinterpret_s8_u64, \
svfloat16_t: svreinterpret_s8_f16, \
svfloat32_t: svreinterpret_s8_f32, \
svfloat64_t: svreinterpret_s8_f64)(op))
#define svreinterpret_s16(op) \
(_Generic((op), \
svint8_t: svreinterpret_s16_s8, \
svint32_t: svreinterpret_s16_s32, \
svint64_t: svreinterpret_s16_s64, \
svuint8_t: svreinterpret_s16_u8, \
svuint16_t: svreinterpret_s16_u16, \
svuint32_t: svreinterpret_s16_u32, \
svuint64_t: svreinterpret_s16_u64, \
svfloat16_t: svreinterpret_s16_f16, \
svfloat32_t: svreinterpret_s16_f32, \
svfloat64_t: svreinterpret_s16_f64)(op))
#define svreinterpret_s32(op) \
(_Generic((op), \
svint8_t: svreinterpret_s32_s8, \
svint16_t: svreinterpret_s32_s16, \
svint64_t: svreinterpret_s32_s64, \
svuint8_t: svreinterpret_s32_u8, \
svuint16_t: svreinterpret_s32_u16, \
svuint32_t: svreinterpret_s32_u32, \
svuint64_t: svreinterpret_s32_u64, \
svfloat16_t: svreinterpret_s32_f16, \
svfloat32_t: svreinterpret_s32_f32, \
svfloat64_t: svreinterpret_s32_f64)(op))
#define svreinterpret_s64(op) \
(_Generic((op), \
svint8_t: svreinterpret_s64_s8, \
svint16_t: svreinterpret_s64_s16, \
svint32_t: svreinterpret_s64_s32, \
svuint8_t: svreinterpret_s64_u8, \
svuint16_t: svreinterpret_s64_u16, \
svuint32_t: svreinterpret_s64_u32, \
svuint64_t: svreinterpret_s64_u64, \
svfloat16_t: svreinterpret_s64_f16, \
svfloat32_t: svreinterpret_s64_f32, \
svfloat64_t: svreinterpret_s64_f64)(op))
#define svreinterpret_u8(op) \
(_Generic((op), \
svint8_t: svreinterpret_u8_s8, \
svint16_t: svreinterpret_u8_s16, \
svint32_t: svreinterpret_u8_s32, \
svint64_t: svreinterpret_u8_s64, \
svuint16_t: svreinterpret_u8_u16, \
svuint32_t: svreinterpret_u8_u32, \
svuint64_t: svreinterpret_u8_u64, \
svfloat16_t: svreinterpret_u8_f16, \
svfloat32_t: svreinterpret_u8_f32, \
svfloat64_t: svreinterpret_u8_f64)(op))
#define svreinterpret_u16(op) \
(_Generic((op), \
svint8_t: svreinterpret_u16_s8, \
svint16_t: svreinterpret_u16_s16, \
svint32_t: svreinterpret_u16_s32, \
svint64_t: svreinterpret_u16_s64, \
svuint8_t: svreinterpret_u16_u8, \
svuint32_t: svreinterpret_u16_u32, \
svuint64_t: svreinterpret_u16_u64, \
svfloat16_t: svreinterpret_u16_f16, \
svfloat32_t: svreinterpret_u16_f32, \
svfloat64_t: svreinterpret_u16_f64)(op))
#define svreinterpret_u32(op) \
(_Generic((op), \
svint8_t: svreinterpret_u32_s8, \
svint16_t: svreinterpret_u32_s16, \
svint32_t: svreinterpret_u32_s32, \
svint64_t: svreinterpret_u32_s64, \
svuint8_t: svreinterpret_u32_u8, \
svuint16_t: svreinterpret_u32_u16, \
svuint64_t: svreinterpret_u32_u64, \
svfloat16_t: svreinterpret_u32_f16, \
svfloat32_t: svreinterpret_u32_f32, \
svfloat64_t: svreinterpret_u32_f64)(op))
#define svreinterpret_u64(op) \
(_Generic((op), \
svint8_t: svreinterpret_u64_s8, \
svint16_t: svreinterpret_u64_s16, \
svint32_t: svreinterpret_u64_s32, \
svint64_t: svreinterpret_u64_s64, \
svuint8_t: svreinterpret_u64_u8, \
svuint16_t: svreinterpret_u64_u16, \
svuint32_t: svreinterpret_u64_u32, \
svfloat16_t: svreinterpret_u64_f16, \
svfloat32_t: svreinterpret_u64_f32, \
svfloat64_t: svreinterpret_u64_f64)(op))
#define svreinterpret_f16(op) \
(_Generic((op), \
svint8_t: svreinterpret_f16_s8, \
svint16_t: svreinterpret_f16_s16, \
svint32_t: svreinterpret_f16_s32, \
svint64_t: svreinterpret_f16_s64, \
svuint8_t: svreinterpret_f16_u8, \
svuint16_t: svreinterpret_f16_u16, \
svuint32_t: svreinterpret_f16_u32, \
svuint64_t: svreinterpret_f16_u64, \
svfloat32_t: svreinterpret_f16_f32, \
svfloat64_t: svreinterpret_f16_f64)(op))
#define svreinterpret_f32(op) \
(_Generic((op), \
svint8_t: svreinterpret_f32_s8, \
svint16_t: svreinterpret_f32_s16, \
svint32_t: svreinterpret_f32_s32, \
svint64_t: svreinterpret_f32_s64, \
svuint8_t: svreinterpret_f32_u8, \
svuint16_t: svreinterpret_f32_u16, \
svuint32_t: svreinterpret_f32_u32, \
svuint64_t: svreinterpret_f32_u64, \
svfloat16_t: svreinterpret_f32_f16, \
svfloat64_t: svreinterpret_f32_f64)(op))
#define svreinterpret_f64(op) \
(_Generic((op), \
svint8_t: svreinterpret_f64_s8, \
svint16_t: svreinterpret_f64_s16, \
svint32_t: svreinterpret_f64_s32, \
svint64_t: svreinterpret_f64_s64, \
svuint8_t: svreinterpret_f64_u8, \
svuint16_t: svreinterpret_f64_u16, \
svuint32_t: svreinterpret_f64_u32, \
svuint64_t: svreinterpret_f64_u64, \
svfloat16_t: svreinterpret_f64_f16, \
svfloat32_t: svreinterpret_f64_f32)(op))
#endif /* defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) */
#endif
HEDLEY_DIAGNOSTIC_POP
#endif /* SIMDE_ARM_SVE_REINTERPRET_H */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/sve/ptest.h | .h | 2,399 | 72 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_SVE_PTEST_H)
#define SIMDE_ARM_SVE_PTEST_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_FUNCTION_ATTRIBUTES
simde_bool
simde_svptest_first(simde_svbool_t pg, simde_svbool_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svptest_first(pg, op);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
if (HEDLEY_LIKELY(pg.value & 1))
return op.value & 1;
if (pg.value == 0 || op.value == 0)
return 0;
#if defined(_MSC_VER)
unsigned long r = 0;
_BitScanForward64(&r, HEDLEY_STATIC_CAST(uint64_t, pg.value));
return (op.value >> r) & 1;
#else
return (op.value >> __builtin_ctzll(HEDLEY_STATIC_CAST(unsigned long long, pg.value))) & 1;
#endif
#else
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntb()) ; i++) {
if (pg.values_i8[i]) {
return !!op.values_i8[i];
}
}
return 0;
#endif
}
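/* Semantics sketch (illustrative, not part of the upstream header):
 * svptest_first returns true when the first lane governed by pg is also
 * active in op. With an 8-lane predicate, for example:
 *
 *   pg = { 0, 1, 1, 0, ... }   // first governed lane is lane 1
 *   op = { 0, 1, 0, 0, ... }
 *   svptest_first(pg, op) == 1 // lane 1 is set in op
 *
 * The AVX-512 path above locates that lane with a count-trailing-zeros
 * on the predicate's bitmask instead of scanning lane by lane. */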
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svptest_first
#define svptest_first(pg, op) simde_svptest_first(pg, op)
#endif
HEDLEY_DIAGNOSTIC_POP
#endif /* SIMDE_ARM_SVE_PTEST_H */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/sve/sub.h | .h | 55,418 | 1,351 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_SVE_SUB_H)
#define SIMDE_ARM_SVE_SUB_H
#include "types.h"
#include "sel.h"
#include "dup.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svsub_s8_x(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_s8_x(pg, op1, op2);
#else
simde_svint8_t r;
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vsubq_s8(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_sub_epi8(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256i[0] = _mm256_sub_epi8(op1.m256i[0], op2.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_sub_epi8(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_sub_epi8(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_sub(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec - op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i8x16_sub(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values - op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] - op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_s8_x
#define svsub_s8_x(pg, op1, op2) simde_svsub_s8_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svsub_s8_z(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_s8_z(pg, op1, op2);
#else
return simde_x_svsel_s8_z(pg, simde_svsub_s8_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_s8_z
#define svsub_s8_z(pg, op1, op2) simde_svsub_s8_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svsub_s8_m(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_s8_m(pg, op1, op2);
#else
return simde_svsel_s8(pg, simde_svsub_s8_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_s8_m
#define svsub_s8_m(pg, op1, op2) simde_svsub_s8_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svsub_n_s8_x(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_s8_x(pg, op1, op2);
#else
return simde_svsub_s8_x(pg, op1, simde_svdup_n_s8(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_s8_x
#define svsub_n_s8_x(pg, op1, op2) simde_svsub_n_s8_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svsub_n_s8_z(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_s8_z(pg, op1, op2);
#else
return simde_svsub_s8_z(pg, op1, simde_svdup_n_s8(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_s8_z
#define svsub_n_s8_z(pg, op1, op2) simde_svsub_n_s8_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svsub_n_s8_m(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_s8_m(pg, op1, op2);
#else
return simde_svsub_s8_m(pg, op1, simde_svdup_n_s8(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_s8_m
#define svsub_n_s8_m(pg, op1, op2) simde_svsub_n_s8_m(pg, op1, op2)
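/* Note (illustrative): the _n_ variants take a scalar second operand and
 * are implemented by broadcasting it with svdup before reusing the
 * vector form; e.g. svsub_n_s8_x(pg, v, 3) computes v[i] - 3 in every
 * active lane, exactly like svsub_s8_x(pg, v, svdup_n_s8(3)). */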
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svsub_s16_x(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_s16_x(pg, op1, op2);
#else
simde_svint16_t r;
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vsubq_s16(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_sub_epi16(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256i[0] = _mm256_sub_epi16(op1.m256i[0], op2.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_sub_epi16(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_sub_epi16(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_sub(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec - op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i16x8_sub(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values - op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] - op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_s16_x
#define svsub_s16_x(pg, op1, op2) simde_svsub_s16_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svsub_s16_z(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_s16_z(pg, op1, op2);
#else
return simde_x_svsel_s16_z(pg, simde_svsub_s16_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_s16_z
#define svsub_s16_z(pg, op1, op2) simde_svsub_s16_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svsub_s16_m(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_s16_m(pg, op1, op2);
#else
return simde_svsel_s16(pg, simde_svsub_s16_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_s16_m
#define svsub_s16_m(pg, op1, op2) simde_svsub_s16_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svsub_n_s16_x(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_s16_x(pg, op1, op2);
#else
return simde_svsub_s16_x(pg, op1, simde_svdup_n_s16(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_s16_x
#define svsub_n_s16_x(pg, op1, op2) simde_svsub_n_s16_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svsub_n_s16_z(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_s16_z(pg, op1, op2);
#else
return simde_svsub_s16_z(pg, op1, simde_svdup_n_s16(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_s16_z
#define svsub_n_s16_z(pg, op1, op2) simde_svsub_n_s16_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svsub_n_s16_m(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_s16_m(pg, op1, op2);
#else
return simde_svsub_s16_m(pg, op1, simde_svdup_n_s16(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_s16_m
#define svsub_n_s16_m(pg, op1, op2) simde_svsub_n_s16_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svsub_s32_x(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_s32_x(pg, op1, op2);
#else
simde_svint32_t r;
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vsubq_s32(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_sub_epi32(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256i[0] = _mm256_sub_epi32(op1.m256i[0], op2.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_sub_epi32(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_sub_epi32(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_sub(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec - op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i32x4_sub(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values - op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] - op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_s32_x
#define svsub_s32_x(pg, op1, op2) simde_svsub_s32_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svsub_s32_z(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_s32_z(pg, op1, op2);
#else
return simde_x_svsel_s32_z(pg, simde_svsub_s32_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_s32_z
#define svsub_s32_z(pg, op1, op2) simde_svsub_s32_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svsub_s32_m(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_s32_m(pg, op1, op2);
#else
return simde_svsel_s32(pg, simde_svsub_s32_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_s32_m
#define svsub_s32_m(pg, op1, op2) simde_svsub_s32_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svsub_n_s32_x(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_s32_x(pg, op1, op2);
#else
return simde_svsub_s32_x(pg, op1, simde_svdup_n_s32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_s32_x
#define svsub_n_s32_x(pg, op1, op2) simde_svsub_n_s32_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svsub_n_s32_z(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_s32_z(pg, op1, op2);
#else
return simde_svsub_s32_z(pg, op1, simde_svdup_n_s32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_s32_z
#define svsub_n_s32_z(pg, op1, op2) simde_svsub_n_s32_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svsub_n_s32_m(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_s32_m(pg, op1, op2);
#else
return simde_svsub_s32_m(pg, op1, simde_svdup_n_s32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_s32_m
#define svsub_n_s32_m(pg, op1, op2) simde_svsub_n_s32_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svsub_s64_x(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_s64_x(pg, op1, op2);
#else
simde_svint64_t r;
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vsubq_s64(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_sub_epi64(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256i[0] = _mm256_sub_epi64(op1.m256i[0], op2.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_sub_epi64(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_sub_epi64(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r.altivec = vec_sub(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec - op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i64x2_sub(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values - op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] - op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_s64_x
#define svsub_s64_x(pg, op1, op2) simde_svsub_s64_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svsub_s64_z(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_s64_z(pg, op1, op2);
#else
return simde_x_svsel_s64_z(pg, simde_svsub_s64_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_s64_z
#define svsub_s64_z(pg, op1, op2) simde_svsub_s64_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svsub_s64_m(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_s64_m(pg, op1, op2);
#else
return simde_svsel_s64(pg, simde_svsub_s64_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_s64_m
#define svsub_s64_m(pg, op1, op2) simde_svsub_s64_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svsub_n_s64_x(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_s64_x(pg, op1, op2);
#else
return simde_svsub_s64_x(pg, op1, simde_svdup_n_s64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_s64_x
#define svsub_n_s64_x(pg, op1, op2) simde_svsub_n_s64_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svsub_n_s64_z(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_s64_z(pg, op1, op2);
#else
return simde_svsub_s64_z(pg, op1, simde_svdup_n_s64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_s64_z
#define svsub_n_s64_z(pg, op1, op2) simde_svsub_n_s64_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svsub_n_s64_m(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_s64_m(pg, op1, op2);
#else
return simde_svsub_s64_m(pg, op1, simde_svdup_n_s64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_s64_m
#define svsub_n_s64_m(pg, op1, op2) simde_svsub_n_s64_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svsub_u8_x(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_u8_x(pg, op1, op2);
#else
simde_svuint8_t r;
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vsubq_u8(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_sub_epi8(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256i[0] = _mm256_sub_epi8(op1.m256i[0], op2.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_sub_epi8(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_sub_epi8(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_sub(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec - op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i8x16_sub(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values - op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] - op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_u8_x
#define svsub_u8_x(pg, op1, op2) simde_svsub_u8_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svsub_u8_z(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_u8_z(pg, op1, op2);
#else
return simde_x_svsel_u8_z(pg, simde_svsub_u8_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_u8_z
#define svsub_u8_z(pg, op1, op2) simde_svsub_u8_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svsub_u8_m(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_u8_m(pg, op1, op2);
#else
return simde_svsel_u8(pg, simde_svsub_u8_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_u8_m
#define svsub_u8_m(pg, op1, op2) simde_svsub_u8_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svsub_n_u8_x(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_u8_x(pg, op1, op2);
#else
return simde_svsub_u8_x(pg, op1, simde_svdup_n_u8(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_u8_x
#define svsub_n_u8_x(pg, op1, op2) simde_svsub_n_u8_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svsub_n_u8_z(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_u8_z(pg, op1, op2);
#else
return simde_svsub_u8_z(pg, op1, simde_svdup_n_u8(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_u8_z
#define svsub_n_u8_z(pg, op1, op2) simde_svsub_n_u8_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svsub_n_u8_m(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_u8_m(pg, op1, op2);
#else
return simde_svsub_u8_m(pg, op1, simde_svdup_n_u8(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_u8_m
#define svsub_n_u8_m(pg, op1, op2) simde_svsub_n_u8_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svsub_u16_x(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_u16_x(pg, op1, op2);
#else
simde_svuint16_t r;
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vsubq_u16(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_sub_epi16(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256i[0] = _mm256_sub_epi16(op1.m256i[0], op2.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_sub_epi16(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_sub_epi16(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_sub(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec - op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i16x8_sub(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values - op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] - op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_u16_x
#define svsub_u16_x(pg, op1, op2) simde_svsub_u16_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svsub_u16_z(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_u16_z(pg, op1, op2);
#else
return simde_x_svsel_u16_z(pg, simde_svsub_u16_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_u16_z
#define svsub_u16_z(pg, op1, op2) simde_svsub_u16_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svsub_u16_m(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_u16_m(pg, op1, op2);
#else
return simde_svsel_u16(pg, simde_svsub_u16_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_u16_m
#define svsub_u16_m(pg, op1, op2) simde_svsub_u16_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svsub_n_u16_x(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_u16_x(pg, op1, op2);
#else
return simde_svsub_u16_x(pg, op1, simde_svdup_n_u16(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_u16_x
#define svsub_n_u16_x(pg, op1, op2) simde_svsub_n_u16_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svsub_n_u16_z(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_u16_z(pg, op1, op2);
#else
return simde_svsub_u16_z(pg, op1, simde_svdup_n_u16(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_u16_z
#define svsub_n_u16_z(pg, op1, op2) simde_svsub_n_u16_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svsub_n_u16_m(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_u16_m(pg, op1, op2);
#else
return simde_svsub_u16_m(pg, op1, simde_svdup_n_u16(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_u16_m
#define svsub_n_u16_m(pg, op1, op2) simde_svsub_n_u16_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svsub_u32_x(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_u32_x(pg, op1, op2);
#else
simde_svuint32_t r;
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vsubq_u32(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_sub_epi32(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256i[0] = _mm256_sub_epi32(op1.m256i[0], op2.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_sub_epi32(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_sub_epi32(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_sub(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec - op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i32x4_sub(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values - op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] - op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_u32_x
#define svsub_u32_x(pg, op1, op2) simde_svsub_u32_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svsub_u32_z(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_u32_z(pg, op1, op2);
#else
return simde_x_svsel_u32_z(pg, simde_svsub_u32_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_u32_z
#define svsub_u32_z(pg, op1, op2) simde_svsub_u32_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svsub_u32_m(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_u32_m(pg, op1, op2);
#else
return simde_svsel_u32(pg, simde_svsub_u32_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_u32_m
#define svsub_u32_m(pg, op1, op2) simde_svsub_u32_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svsub_n_u32_x(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_u32_x(pg, op1, op2);
#else
return simde_svsub_u32_x(pg, op1, simde_svdup_n_u32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_u32_x
#define svsub_n_u32_x(pg, op1, op2) simde_svsub_n_u32_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svsub_n_u32_z(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_u32_z(pg, op1, op2);
#else
return simde_svsub_u32_z(pg, op1, simde_svdup_n_u32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_u32_z
#define svsub_n_u32_z(pg, op1, op2) simde_svsub_n_u32_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svsub_n_u32_m(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_u32_m(pg, op1, op2);
#else
return simde_svsub_u32_m(pg, op1, simde_svdup_n_u32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_u32_m
#define svsub_n_u32_m(pg, op1, op2) simde_svsub_n_u32_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svsub_u64_x(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_u64_x(pg, op1, op2);
#else
simde_svuint64_t r;
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vsubq_u64(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_sub_epi64(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256i[0] = _mm256_sub_epi64(op1.m256i[0], op2.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_sub_epi64(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_sub_epi64(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r.altivec = vec_sub(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec - op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i64x2_sub(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values - op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] - op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_u64_x
#define svsub_u64_x(pg, op1, op2) simde_svsub_u64_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svsub_u64_z(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_u64_z(pg, op1, op2);
#else
return simde_x_svsel_u64_z(pg, simde_svsub_u64_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_u64_z
#define svsub_u64_z(pg, op1, op2) simde_svsub_u64_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svsub_u64_m(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_u64_m(pg, op1, op2);
#else
return simde_svsel_u64(pg, simde_svsub_u64_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_u64_m
#define svsub_u64_m(pg, op1, op2) simde_svsub_u64_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svsub_n_u64_x(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_u64_x(pg, op1, op2);
#else
return simde_svsub_u64_x(pg, op1, simde_svdup_n_u64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_u64_x
#define svsub_n_u64_x(pg, op1, op2) simde_svsub_n_u64_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svsub_n_u64_z(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_u64_z(pg, op1, op2);
#else
return simde_svsub_u64_z(pg, op1, simde_svdup_n_u64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_u64_z
#define svsub_n_u64_z(pg, op1, op2) simde_svsub_n_u64_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svsub_n_u64_m(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_u64_m(pg, op1, op2);
#else
return simde_svsub_u64_m(pg, op1, simde_svdup_n_u64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_u64_m
#define svsub_n_u64_m(pg, op1, op2) simde_svsub_n_u64_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat32_t
simde_svsub_f32_x(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_f32_x(pg, op1, op2);
#else
simde_svfloat32_t r;
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vsubq_f32(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512 = _mm512_sub_ps(op1.m512, op2.m512);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256[0] = _mm256_sub_ps(op1.m256[0], op2.m256[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256) / sizeof(r.m256[0])) ; i++) {
r.m256[i] = _mm256_sub_ps(op1.m256[i], op2.m256[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128) / sizeof(r.m128[0])) ; i++) {
r.m128[i] = _mm_sub_ps(op1.m128[i], op2.m128[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_sub(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec - op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_f32x4_sub(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values - op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] - op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_f32_x
#define svsub_f32_x(pg, op1, op2) simde_svsub_f32_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat32_t
simde_svsub_f32_z(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_f32_z(pg, op1, op2);
#else
return simde_x_svsel_f32_z(pg, simde_svsub_f32_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_f32_z
#define svsub_f32_z(pg, op1, op2) simde_svsub_f32_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat32_t
simde_svsub_f32_m(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_f32_m(pg, op1, op2);
#else
return simde_svsel_f32(pg, simde_svsub_f32_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_f32_m
#define svsub_f32_m(pg, op1, op2) simde_svsub_f32_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat32_t
simde_svsub_n_f32_x(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_f32_x(pg, op1, op2);
#else
return simde_svsub_f32_x(pg, op1, simde_svdup_n_f32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_f32_x
#define svsub_n_f32_x(pg, op1, op2) simde_svsub_n_f32_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat32_t
simde_svsub_n_f32_z(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_f32_z(pg, op1, op2);
#else
return simde_svsub_f32_z(pg, op1, simde_svdup_n_f32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_f32_z
#define svsub_n_f32_z(pg, op1, op2) simde_svsub_n_f32_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat32_t
simde_svsub_n_f32_m(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_f32_m(pg, op1, op2);
#else
return simde_svsub_f32_m(pg, op1, simde_svdup_n_f32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_f32_m
#define svsub_n_f32_m(pg, op1, op2) simde_svsub_n_f32_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat64_t
simde_svsub_f64_x(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_f64_x(pg, op1, op2);
#else
simde_svfloat64_t r;
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r.neon = vsubq_f64(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512d = _mm512_sub_pd(op1.m512d, op2.m512d);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256d[0] = _mm256_sub_pd(op1.m256d[0], op2.m256d[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256d) / sizeof(r.m256d[0])) ; i++) {
r.m256d[i] = _mm256_sub_pd(op1.m256d[i], op2.m256d[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128d) / sizeof(r.m128d[0])) ; i++) {
r.m128d[i] = _mm_sub_pd(op1.m128d[i], op2.m128d[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r.altivec = vec_sub(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec - op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_f64x2_sub(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values - op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] - op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_f64_x
#define svsub_f64_x(pg, op1, op2) simde_svsub_f64_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat64_t
simde_svsub_f64_z(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_f64_z(pg, op1, op2);
#else
return simde_x_svsel_f64_z(pg, simde_svsub_f64_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_f64_z
#define svsub_f64_z(pg, op1, op2) simde_svsub_f64_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat64_t
simde_svsub_f64_m(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_f64_m(pg, op1, op2);
#else
return simde_svsel_f64(pg, simde_svsub_f64_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_f64_m
#define svsub_f64_m(pg, op1, op2) simde_svsub_f64_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat64_t
simde_svsub_n_f64_x(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_f64_x(pg, op1, op2);
#else
return simde_svsub_f64_x(pg, op1, simde_svdup_n_f64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_f64_x
#define svsub_n_f64_x(pg, op1, op2) simde_svsub_n_f64_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat64_t
simde_svsub_n_f64_z(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_f64_z(pg, op1, op2);
#else
return simde_svsub_f64_z(pg, op1, simde_svdup_n_f64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_f64_z
#define svsub_n_f64_z(pg, op1, op2) simde_svsub_n_f64_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat64_t
simde_svsub_n_f64_m(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsub_n_f64_m(pg, op1, op2);
#else
return simde_svsub_f64_m(pg, op1, simde_svdup_n_f64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsub_n_f64_m
#define svsub_n_f64_m(pg, op1, op2) simde_svsub_n_f64_m(pg, op1, op2)
#endif
#if defined(__cplusplus)
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svsub_x(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { return simde_svsub_s8_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svsub_x(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { return simde_svsub_s16_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svsub_x(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { return simde_svsub_s32_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svsub_x(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { return simde_svsub_s64_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svsub_x(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svsub_u8_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svsub_x(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svsub_u16_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svsub_x(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svsub_u32_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svsub_x(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { return simde_svsub_u64_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svsub_x(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) { return simde_svsub_f32_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svsub_x(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { return simde_svsub_f64_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svsub_z(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { return simde_svsub_s8_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svsub_z(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { return simde_svsub_s16_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svsub_z(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { return simde_svsub_s32_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svsub_z(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { return simde_svsub_s64_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svsub_z(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svsub_u8_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svsub_z(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svsub_u16_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svsub_z(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svsub_u32_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svsub_z(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { return simde_svsub_u64_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svsub_z(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) { return simde_svsub_f32_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svsub_z(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { return simde_svsub_f64_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svsub_m(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { return simde_svsub_s8_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svsub_m(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { return simde_svsub_s16_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svsub_m(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { return simde_svsub_s32_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svsub_m(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { return simde_svsub_s64_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svsub_m(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svsub_u8_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svsub_m(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svsub_u16_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svsub_m(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svsub_u32_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svsub_m(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { return simde_svsub_u64_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svsub_m(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) { return simde_svsub_f32_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svsub_m(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { return simde_svsub_f64_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svsub_x(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { return simde_svsub_n_s8_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svsub_x(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { return simde_svsub_n_s16_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svsub_x(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { return simde_svsub_n_s32_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svsub_x(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { return simde_svsub_n_s64_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svsub_x(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { return simde_svsub_n_u8_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svsub_x(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { return simde_svsub_n_u16_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svsub_x(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { return simde_svsub_n_u32_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svsub_x(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { return simde_svsub_n_u64_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svsub_x(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) { return simde_svsub_n_f32_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svsub_x(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) { return simde_svsub_n_f64_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svsub_z(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { return simde_svsub_n_s8_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svsub_z(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { return simde_svsub_n_s16_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svsub_z(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { return simde_svsub_n_s32_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svsub_z(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { return simde_svsub_n_s64_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svsub_z(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { return simde_svsub_n_u8_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svsub_z(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { return simde_svsub_n_u16_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svsub_z(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { return simde_svsub_n_u32_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svsub_z(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { return simde_svsub_n_u64_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svsub_z(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) { return simde_svsub_n_f32_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svsub_z(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) { return simde_svsub_n_f64_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svsub_m(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { return simde_svsub_n_s8_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svsub_m(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { return simde_svsub_n_s16_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svsub_m(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { return simde_svsub_n_s32_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svsub_m(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { return simde_svsub_n_s64_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svsub_m(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { return simde_svsub_n_u8_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svsub_m(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { return simde_svsub_n_u16_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svsub_m(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { return simde_svsub_n_u32_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svsub_m(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { return simde_svsub_n_u64_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svsub_m(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) { return simde_svsub_n_f32_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svsub_m(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) { return simde_svsub_n_f64_m(pg, op1, op2); }
#elif defined(SIMDE_GENERIC_)
#define simde_svsub_x(pg, op1, op2) \
(SIMDE_GENERIC_((op2), \
simde_svint8_t: simde_svsub_s8_x, \
simde_svint16_t: simde_svsub_s16_x, \
simde_svint32_t: simde_svsub_s32_x, \
simde_svint64_t: simde_svsub_s64_x, \
simde_svuint8_t: simde_svsub_u8_x, \
simde_svuint16_t: simde_svsub_u16_x, \
simde_svuint32_t: simde_svsub_u32_x, \
simde_svuint64_t: simde_svsub_u64_x, \
simde_svfloat32_t: simde_svsub_f32_x, \
simde_svfloat64_t: simde_svsub_f64_x, \
int8_t: simde_svsub_n_s8_x, \
int16_t: simde_svsub_n_s16_x, \
int32_t: simde_svsub_n_s32_x, \
int64_t: simde_svsub_n_s64_x, \
uint8_t: simde_svsub_n_u8_x, \
uint16_t: simde_svsub_n_u16_x, \
uint32_t: simde_svsub_n_u32_x, \
uint64_t: simde_svsub_n_u64_x, \
simde_float32: simde_svsub_n_f32_x, \
simde_float64: simde_svsub_n_f64_x)((pg), (op1), (op2)))
#define simde_svsub_z(pg, op1, op2) \
(SIMDE_GENERIC_((op2), \
simde_svint8_t: simde_svsub_s8_z, \
simde_svint16_t: simde_svsub_s16_z, \
simde_svint32_t: simde_svsub_s32_z, \
simde_svint64_t: simde_svsub_s64_z, \
simde_svuint8_t: simde_svsub_u8_z, \
simde_svuint16_t: simde_svsub_u16_z, \
simde_svuint32_t: simde_svsub_u32_z, \
simde_svuint64_t: simde_svsub_u64_z, \
simde_svfloat32_t: simde_svsub_f32_z, \
simde_svfloat64_t: simde_svsub_f64_z, \
int8_t: simde_svsub_n_s8_z, \
int16_t: simde_svsub_n_s16_z, \
int32_t: simde_svsub_n_s32_z, \
int64_t: simde_svsub_n_s64_z, \
uint8_t: simde_svsub_n_u8_z, \
uint16_t: simde_svsub_n_u16_z, \
uint32_t: simde_svsub_n_u32_z, \
uint64_t: simde_svsub_n_u64_z, \
simde_float32: simde_svsub_n_f32_z, \
simde_float64: simde_svsub_n_f64_z)((pg), (op1), (op2)))
#define simde_svsub_m(pg, op1, op2) \
(SIMDE_GENERIC_((op2), \
simde_svint8_t: simde_svsub_s8_m, \
simde_svint16_t: simde_svsub_s16_m, \
simde_svint32_t: simde_svsub_s32_m, \
simde_svint64_t: simde_svsub_s64_m, \
simde_svuint8_t: simde_svsub_u8_m, \
simde_svuint16_t: simde_svsub_u16_m, \
simde_svuint32_t: simde_svsub_u32_m, \
simde_svuint64_t: simde_svsub_u64_m, \
simde_svfloat32_t: simde_svsub_f32_m, \
simde_svfloat64_t: simde_svsub_f64_m, \
int8_t: simde_svsub_n_s8_m, \
int16_t: simde_svsub_n_s16_m, \
int32_t: simde_svsub_n_s32_m, \
int64_t: simde_svsub_n_s64_m, \
uint8_t: simde_svsub_n_u8_m, \
uint16_t: simde_svsub_n_u16_m, \
uint32_t: simde_svsub_n_u32_m, \
uint64_t: simde_svsub_n_u64_m, \
simde_float32: simde_svsub_n_f32_m, \
simde_float64: simde_svsub_n_f64_m)((pg), (op1), (op2)))
#endif
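/* Usage sketch for the type-generic front end above (illustrative,
 * assuming C11 _Generic or the C++ overloads are available, and given a
 * predicate pg, e.g. from simde_svptrue_b32()):
 *
 *   simde_svfloat32_t a  = simde_svdup_n_f32(2.5f);
 *   simde_svfloat32_t d1 = simde_svsub_x(pg, a, a);     // vector - vector
 *   simde_svfloat32_t d2 = simde_svsub_x(pg, a, 1.0f);  // vector - scalar
 *
 * Both calls resolve to the f32 subtraction; the second dispatches to
 * the simde_svsub_n_f32_x wrapper because op2 is a simde_float32. */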
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef svsub_x
#undef svsub_z
#undef svsub_m
#undef svsub_n_x
#undef svsub_n_z
#undef svsub_n_m
#define svsub_x(pg, op1, op2) simde_svsub_x((pg), (op1), (op2))
#define svsub_z(pg, op1, op2) simde_svsub_z((pg), (op1), (op2))
#define svsub_m(pg, op1, op2) simde_svsub_m((pg), (op1), (op2))
#define svsub_n_x(pg, op1, op2) simde_svsub_x((pg), (op1), (op2))
#define svsub_n_z(pg, op1, op2) simde_svsub_z((pg), (op1), (op2))
#define svsub_n_m(pg, op1, op2) simde_svsub_m((pg), (op1), (op2))
#endif
HEDLEY_DIAGNOSTIC_POP
#endif /* SIMDE_ARM_SVE_SUB_H */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/sve/ptrue.h | .h | 4,711 | 158 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_SVE_PTRUE_H)
#define SIMDE_ARM_SVE_PTRUE_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svptrue_b8(void) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svptrue_b8();
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
simde_svbool_t r;
#if SIMDE_ARM_SVE_VECTOR_SIZE >= 512
r = simde_svbool_from_mmask64(HEDLEY_STATIC_CAST(__mmask64, ~UINT64_C(0)));
#else
r = simde_svbool_from_mmask32(HEDLEY_STATIC_CAST(__mmask32, ~UINT32_C(0)));
#endif
return r;
#else
simde_svint8_t r;
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntb()) ; i++) {
r.values[i] = ~INT8_C(0);
}
return simde_svbool_from_svint8(r);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svptrue_b8
#define svptrue_b8() simde_svptrue_b8()
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svptrue_b16(void) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svptrue_b16();
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
simde_svbool_t r;
#if SIMDE_ARM_SVE_VECTOR_SIZE >= 512
r = simde_svbool_from_mmask32(HEDLEY_STATIC_CAST(__mmask32, ~UINT32_C(0)));
#else
r = simde_svbool_from_mmask16(HEDLEY_STATIC_CAST(__mmask16, ~UINT16_C(0)));
#endif
return r;
#else
simde_svint16_t r;
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcnth()) ; i++) {
r.values[i] = ~INT16_C(0);
}
return simde_svbool_from_svint16(r);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svptrue_b16
#define svptrue_b16() simde_svptrue_b16()
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svptrue_b32(void) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svptrue_b32();
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
simde_svbool_t r;
#if SIMDE_ARM_SVE_VECTOR_SIZE >= 512
r = simde_svbool_from_mmask16(HEDLEY_STATIC_CAST(__mmask16, ~UINT16_C(0)));
#else
r = simde_svbool_from_mmask8(HEDLEY_STATIC_CAST(__mmask8, ~UINT8_C(0)));
#endif
return r;
#else
simde_svint32_t r;
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntw()) ; i++) {
r.values[i] = ~INT32_C(0);
}
return simde_svbool_from_svint32(r);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svptrue_b32
#define svptrue_b32() simde_svptrue_b32()
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svbool_t
simde_svptrue_b64(void) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svptrue_b64();
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
simde_svbool_t r;
#if SIMDE_ARM_SVE_VECTOR_SIZE >= 512
r = simde_svbool_from_mmask8(HEDLEY_STATIC_CAST(__mmask8, ~UINT8_C(0)));
#else
r = simde_svbool_from_mmask4(HEDLEY_STATIC_CAST(__mmask8, ~UINT8_C(0)));
#endif
return r;
#else
simde_svint64_t r;
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntd()) ; i++) {
r.values[i] = ~INT64_C(0);
}
return simde_svbool_from_svint64(r);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svptrue_b64
#define svptrue_b64() simde_svptrue_b64()
#endif
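/* Lane-count sketch (illustrative): on the AVX-512 paths above the mask
 * width is simply the lane count, so each doubling of the element size
 * halves the mask type. For a 512-bit vector:
 *
 *   b8  -> 64 x  8-bit lanes -> __mmask64
 *   b16 -> 32 x 16-bit lanes -> __mmask32
 *   b32 -> 16 x 32-bit lanes -> __mmask16
 *   b64 ->  8 x 64-bit lanes -> __mmask8
 *
 * and each 256-bit (< 512) branch drops to the next narrower mask. */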
HEDLEY_DIAGNOSTIC_POP
#endif /* SIMDE_ARM_SVE_PTRUE_H */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/sve/ld1.h | .h | 14,386 | 359 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
*/
/* Note: we don't have vector implementations for most of these because
* we can't just load everything and mask out the uninteresting bits;
* that might cause a fault, for example if the end of the buffer butts
* up against a protected page.
*
* One thing we might be able to do would be to check if the predicate
* is all ones and, if so, use an unpredicated load instruction. This
* would probably be worthwhile for smaller types, though perhaps not
* for larger types since it would mean branching for every load plus
* the overhead of checking whether all bits are 1. */
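/* As a rough illustration of that idea (a sketch only, not part of this
 * file's API: the helper simde_x_svbool_is_all_true() is hypothetical),
 * the fast path could look like
 *
 *   if (simde_x_svbool_is_all_true(pg)) {
 *     memcpy(&r.values, base, sizeof(r.values));   // unpredicated full-width load
 *   } else {
 *     for (int i = 0 ; i < n ; i++)                // predicated per-element fallback
 *       r.values[i] = pg.values_i8[i] ? base[i] : 0;
 *   }
 */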
#if !defined(SIMDE_ARM_SVE_LD1_H)
#define SIMDE_ARM_SVE_LD1_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svld1_s8(simde_svbool_t pg, const int8_t * base) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svld1_s8(pg, base);
#else
simde_svint8_t r;
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m512i = _mm512_maskz_loadu_epi8(simde_svbool_to_mmask64(pg), base);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m256i[0] = _mm256_maskz_loadu_epi8(simde_svbool_to_mmask32(pg), base);
#else
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntb()) ; i++) {
r.values[i] = pg.values_i8[i] ? base[i] : INT8_C(0);
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svld1_s8
#define svld1_s8(pg, base) simde_svld1_s8((pg), (base))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svld1_s16(simde_svbool_t pg, const int16_t * base) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svld1_s16(pg, base);
#else
simde_svint16_t r;
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m512i = _mm512_maskz_loadu_epi16(simde_svbool_to_mmask32(pg), base);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m256i[0] = _mm256_maskz_loadu_epi16(simde_svbool_to_mmask16(pg), base);
#else
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcnth()) ; i++) {
r.values[i] = pg.values_i16[i] ? base[i] : INT16_C(0);
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svld1_s16
#define svld1_s16(pg, base) simde_svld1_s16((pg), (base))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svld1_s32(simde_svbool_t pg, const int32_t * base) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svld1_s32(pg, base);
#else
simde_svint32_t r;
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m512i = _mm512_maskz_loadu_epi32(simde_svbool_to_mmask16(pg), base);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m256i[0] = _mm256_maskz_loadu_epi32(simde_svbool_to_mmask8(pg), base);
#else
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntw()) ; i++) {
r.values[i] = pg.values_i32[i] ? base[i] : INT32_C(0);
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svld1_s32
#define svld1_s32(pg, base) simde_svld1_s32((pg), (base))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svld1_s64(simde_svbool_t pg, const int64_t * base) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svld1_s64(pg, base);
#else
simde_svint64_t r;
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m512i = _mm512_maskz_loadu_epi64(simde_svbool_to_mmask8(pg), base);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m256i[0] = _mm256_maskz_loadu_epi64(simde_svbool_to_mmask4(pg), base);
#else
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntd()) ; i++) {
r.values[i] = pg.values_i64[i] ? base[i] : INT64_C(0);
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svld1_s64
#define svld1_s64(pg, base) simde_svld1_s64((pg), (base))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svld1_u8(simde_svbool_t pg, const uint8_t * base) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svld1_u8(pg, base);
#else
simde_svuint8_t r;
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m512i = _mm512_maskz_loadu_epi8(simde_svbool_to_mmask64(pg), base);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m256i[0] = _mm256_maskz_loadu_epi8(simde_svbool_to_mmask32(pg), base);
#else
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntb()) ; i++) {
r.values[i] = pg.values_i8[i] ? base[i] : UINT8_C(0);
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svld1_u8
#define svld1_u8(pg, base) simde_svld1_u8((pg), (base))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svld1_u16(simde_svbool_t pg, const uint16_t * base) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svld1_u16(pg, base);
#else
simde_svuint16_t r;
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m512i = _mm512_maskz_loadu_epi16(simde_svbool_to_mmask32(pg), base);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m256i[0] = _mm256_maskz_loadu_epi16(simde_svbool_to_mmask16(pg), base);
#else
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcnth()) ; i++) {
r.values[i] = pg.values_i16[i] ? base[i] : UINT16_C(0);
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svld1_u16
#define svld1_u16(pg, base) simde_svld1_u16((pg), (base))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svld1_u32(simde_svbool_t pg, const uint32_t * base) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svld1_u32(pg, base);
#else
simde_svuint32_t r;
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m512i = _mm512_maskz_loadu_epi32(simde_svbool_to_mmask16(pg), base);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m256i[0] = _mm256_maskz_loadu_epi32(simde_svbool_to_mmask8(pg), base);
#else
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntw()) ; i++) {
r.values[i] = pg.values_i32[i] ? base[i] : UINT32_C(0);
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svld1_u32
#define svld1_u32(pg, base) simde_svld1_u32((pg), (base))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svld1_u64(simde_svbool_t pg, const uint64_t * base) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svld1_u64(pg, base);
#else
simde_svuint64_t r;
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m512i = _mm512_maskz_loadu_epi64(simde_svbool_to_mmask8(pg), base);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m256i[0] = _mm256_maskz_loadu_epi64(simde_svbool_to_mmask4(pg), base);
#else
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntd()) ; i++) {
r.values[i] = pg.values_i64[i] ? base[i] : UINT64_C(0);
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svld1_u64
#define svld1_u64(pg, base) simde_svld1_u64((pg), (base))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat32_t
simde_svld1_f32(simde_svbool_t pg, const simde_float32 * base) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svld1_f32(pg, base);
#else
simde_svfloat32_t r;
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m512 = _mm512_maskz_loadu_ps(simde_svbool_to_mmask16(pg), base);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m256[0] = _mm256_maskz_loadu_ps(simde_svbool_to_mmask8(pg), base);
#else
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntw()) ; i++) {
r.values[i] = pg.values_i32[i] ? base[i] : SIMDE_FLOAT32_C(0.0);
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svld1_f32
#define svld1_f32(pg, base) simde_svld1_f32((pg), (base))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat64_t
simde_svld1_f64(simde_svbool_t pg, const simde_float64 * base) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svld1_f64(pg, base);
#else
simde_svfloat64_t r;
#if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m512d = _mm512_maskz_loadu_pd(simde_svbool_to_mmask8(pg), base);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m256d[0] = _mm256_maskz_loadu_pd(simde_svbool_to_mmask4(pg), base);
#else
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntd()) ; i++) {
r.values[i] = pg.values_i64[i] ? base[i] : SIMDE_FLOAT64_C(0.0);
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svld1_f64
#define svld1_f64(pg, base) simde_svld1_f64((pg), (base))
#endif
#if defined(__cplusplus)
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svld1(simde_svbool_t pg, const int8_t * base) { return simde_svld1_s8 (pg, base); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svld1(simde_svbool_t pg, const int16_t * base) { return simde_svld1_s16(pg, base); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svld1(simde_svbool_t pg, const int32_t * base) { return simde_svld1_s32(pg, base); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svld1(simde_svbool_t pg, const int64_t * base) { return simde_svld1_s64(pg, base); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svld1(simde_svbool_t pg, const uint8_t * base) { return simde_svld1_u8 (pg, base); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svld1(simde_svbool_t pg, const uint16_t * base) { return simde_svld1_u16(pg, base); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svld1(simde_svbool_t pg, const uint32_t * base) { return simde_svld1_u32(pg, base); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svld1(simde_svbool_t pg, const uint64_t * base) { return simde_svld1_u64(pg, base); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svld1(simde_svbool_t pg, const simde_float32 * base) { return simde_svld1_f32(pg, base); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svld1(simde_svbool_t pg, const simde_float64 * base) { return simde_svld1_f64(pg, base); }
#elif defined(SIMDE_GENERIC_)
#define simde_svld1(pg, base) \
(SIMDE_GENERIC_((base), \
const int8_t *: simde_svld1_s8 , \
const int16_t *: simde_svld1_s16, \
const int32_t *: simde_svld1_s32, \
const int64_t *: simde_svld1_s64, \
const uint8_t *: simde_svld1_u8 , \
const uint16_t *: simde_svld1_u16, \
const uint32_t *: simde_svld1_u32, \
const uint64_t *: simde_svld1_u64, \
const simde_float32 *: simde_svld1_f32, \
const simde_float64 *: simde_svld1_f64)(pg, base))
#endif
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef svld1
#define svld1(pg, base) simde_svld1((pg), (base))
#endif
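/* Usage sketch (a hand-written example, not from the upstream test
 * suite): in C++ or with C11 generic selection the pointer type picks
 * the typed variant, here simde_svld1_s32.
 *
 *   int32_t buf[16] = { 0 };
 *   simde_svint32_t v = simde_svld1(simde_svptrue_b32(), buf);
 */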
HEDLEY_DIAGNOSTIC_POP
#endif /* SIMDE_ARM_SVE_LD1_H */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/sve/and.h | .h | 41,391 | 1,004 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_SVE_AND_H)
#define SIMDE_ARM_SVE_AND_H
#include "types.h"
#include "dup.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svand_s8_x(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_s8_x(pg, op1, op2);
#else
simde_svint8_t r;
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vandq_s8(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_and_si512(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256i[0] = _mm256_and_si256(op1.m256i[0], op2.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_and_si256(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_and_si128(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_and(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec & op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_v128_and(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values & op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] & op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_s8_x
#define svand_s8_x(pg, op1, op2) simde_svand_s8_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svand_s8_z(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_s8_z(pg, op1, op2);
#else
return simde_x_svsel_s8_z(pg, simde_svand_s8_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_s8_z
#define svand_s8_z(pg, op1, op2) simde_svand_s8_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svand_s8_m(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_s8_m(pg, op1, op2);
#else
return simde_svsel_s8(pg, simde_svand_s8_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_s8_m
#define svand_s8_m(pg, op1, op2) simde_svand_s8_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svand_n_s8_z(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_s8_z(pg, op1, op2);
#else
return simde_svand_s8_z(pg, op1, simde_svdup_n_s8(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_s8_z
#define svand_n_s8_z(pg, op1, op2) simde_svand_n_s8_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svand_n_s8_m(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_s8_m(pg, op1, op2);
#else
return simde_svand_s8_m(pg, op1, simde_svdup_n_s8(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_s8_m
#define svand_n_s8_m(pg, op1, op2) simde_svand_n_s8_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svand_n_s8_x(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_s8_x(pg, op1, op2);
#else
return simde_svand_s8_x(pg, op1, simde_svdup_n_s8(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_s8_x
#define svand_n_s8_x(pg, op1, op2) simde_svand_n_s8_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svand_s16_x(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_s16_x(pg, op1, op2);
#else
simde_svint16_t r;
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vandq_s16(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_and_si512(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256i[0] = _mm256_and_si256(op1.m256i[0], op2.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_and_si256(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_and_si128(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_and(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec & op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_v128_and(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values & op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] & op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_s16_x
#define svand_s16_x(pg, op1, op2) simde_svand_s16_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svand_s16_z(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_s16_z(pg, op1, op2);
#else
return simde_x_svsel_s16_z(pg, simde_svand_s16_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_s16_z
#define svand_s16_z(pg, op1, op2) simde_svand_s16_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svand_s16_m(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_s16_m(pg, op1, op2);
#else
return simde_svsel_s16(pg, simde_svand_s16_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_s16_m
#define svand_s16_m(pg, op1, op2) simde_svand_s16_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svand_n_s16_z(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_s16_z(pg, op1, op2);
#else
return simde_svand_s16_z(pg, op1, simde_svdup_n_s16(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_s16_z
#define svand_n_s16_z(pg, op1, op2) simde_svand_n_s16_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svand_n_s16_m(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_s16_m(pg, op1, op2);
#else
return simde_svand_s16_m(pg, op1, simde_svdup_n_s16(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_s16_m
#define svand_n_s16_m(pg, op1, op2) simde_svand_n_s16_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svand_n_s16_x(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_s16_x(pg, op1, op2);
#else
return simde_svand_s16_x(pg, op1, simde_svdup_n_s16(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_s16_x
#define svand_n_s16_x(pg, op1, op2) simde_svand_n_s16_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svand_s32_x(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_s32_x(pg, op1, op2);
#else
simde_svint32_t r;
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vandq_s32(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_and_si512(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256i[0] = _mm256_and_si256(op1.m256i[0], op2.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_and_si256(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_and_si128(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_and(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec & op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_v128_and(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values & op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] & op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_s32_x
#define svand_s32_x(pg, op1, op2) simde_svand_s32_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svand_s32_z(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_s32_z(pg, op1, op2);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && ((SIMDE_ARM_SVE_VECTOR_SIZE >= 512) || defined(SIMDE_X86_AVX512VL_NATIVE)) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
simde_svint32_t r;
#if SIMDE_ARM_SVE_VECTOR_SIZE >= 512
r.m512i = _mm512_maskz_and_epi32(simde_svbool_to_mmask16(pg), op1.m512i, op2.m512i);
#else
r.m256i[0] = _mm256_maskz_and_epi32(simde_svbool_to_mmask8(pg), op1.m256i[0], op2.m256i[0]);
#endif
return r;
#else
return simde_x_svsel_s32_z(pg, simde_svand_s32_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_s32_z
#define svand_s32_z(pg, op1, op2) simde_svand_s32_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svand_s32_m(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_s32_m(pg, op1, op2);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && ((SIMDE_ARM_SVE_VECTOR_SIZE >= 512) || defined(SIMDE_X86_AVX512VL_NATIVE)) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
simde_svint32_t r;
#if SIMDE_ARM_SVE_VECTOR_SIZE >= 512
r.m512i = _mm512_mask_and_epi32(op1.m512i, simde_svbool_to_mmask16(pg), op1.m512i, op2.m512i);
#else
r.m256i[0] = _mm256_mask_and_epi32(op1.m256i[0], simde_svbool_to_mmask8(pg), op1.m256i[0], op2.m256i[0]);
#endif
return r;
#else
return simde_svsel_s32(pg, simde_svand_s32_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_s32_m
#define svand_s32_m(pg, op1, op2) simde_svand_s32_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svand_n_s32_z(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_s32_z(pg, op1, op2);
#else
return simde_svand_s32_z(pg, op1, simde_svdup_n_s32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_s32_z
#define svand_n_s32_z(pg, op1, op2) simde_svand_n_s32_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svand_n_s32_m(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_s32_m(pg, op1, op2);
#else
return simde_svand_s32_m(pg, op1, simde_svdup_n_s32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_s32_m
#define svand_n_s32_m(pg, op1, op2) simde_svand_n_s32_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svand_n_s32_x(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_s32_x(pg, op1, op2);
#else
return simde_svand_s32_x(pg, op1, simde_svdup_n_s32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_s32_x
#define svand_n_s32_x(pg, op1, op2) simde_svand_n_s32_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svand_s64_x(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_s64_x(pg, op1, op2);
#else
simde_svint64_t r;
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vandq_s64(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_and_si512(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256i[0] = _mm256_and_si256(op1.m256i[0], op2.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_and_si256(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_and_si128(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r.altivec = vec_and(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec & op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_v128_and(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values & op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] & op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_s64_x
#define svand_s64_x(pg, op1, op2) simde_svand_s64_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svand_s64_z(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_s64_z(pg, op1, op2);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && ((SIMDE_ARM_SVE_VECTOR_SIZE >= 512) || defined(SIMDE_X86_AVX512VL_NATIVE)) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
simde_svint64_t r;
#if SIMDE_ARM_SVE_VECTOR_SIZE >= 512
r.m512i = _mm512_maskz_and_epi64(simde_svbool_to_mmask8(pg), op1.m512i, op2.m512i);
#else
r.m256i[0] = _mm256_maskz_and_epi64(simde_svbool_to_mmask4(pg), op1.m256i[0], op2.m256i[0]);
#endif
return r;
#else
return simde_x_svsel_s64_z(pg, simde_svand_s64_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_s64_z
#define svand_s64_z(pg, op1, op2) simde_svand_s64_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svand_s64_m(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_s64_m(pg, op1, op2);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && ((SIMDE_ARM_SVE_VECTOR_SIZE >= 512) || defined(SIMDE_X86_AVX512VL_NATIVE)) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
simde_svint64_t r;
#if SIMDE_ARM_SVE_VECTOR_SIZE >= 512
r.m512i = _mm512_mask_and_epi64(op1.m512i, simde_svbool_to_mmask8(pg), op1.m512i, op2.m512i);
#else
r.m256i[0] = _mm256_mask_and_epi64(op1.m256i[0], simde_svbool_to_mmask4(pg), op1.m256i[0], op2.m256i[0]);
#endif
return r;
#else
return simde_svsel_s64(pg, simde_svand_s64_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_s64_m
#define svand_s64_m(pg, op1, op2) simde_svand_s64_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svand_n_s64_z(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_s64_z(pg, op1, op2);
#else
return simde_svand_s64_z(pg, op1, simde_svdup_n_s64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_s64_z
#define svand_n_s64_z(pg, op1, op2) simde_svand_n_s64_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svand_n_s64_m(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_s64_m(pg, op1, op2);
#else
return simde_svand_s64_m(pg, op1, simde_svdup_n_s64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_s64_m
#define svand_n_s64_m(pg, op1, op2) simde_svand_n_s64_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svand_n_s64_x(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_s64_x(pg, op1, op2);
#else
return simde_svand_s64_x(pg, op1, simde_svdup_n_s64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_s64_x
#define svand_n_s64_x(pg, op1, op2) simde_svand_n_s64_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svand_u8_z(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_u8_z(pg, op1, op2);
#else
return simde_svreinterpret_u8_s8(simde_svand_s8_z(pg, simde_svreinterpret_s8_u8(op1), simde_svreinterpret_s8_u8(op2)));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_u8_z
#define svand_u8_z(pg, op1, op2) simde_svand_u8_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svand_u8_m(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_u8_m(pg, op1, op2);
#else
return simde_svreinterpret_u8_s8(simde_svand_s8_m(pg, simde_svreinterpret_s8_u8(op1), simde_svreinterpret_s8_u8(op2)));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_u8_m
#define svand_u8_m(pg, op1, op2) simde_svand_u8_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svand_u8_x(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_u8_x(pg, op1, op2);
#else
return simde_svreinterpret_u8_s8(simde_svand_s8_x(pg, simde_svreinterpret_s8_u8(op1), simde_svreinterpret_s8_u8(op2)));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_u8_x
#define svand_u8_x(pg, op1, op2) simde_svand_u8_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svand_n_u8_z(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_u8_z(pg, op1, op2);
#else
return simde_svand_u8_z(pg, op1, simde_svdup_n_u8(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_u8_z
#define svand_n_u8_z(pg, op1, op2) simde_svand_n_u8_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svand_n_u8_m(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_u8_m(pg, op1, op2);
#else
return simde_svand_u8_m(pg, op1, simde_svdup_n_u8(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_u8_m
#define svand_n_u8_m(pg, op1, op2) simde_svand_n_u8_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svand_n_u8_x(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_u8_x(pg, op1, op2);
#else
return simde_svand_u8_x(pg, op1, simde_svdup_n_u8(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_u8_x
#define svand_n_u8_x(pg, op1, op2) simde_svand_n_u8_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svand_u16_z(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_u16_z(pg, op1, op2);
#else
return simde_svreinterpret_u16_s16(simde_svand_s16_z(pg, simde_svreinterpret_s16_u16(op1), simde_svreinterpret_s16_u16(op2)));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_u16_z
#define svand_u16_z(pg, op1, op2) simde_svand_u16_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svand_u16_m(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_u16_m(pg, op1, op2);
#else
return simde_svreinterpret_u16_s16(simde_svand_s16_m(pg, simde_svreinterpret_s16_u16(op1), simde_svreinterpret_s16_u16(op2)));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_u16_m
#define svand_u16_m(pg, op1, op2) simde_svand_u16_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svand_u16_x(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_u16_x(pg, op1, op2);
#else
return simde_svreinterpret_u16_s16(simde_svand_s16_x(pg, simde_svreinterpret_s16_u16(op1), simde_svreinterpret_s16_u16(op2)));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_u16_x
#define svand_u16_x(pg, op1, op2) simde_svand_u16_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svand_n_u16_z(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_u16_z(pg, op1, op2);
#else
return simde_svand_u16_z(pg, op1, simde_svdup_n_u16(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_u16_z
#define svand_n_u16_z(pg, op1, op2) simde_svand_n_u16_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svand_n_u16_m(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_u16_m(pg, op1, op2);
#else
return simde_svand_u16_m(pg, op1, simde_svdup_n_u16(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_u16_m
#define svand_n_u16_m(pg, op1, op2) simde_svand_n_u16_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svand_n_u16_x(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_u16_x(pg, op1, op2);
#else
return simde_svand_u16_x(pg, op1, simde_svdup_n_u16(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_u16_x
#define svand_n_u16_x(pg, op1, op2) simde_svand_n_u16_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svand_u32_z(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_u32_z(pg, op1, op2);
#else
return simde_svreinterpret_u32_s32(simde_svand_s32_z(pg, simde_svreinterpret_s32_u32(op1), simde_svreinterpret_s32_u32(op2)));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_u32_z
#define svand_u32_z(pg, op1, op2) simde_svand_u32_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svand_u32_m(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_u32_m(pg, op1, op2);
#else
return simde_svreinterpret_u32_s32(simde_svand_s32_m(pg, simde_svreinterpret_s32_u32(op1), simde_svreinterpret_s32_u32(op2)));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_u32_m
#define svand_u32_m(pg, op1, op2) simde_svand_u32_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svand_u32_x(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_u32_x(pg, op1, op2);
#else
return simde_svreinterpret_u32_s32(simde_svand_s32_x(pg, simde_svreinterpret_s32_u32(op1), simde_svreinterpret_s32_u32(op2)));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_u32_x
#define svand_u32_x(pg, op1, op2) simde_svand_u32_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svand_n_u32_z(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_u32_z(pg, op1, op2);
#else
return simde_svand_u32_z(pg, op1, simde_svdup_n_u32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_u32_z
#define svand_n_u32_z(pg, op1, op2) simde_svand_n_u32_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svand_n_u32_m(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_u32_m(pg, op1, op2);
#else
return simde_svand_u32_m(pg, op1, simde_svdup_n_u32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_u32_m
#define svand_n_u32_m(pg, op1, op2) simde_svand_n_u32_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svand_n_u32_x(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_u32_x(pg, op1, op2);
#else
return simde_svand_u32_x(pg, op1, simde_svdup_n_u32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_u32_x
#define svand_n_u32_x(pg, op1, op2) simde_svand_n_u32_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svand_u64_z(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_u64_z(pg, op1, op2);
#else
return simde_svreinterpret_u64_s64(simde_svand_s64_z(pg, simde_svreinterpret_s64_u64(op1), simde_svreinterpret_s64_u64(op2)));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_u64_z
#define svand_u64_z(pg, op1, op2) simde_svand_u64_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svand_u64_m(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_u64_m(pg, op1, op2);
#else
return simde_svreinterpret_u64_s64(simde_svand_s64_m(pg, simde_svreinterpret_s64_u64(op1), simde_svreinterpret_s64_u64(op2)));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_u64_m
#define svand_u64_m(pg, op1, op2) simde_svand_u64_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svand_u64_x(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_u64_x(pg, op1, op2);
#else
return simde_svreinterpret_u64_s64(simde_svand_s64_x(pg, simde_svreinterpret_s64_u64(op1), simde_svreinterpret_s64_u64(op2)));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_u64_x
#define svand_u64_x(pg, op1, op2) simde_svand_u64_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svand_n_u64_z(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_u64_z(pg, op1, op2);
#else
return simde_svand_u64_z(pg, op1, simde_svdup_n_u64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_u64_z
#define svand_n_u64_z(pg, op1, op2) simde_svand_n_u64_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svand_n_u64_m(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_u64_m(pg, op1, op2);
#else
return simde_svand_u64_m(pg, op1, simde_svdup_n_u64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_u64_m
#define svand_n_u64_m(pg, op1, op2) simde_svand_n_u64_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svand_n_u64_x(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_n_u64_x(pg, op1, op2);
#else
return simde_svand_u64_x(pg, op1, simde_svdup_n_u64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svand_n_u64_x
#define svand_n_u64_x(pg, op1, op2) simde_svand_n_u64_x(pg, op1, op2)
#endif
#if defined(__cplusplus)
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svand_z(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { return simde_svand_s8_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svand_z(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { return simde_svand_s16_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svand_z(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { return simde_svand_s32_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svand_z(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { return simde_svand_s64_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svand_z(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svand_u8_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svand_z(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svand_u16_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svand_z(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svand_u32_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svand_z(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { return simde_svand_u64_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svand_m(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { return simde_svand_s8_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svand_m(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { return simde_svand_s16_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svand_m(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { return simde_svand_s32_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svand_m(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { return simde_svand_s64_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svand_m(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svand_u8_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svand_m(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svand_u16_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svand_m(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svand_u32_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svand_m(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { return simde_svand_u64_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svand_x(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { return simde_svand_s8_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svand_x(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { return simde_svand_s16_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svand_x(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { return simde_svand_s32_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svand_x(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { return simde_svand_s64_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svand_x(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svand_u8_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svand_x(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svand_u16_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svand_x(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svand_u32_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svand_x(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { return simde_svand_u64_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svand_z(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { return simde_svand_n_s8_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svand_z(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { return simde_svand_n_s16_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svand_z(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { return simde_svand_n_s32_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svand_z(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { return simde_svand_n_s64_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svand_z(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { return simde_svand_n_u8_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svand_z(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { return simde_svand_n_u16_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svand_z(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { return simde_svand_n_u32_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svand_z(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { return simde_svand_n_u64_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svand_m(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { return simde_svand_n_s8_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svand_m(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { return simde_svand_n_s16_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svand_m(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { return simde_svand_n_s32_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svand_m(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { return simde_svand_n_s64_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svand_m(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { return simde_svand_n_u8_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svand_m(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { return simde_svand_n_u16_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svand_m(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { return simde_svand_n_u32_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svand_m(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { return simde_svand_n_u64_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svand_x(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { return simde_svand_n_s8_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svand_x(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { return simde_svand_n_s16_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svand_x(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { return simde_svand_n_s32_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svand_x(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { return simde_svand_n_s64_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svand_x(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { return simde_svand_n_u8_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svand_x(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { return simde_svand_n_u16_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svand_x(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { return simde_svand_n_u32_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svand_x(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { return simde_svand_n_u64_x(pg, op1, op2); }
#elif defined(SIMDE_GENERIC_)
#define simde_svand_z(pg, op1, op2) \
(SIMDE_GENERIC_((op2), \
simde_svint8_t: simde_svand_s8_z, \
simde_svint16_t: simde_svand_s16_z, \
simde_svint32_t: simde_svand_s32_z, \
simde_svint64_t: simde_svand_s64_z, \
simde_svuint8_t: simde_svand_u8_z, \
simde_svuint16_t: simde_svand_u16_z, \
simde_svuint32_t: simde_svand_u32_z, \
simde_svuint64_t: simde_svand_u64_z, \
int8_t: simde_svand_n_s8_z, \
int16_t: simde_svand_n_s16_z, \
int32_t: simde_svand_n_s32_z, \
int64_t: simde_svand_n_s64_z, \
uint8_t: simde_svand_n_u8_z, \
uint16_t: simde_svand_n_u16_z, \
uint32_t: simde_svand_n_u32_z, \
uint64_t: simde_svand_n_u64_z)((pg), (op1), (op2)))
#define simde_svand_m(pg, op1, op2) \
(SIMDE_GENERIC_((op2), \
simde_svint8_t: simde_svand_s8_m, \
simde_svint16_t: simde_svand_s16_m, \
simde_svint32_t: simde_svand_s32_m, \
simde_svint64_t: simde_svand_s64_m, \
simde_svuint8_t: simde_svand_u8_m, \
simde_svuint16_t: simde_svand_u16_m, \
simde_svuint32_t: simde_svand_u32_m, \
simde_svuint64_t: simde_svand_u64_m, \
int8_t: simde_svand_n_s8_m, \
int16_t: simde_svand_n_s16_m, \
int32_t: simde_svand_n_s32_m, \
int64_t: simde_svand_n_s64_m, \
uint8_t: simde_svand_n_u8_m, \
uint16_t: simde_svand_n_u16_m, \
uint32_t: simde_svand_n_u32_m, \
uint64_t: simde_svand_n_u64_m)((pg), (op1), (op2)))
#define simde_svand_x(pg, op1, op2) \
(SIMDE_GENERIC_((op2), \
simde_svint8_t: simde_svand_s8_x, \
simde_svint16_t: simde_svand_s16_x, \
simde_svint32_t: simde_svand_s32_x, \
simde_svint64_t: simde_svand_s64_x, \
simde_svuint8_t: simde_svand_u8_x, \
simde_svuint16_t: simde_svand_u16_x, \
simde_svuint32_t: simde_svand_u32_x, \
simde_svuint64_t: simde_svand_u64_x, \
int8_t: simde_svand_n_s8_x, \
int16_t: simde_svand_n_s16_x, \
int32_t: simde_svand_n_s32_x, \
int64_t: simde_svand_n_s64_x, \
uint8_t: simde_svand_n_u8_x, \
uint16_t: simde_svand_n_u16_x, \
uint32_t: simde_svand_n_u32_x, \
uint64_t: simde_svand_n_u64_x)((pg), (op1), (op2)))
#endif
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef svand_x
#undef svand_z
#undef svand_m
#define svand_x(pg, op1, op2) simde_svand_x((pg), (op1), (op2))
#define svand_z(pg, op1, op2) simde_svand_z((pg), (op1), (op2))
#define svand_m(pg, op1, op2) simde_svand_m((pg), (op1), (op2))
#endif
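/* Summary of the predication suffixes as implemented above (an
 * illustrative note, not upstream documentation): for each lane i,
 *
 *   _x: inactive lanes are unspecified (this implementation computes them anyway),
 *   _z: inactive lanes are zeroed  (via simde_x_svsel_*_z),
 *   _m: inactive lanes keep op1    (via simde_svsel_*).
 *
 * e.g. simde_svint32_t r = simde_svand_s32_z(pg, a, b);
 *      // lane i of r == pg active ? (a[i] & b[i]) : 0
 */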
HEDLEY_DIAGNOSTIC_POP
#endif /* SIMDE_ARM_SVE_AND_H */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/sve/cnt.h | .h | 2,576 | 94 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_SVE_CNT_H)
#define SIMDE_ARM_SVE_CNT_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_svcntb(void) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svcntb();
#else
return sizeof(simde_svint8_t) / sizeof(int8_t);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svcntb
#define svcntb() simde_svcntb()
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_svcnth(void) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svcnth();
#else
return sizeof(simde_svint16_t) / sizeof(int16_t);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svcnth
#define svcnth() simde_svcnth()
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_svcntw(void) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svcntw();
#else
return sizeof(simde_svint32_t) / sizeof(int32_t);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svcntw
#define svcntw() simde_svcntw()
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_svcntd(void) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svcntd();
#else
return sizeof(simde_svint64_t) / sizeof(int64_t);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svcntd
#define svcntd() simde_svcntd()
#endif
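/* For a fixed SIMDE_ARM_SVE_VECTOR_SIZE these counts are compile-time
 * constants; e.g. with 512-bit (64-byte) vectors:
 *
 *   simde_svcntb() == 64, simde_svcnth() == 32,
 *   simde_svcntw() == 16, simde_svcntd() == 8.
 */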
HEDLEY_DIAGNOSTIC_POP
#endif /* SIMDE_ARM_SVE_CNT_H */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/sve/st1.h | .h | 13,374 | 308 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_SVE_ST1_H)
#define SIMDE_ARM_SVE_ST1_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_FUNCTION_ATTRIBUTES
void
simde_svst1_s8(simde_svbool_t pg, int8_t * base, simde_svint8_t data) {
#if defined(SIMDE_ARM_SVE_NATIVE)
svst1_s8(pg, base, data);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
_mm512_mask_storeu_epi8(base, simde_svbool_to_mmask64(pg), data.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
_mm256_mask_storeu_epi8(base, simde_svbool_to_mmask32(pg), data.m256i[0]);
#else
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntb()) ; i++) {
if (pg.values_i8[i]) {
base[i] = data.values[i];
}
}
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svst1_s8
#define svst1_s8(pg, base, data) simde_svst1_s8((pg), (base), (data))
#endif
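/* Round-trip usage sketch (hand-written; assumes svptrue_b8() is
 * available from ptrue.h and that the buffers cover the configured
 * vector size, e.g. 64 bytes at SIMDE_ARM_SVE_VECTOR_SIZE == 512):
 *
 *   int8_t in[64] = { 0 }, out[64] = { 0 };
 *   simde_svbool_t pg = simde_svptrue_b8();
 *   simde_svst1_s8(pg, out, simde_svld1_s8(pg, in));
 */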
SIMDE_FUNCTION_ATTRIBUTES
void
simde_svst1_s16(simde_svbool_t pg, int16_t * base, simde_svint16_t data) {
#if defined(SIMDE_ARM_SVE_NATIVE)
svst1_s16(pg, base, data);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
_mm512_mask_storeu_epi16(base, simde_svbool_to_mmask32(pg), data.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
_mm256_mask_storeu_epi16(base, simde_svbool_to_mmask16(pg), data.m256i[0]);
#else
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcnth()) ; i++) {
if (pg.values_i16[i]) {
base[i] = data.values[i];
}
}
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svst1_s16
#define svst1_s16(pg, base, data) simde_svst1_s16((pg), (base), (data))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_svst1_s32(simde_svbool_t pg, int32_t * base, simde_svint32_t data) {
#if defined(SIMDE_ARM_SVE_NATIVE)
svst1_s32(pg, base, data);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
_mm512_mask_storeu_epi32(base, simde_svbool_to_mmask16(pg), data.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
_mm256_mask_storeu_epi32(base, simde_svbool_to_mmask8(pg), data.m256i[0]);
#else
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntw()) ; i++) {
if (pg.values_i32[i]) {
base[i] = data.values[i];
}
}
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svst1_s32
#define svst1_s32(pg, base, data) simde_svst1_s32((pg), (base), (data))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_svst1_s64(simde_svbool_t pg, int64_t * base, simde_svint64_t data) {
#if defined(SIMDE_ARM_SVE_NATIVE)
svst1_s64(pg, base, data);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
_mm512_mask_storeu_epi64(base, simde_svbool_to_mmask8(pg), data.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
_mm256_mask_storeu_epi64(base, simde_svbool_to_mmask4(pg), data.m256i[0]);
#else
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntd()) ; i++) {
if (pg.values_i64[i]) {
base[i] = data.values[i];
}
}
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svst1_s64
#define svst1_s64(pg, base, data) simde_svst1_s64((pg), (base), (data))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_svst1_u8(simde_svbool_t pg, uint8_t * base, simde_svuint8_t data) {
#if defined(SIMDE_ARM_SVE_NATIVE)
svst1_u8(pg, base, data);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
_mm512_mask_storeu_epi8(base, simde_svbool_to_mmask64(pg), data.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
_mm256_mask_storeu_epi8(base, simde_svbool_to_mmask32(pg), data.m256i[0]);
#else
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntb()) ; i++) {
if (pg.values_u8[i]) {
base[i] = data.values[i];
}
}
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svst1_u8
#define svst1_u8(pg, base, data) simde_svst1_u8((pg), (base), (data))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_svst1_u16(simde_svbool_t pg, uint16_t * base, simde_svuint16_t data) {
#if defined(SIMDE_ARM_SVE_NATIVE)
svst1_u16(pg, base, data);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
_mm512_mask_storeu_epi16(base, simde_svbool_to_mmask32(pg), data.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
_mm256_mask_storeu_epi16(base, simde_svbool_to_mmask16(pg), data.m256i[0]);
#else
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcnth()) ; i++) {
if (pg.values_u16[i]) {
base[i] = data.values[i];
}
}
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svst1_u16
#define svst1_u16(pg, base, data) simde_svst1_u16((pg), (base), (data))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_svst1_u32(simde_svbool_t pg, uint32_t * base, simde_svuint32_t data) {
#if defined(SIMDE_ARM_SVE_NATIVE)
svst1_u32(pg, base, data);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
_mm512_mask_storeu_epi32(base, simde_svbool_to_mmask16(pg), data.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
_mm256_mask_storeu_epi32(base, simde_svbool_to_mmask8(pg), data.m256i[0]);
#else
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntw()) ; i++) {
if (pg.values_u32[i]) {
base[i] = data.values[i];
}
}
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svst1_u32
#define svst1_u32(pg, base, data) simde_svst1_u32((pg), (base), (data))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_svst1_u64(simde_svbool_t pg, uint64_t * base, simde_svuint64_t data) {
#if defined(SIMDE_ARM_SVE_NATIVE)
svst1_u64(pg, base, data);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
_mm512_mask_storeu_epi64(base, simde_svbool_to_mmask8(pg), data.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
_mm256_mask_storeu_epi64(base, simde_svbool_to_mmask4(pg), data.m256i[0]);
#else
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntd()) ; i++) {
if (pg.values_u64[i]) {
base[i] = data.values[i];
}
}
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svst1_u64
#define svst1_u64(pg, base, data) simde_svst1_u64((pg), (base), (data))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_svst1_f32(simde_svbool_t pg, simde_float32 * base, simde_svfloat32_t data) {
#if defined(SIMDE_ARM_SVE_NATIVE)
svst1_f32(pg, base, data);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
_mm512_mask_storeu_ps(base, simde_svbool_to_mmask16(pg), data.m512);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
_mm256_mask_storeu_ps(base, simde_svbool_to_mmask8(pg), data.m256[0]);
#else
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntw()) ; i++) {
if (pg.values_i32[i]) {
base[i] = data.values[i];
}
}
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svst1_f32
#define svst1_f32(pg, base, data) simde_svst1_f32((pg), (base), (data))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_svst1_f64(simde_svbool_t pg, simde_float64 * base, simde_svfloat64_t data) {
#if defined(SIMDE_ARM_SVE_NATIVE)
svst1_f64(pg, base, data);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
_mm512_mask_storeu_pd(base, simde_svbool_to_mmask8(pg), data.m512d);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
_mm256_mask_storeu_pd(base, simde_svbool_to_mmask4(pg), data.m256d[0]);
#else
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntd()) ; i++) {
if (pg.values_i64[i]) {
base[i] = data.values[i];
}
}
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svst1_f64
#define svst1_f64(pg, base, data) simde_svst1_f64((pg), (base), (data))
#endif
#if defined(__cplusplus)
SIMDE_FUNCTION_ATTRIBUTES void simde_svst1(simde_svbool_t pg, int8_t * base, simde_svint8_t data) { simde_svst1_s8 (pg, base, data); }
SIMDE_FUNCTION_ATTRIBUTES void simde_svst1(simde_svbool_t pg, int16_t * base, simde_svint16_t data) { simde_svst1_s16(pg, base, data); }
SIMDE_FUNCTION_ATTRIBUTES void simde_svst1(simde_svbool_t pg, int32_t * base, simde_svint32_t data) { simde_svst1_s32(pg, base, data); }
SIMDE_FUNCTION_ATTRIBUTES void simde_svst1(simde_svbool_t pg, int64_t * base, simde_svint64_t data) { simde_svst1_s64(pg, base, data); }
SIMDE_FUNCTION_ATTRIBUTES void simde_svst1(simde_svbool_t pg, uint8_t * base, simde_svuint8_t data) { simde_svst1_u8 (pg, base, data); }
SIMDE_FUNCTION_ATTRIBUTES void simde_svst1(simde_svbool_t pg, uint16_t * base, simde_svuint16_t data) { simde_svst1_u16(pg, base, data); }
SIMDE_FUNCTION_ATTRIBUTES void simde_svst1(simde_svbool_t pg, uint32_t * base, simde_svuint32_t data) { simde_svst1_u32(pg, base, data); }
SIMDE_FUNCTION_ATTRIBUTES void simde_svst1(simde_svbool_t pg, uint64_t * base, simde_svuint64_t data) { simde_svst1_u64(pg, base, data); }
SIMDE_FUNCTION_ATTRIBUTES void simde_svst1(simde_svbool_t pg, simde_float32 * base, simde_svfloat32_t data) { simde_svst1_f32(pg, base, data); }
SIMDE_FUNCTION_ATTRIBUTES void simde_svst1(simde_svbool_t pg, simde_float64 * base, simde_svfloat64_t data) { simde_svst1_f64(pg, base, data); }
#elif defined(SIMDE_GENERIC_)
#define simde_svst1(pg, base, data) \
(SIMDE_GENERIC_((data), \
simde_svint8_t: simde_svst1_s8 , \
simde_svint16_t: simde_svst1_s16, \
simde_svint32_t: simde_svst1_s32, \
simde_svint64_t: simde_svst1_s64, \
simde_svuint8_t: simde_svst1_u8 , \
simde_svuint16_t: simde_svst1_u16, \
simde_svuint32_t: simde_svst1_u32, \
simde_svuint64_t: simde_svst1_u64, \
simde_svfloat32_t: simde_svst1_f32, \
simde_svfloat64_t: simde_svst1_f64)((pg), (base), (data)))
#endif
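/* Editor's note (illustrative only): with the C++ overloads or the C11
 * _Generic dispatch above, one spelling covers every element type:
 *
 *   simde_svst1(pg, i32_ptr, v_s32);  // resolves to simde_svst1_s32
 *   simde_svst1(pg, f64_ptr, v_f64);  // resolves to simde_svst1_f64
 */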
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef svst1
#define svst1(pg, base, data) simde_svst1((pg), (base), (data))
#endif
HEDLEY_DIAGNOSTIC_POP
#endif /* SIMDE_ARM_SVE_ST1_H */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/sve/add.h | .h | 55,418 | 1,351 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_SVE_ADD_H)
#define SIMDE_ARM_SVE_ADD_H
#include "types.h"
#include "sel.h"
#include "dup.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svadd_s8_x(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_s8_x(pg, op1, op2);
#else
simde_svint8_t r;
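    /* The _x ("don't care") form may compute all lanes regardless of the
     * predicate, so pg is only consumed here to avoid an unused warning. */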
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vaddq_s8(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_add_epi8(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256i[0] = _mm256_add_epi8(op1.m256i[0], op2.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_add_epi8(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_add_epi8(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_add(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec + op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i8x16_add(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values + op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] + op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_s8_x
#define svadd_s8_x(pg, op1, op2) simde_svadd_s8_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svadd_s8_z(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_s8_z(pg, op1, op2);
#else
return simde_x_svsel_s8_z(pg, simde_svadd_s8_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_s8_z
#define svadd_s8_z(pg, op1, op2) simde_svadd_s8_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svadd_s8_m(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_s8_m(pg, op1, op2);
#else
return simde_svsel_s8(pg, simde_svadd_s8_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_s8_m
#define svadd_s8_m(pg, op1, op2) simde_svadd_s8_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svadd_n_s8_x(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_s8_x(pg, op1, op2);
#else
return simde_svadd_s8_x(pg, op1, simde_svdup_n_s8(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_s8_x
#define svadd_n_s8_x(pg, op1, op2) simde_svadd_n_s8_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svadd_n_s8_z(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_s8_z(pg, op1, op2);
#else
return simde_svadd_s8_z(pg, op1, simde_svdup_n_s8(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_s8_z
#define svadd_n_s8_z(pg, op1, op2) simde_svadd_n_s8_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svadd_n_s8_m(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_s8_m(pg, op1, op2);
#else
return simde_svadd_s8_m(pg, op1, simde_svdup_n_s8(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_s8_m
#define svadd_n_s8_m(pg, op1, op2) simde_svadd_n_s8_m(pg, op1, op2)
#endif
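/* Editor's note (sketch): the suffix names the inactive-lane policy:
 *   _x  don't-care -- here the result is simply computed for every lane;
 *   _z  zeroing    -- simde_x_svsel_s8_z clears the inactive lanes;
 *   _m  merging    -- simde_svsel_s8 takes the inactive lanes from op1.
 * E.g. with pg active in lanes 0 and 2, op1 = {1,1,1,1}, op2 = {2,2,2,2}:
 *   svadd_s8_z -> {3,0,3,0},  svadd_s8_m -> {3,1,3,1}.
 */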
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svadd_s16_x(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_s16_x(pg, op1, op2);
#else
simde_svint16_t r;
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vaddq_s16(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_add_epi16(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256i[0] = _mm256_add_epi16(op1.m256i[0], op2.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_add_epi16(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_add_epi16(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_add(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec + op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i16x8_add(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values + op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] + op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_s16_x
#define svadd_s16_x(pg, op1, op2) simde_svadd_s16_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svadd_s16_z(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_s16_z(pg, op1, op2);
#else
return simde_x_svsel_s16_z(pg, simde_svadd_s16_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_s16_z
#define svadd_s16_z(pg, op1, op2) simde_svadd_s16_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svadd_s16_m(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_s16_m(pg, op1, op2);
#else
return simde_svsel_s16(pg, simde_svadd_s16_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_s16_m
#define svadd_s16_m(pg, op1, op2) simde_svadd_s16_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svadd_n_s16_x(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_s16_x(pg, op1, op2);
#else
return simde_svadd_s16_x(pg, op1, simde_svdup_n_s16(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_s16_x
#define svadd_n_s16_x(pg, op1, op2) simde_svadd_n_s16_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svadd_n_s16_z(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_s16_z(pg, op1, op2);
#else
return simde_svadd_s16_z(pg, op1, simde_svdup_n_s16(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_s16_z
#define svadd_n_s16_z(pg, op1, op2) simde_svadd_n_s16_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svadd_n_s16_m(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_s16_m(pg, op1, op2);
#else
return simde_svadd_s16_m(pg, op1, simde_svdup_n_s16(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_s16_m
#define svadd_n_s16_m(pg, op1, op2) simde_svadd_n_s16_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svadd_s32_x(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_s32_x(pg, op1, op2);
#else
simde_svint32_t r;
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vaddq_s32(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_add_epi32(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256i[0] = _mm256_add_epi32(op1.m256i[0], op2.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_add_epi32(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_add_epi32(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_add(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec + op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i32x4_add(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values + op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] + op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_s32_x
#define svadd_s32_x(pg, op1, op2) simde_svadd_s32_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svadd_s32_z(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_s32_z(pg, op1, op2);
#else
return simde_x_svsel_s32_z(pg, simde_svadd_s32_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_s32_z
#define svadd_s32_z(pg, op1, op2) simde_svadd_s32_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svadd_s32_m(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_s32_m(pg, op1, op2);
#else
return simde_svsel_s32(pg, simde_svadd_s32_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_s32_m
#define svadd_s32_m(pg, op1, op2) simde_svadd_s32_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svadd_n_s32_x(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_s32_x(pg, op1, op2);
#else
return simde_svadd_s32_x(pg, op1, simde_svdup_n_s32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_s32_x
#define svadd_n_s32_x(pg, op1, op2) simde_svadd_n_s32_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svadd_n_s32_z(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_s32_z(pg, op1, op2);
#else
return simde_svadd_s32_z(pg, op1, simde_svdup_n_s32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_s32_z
#define svadd_n_s32_z(pg, op1, op2) simde_svadd_n_s32_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svadd_n_s32_m(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_s32_m(pg, op1, op2);
#else
return simde_svadd_s32_m(pg, op1, simde_svdup_n_s32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_s32_m
#define svadd_n_s32_m(pg, op1, op2) simde_svadd_n_s32_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svadd_s64_x(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_s64_x(pg, op1, op2);
#else
simde_svint64_t r;
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vaddq_s64(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_add_epi64(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256i[0] = _mm256_add_epi64(op1.m256i[0], op2.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_add_epi64(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_add_epi64(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r.altivec = vec_add(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec + op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i64x2_add(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values + op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] + op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_s64_x
#define svadd_s64_x(pg, op1, op2) simde_svadd_s64_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svadd_s64_z(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_s64_z(pg, op1, op2);
#else
return simde_x_svsel_s64_z(pg, simde_svadd_s64_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_s64_z
#define svadd_s64_z(pg, op1, op2) simde_svadd_s64_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svadd_s64_m(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_s64_m(pg, op1, op2);
#else
return simde_svsel_s64(pg, simde_svadd_s64_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_s64_m
#define svadd_s64_m(pg, op1, op2) simde_svadd_s64_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svadd_n_s64_x(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_s64_x(pg, op1, op2);
#else
return simde_svadd_s64_x(pg, op1, simde_svdup_n_s64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_s64_x
#define svadd_n_s64_x(pg, op1, op2) simde_svadd_n_s64_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svadd_n_s64_z(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_s64_z(pg, op1, op2);
#else
return simde_svadd_s64_z(pg, op1, simde_svdup_n_s64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_s64_z
#define svadd_n_s64_z(pg, op1, op2) simde_svadd_n_s64_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svadd_n_s64_m(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_s64_m(pg, op1, op2);
#else
return simde_svadd_s64_m(pg, op1, simde_svdup_n_s64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_s64_m
#define svadd_n_s64_m(pg, op1, op2) simde_svadd_n_s64_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svadd_u8_x(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_u8_x(pg, op1, op2);
#else
simde_svuint8_t r;
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vaddq_u8(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_add_epi8(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256i[0] = _mm256_add_epi8(op1.m256i[0], op2.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_add_epi8(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_add_epi8(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_add(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec + op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i8x16_add(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values + op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] + op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_u8_x
#define svadd_u8_x(pg, op1, op2) simde_svadd_u8_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svadd_u8_z(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_u8_z(pg, op1, op2);
#else
return simde_x_svsel_u8_z(pg, simde_svadd_u8_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_u8_z
#define svadd_u8_z(pg, op1, op2) simde_svadd_u8_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svadd_u8_m(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_u8_m(pg, op1, op2);
#else
return simde_svsel_u8(pg, simde_svadd_u8_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_u8_m
#define svadd_u8_m(pg, op1, op2) simde_svadd_u8_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svadd_n_u8_x(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_u8_x(pg, op1, op2);
#else
return simde_svadd_u8_x(pg, op1, simde_svdup_n_u8(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_u8_x
#define svadd_n_u8_x(pg, op1, op2) simde_svadd_n_u8_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svadd_n_u8_z(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_u8_z(pg, op1, op2);
#else
return simde_svadd_u8_z(pg, op1, simde_svdup_n_u8(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_u8_z
#define svadd_n_u8_z(pg, op1, op2) simde_svadd_n_u8_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svadd_n_u8_m(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_u8_m(pg, op1, op2);
#else
return simde_svadd_u8_m(pg, op1, simde_svdup_n_u8(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_u8_m
#define svadd_n_u8_m(pg, op1, op2) simde_svadd_n_u8_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svadd_u16_x(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_u16_x(pg, op1, op2);
#else
simde_svuint16_t r;
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vaddq_u16(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_add_epi16(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256i[0] = _mm256_add_epi16(op1.m256i[0], op2.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_add_epi16(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_add_epi16(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_add(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec + op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i16x8_add(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values + op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] + op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_u16_x
#define svadd_u16_x(pg, op1, op2) simde_svadd_u16_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svadd_u16_z(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_u16_z(pg, op1, op2);
#else
return simde_x_svsel_u16_z(pg, simde_svadd_u16_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_u16_z
#define svadd_u16_z(pg, op1, op2) simde_svadd_u16_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svadd_u16_m(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_u16_m(pg, op1, op2);
#else
return simde_svsel_u16(pg, simde_svadd_u16_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_u16_m
#define svadd_u16_m(pg, op1, op2) simde_svadd_u16_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svadd_n_u16_x(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_u16_x(pg, op1, op2);
#else
return simde_svadd_u16_x(pg, op1, simde_svdup_n_u16(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_u16_x
#define svadd_n_u16_x(pg, op1, op2) simde_svadd_n_u16_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svadd_n_u16_z(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_u16_z(pg, op1, op2);
#else
return simde_svadd_u16_z(pg, op1, simde_svdup_n_u16(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_u16_z
#define svadd_n_u16_z(pg, op1, op2) simde_svadd_n_u16_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svadd_n_u16_m(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_u16_m(pg, op1, op2);
#else
return simde_svadd_u16_m(pg, op1, simde_svdup_n_u16(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_u16_m
#define svadd_n_u16_m(pg, op1, op2) simde_svadd_n_u16_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svadd_u32_x(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_u32_x(pg, op1, op2);
#else
simde_svuint32_t r;
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vaddq_u32(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_add_epi32(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256i[0] = _mm256_add_epi32(op1.m256i[0], op2.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_add_epi32(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_add_epi32(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_add(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec + op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i32x4_add(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values + op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] + op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_u32_x
#define svadd_u32_x(pg, op1, op2) simde_svadd_u32_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svadd_u32_z(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_u32_z(pg, op1, op2);
#else
return simde_x_svsel_u32_z(pg, simde_svadd_u32_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_u32_z
#define svadd_u32_z(pg, op1, op2) simde_svadd_u32_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svadd_u32_m(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_u32_m(pg, op1, op2);
#else
return simde_svsel_u32(pg, simde_svadd_u32_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_u32_m
#define svadd_u32_m(pg, op1, op2) simde_svadd_u32_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svadd_n_u32_x(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_u32_x(pg, op1, op2);
#else
return simde_svadd_u32_x(pg, op1, simde_svdup_n_u32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_u32_x
#define svadd_n_u32_x(pg, op1, op2) simde_svadd_n_u32_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svadd_n_u32_z(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_u32_z(pg, op1, op2);
#else
return simde_svadd_u32_z(pg, op1, simde_svdup_n_u32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_u32_z
#define svadd_n_u32_z(pg, op1, op2) simde_svadd_n_u32_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svadd_n_u32_m(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_u32_m(pg, op1, op2);
#else
return simde_svadd_u32_m(pg, op1, simde_svdup_n_u32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_u32_m
#define svadd_n_u32_m(pg, op1, op2) simde_svadd_n_u32_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svadd_u64_x(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_u64_x(pg, op1, op2);
#else
simde_svuint64_t r;
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vaddq_u64(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_add_epi64(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256i[0] = _mm256_add_epi64(op1.m256i[0], op2.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_add_epi64(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_add_epi64(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r.altivec = vec_add(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec + op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i64x2_add(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values + op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] + op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_u64_x
#define svadd_u64_x(pg, op1, op2) simde_svadd_u64_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svadd_u64_z(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_u64_z(pg, op1, op2);
#else
return simde_x_svsel_u64_z(pg, simde_svadd_u64_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_u64_z
#define svadd_u64_z(pg, op1, op2) simde_svadd_u64_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svadd_u64_m(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_u64_m(pg, op1, op2);
#else
return simde_svsel_u64(pg, simde_svadd_u64_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_u64_m
#define svadd_u64_m(pg, op1, op2) simde_svadd_u64_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svadd_n_u64_x(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_u64_x(pg, op1, op2);
#else
return simde_svadd_u64_x(pg, op1, simde_svdup_n_u64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_u64_x
#define svadd_n_u64_x(pg, op1, op2) simde_svadd_n_u64_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svadd_n_u64_z(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_u64_z(pg, op1, op2);
#else
return simde_svadd_u64_z(pg, op1, simde_svdup_n_u64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_u64_z
#define svadd_n_u64_z(pg, op1, op2) simde_svadd_n_u64_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svadd_n_u64_m(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_u64_m(pg, op1, op2);
#else
return simde_svadd_u64_m(pg, op1, simde_svdup_n_u64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_u64_m
#define svadd_n_u64_m(pg, op1, op2) simde_svadd_n_u64_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat32_t
simde_svadd_f32_x(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_f32_x(pg, op1, op2);
#else
simde_svfloat32_t r;
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vaddq_f32(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512 = _mm512_add_ps(op1.m512, op2.m512);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256[0] = _mm256_add_ps(op1.m256[0], op2.m256[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256) / sizeof(r.m256[0])) ; i++) {
r.m256[i] = _mm256_add_ps(op1.m256[i], op2.m256[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128) / sizeof(r.m128[0])) ; i++) {
r.m128[i] = _mm_add_ps(op1.m128[i], op2.m128[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_add(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec + op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_f32x4_add(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values + op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] + op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_f32_x
#define svadd_f32_x(pg, op1, op2) simde_svadd_f32_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat32_t
simde_svadd_f32_z(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_f32_z(pg, op1, op2);
#else
return simde_x_svsel_f32_z(pg, simde_svadd_f32_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_f32_z
#define svadd_f32_z(pg, op1, op2) simde_svadd_f32_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat32_t
simde_svadd_f32_m(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_f32_m(pg, op1, op2);
#else
return simde_svsel_f32(pg, simde_svadd_f32_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_f32_m
#define svadd_f32_m(pg, op1, op2) simde_svadd_f32_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat32_t
simde_svadd_n_f32_x(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_f32_x(pg, op1, op2);
#else
return simde_svadd_f32_x(pg, op1, simde_svdup_n_f32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_f32_x
#define svadd_n_f32_x(pg, op1, op2) simde_svadd_n_f32_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat32_t
simde_svadd_n_f32_z(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_f32_z(pg, op1, op2);
#else
return simde_svadd_f32_z(pg, op1, simde_svdup_n_f32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_f32_z
#define svadd_n_f32_z(pg, op1, op2) simde_svadd_n_f32_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat32_t
simde_svadd_n_f32_m(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_f32_m(pg, op1, op2);
#else
return simde_svadd_f32_m(pg, op1, simde_svdup_n_f32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_f32_m
#define svadd_n_f32_m(pg, op1, op2) simde_svadd_n_f32_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat64_t
simde_svadd_f64_x(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_f64_x(pg, op1, op2);
#else
simde_svfloat64_t r;
HEDLEY_STATIC_CAST(void, pg);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
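      /* vaddq_f64 only exists on AArch64 NEON, hence the A64V8 guard here
       * (the integer and f32 variants above accept A32V7). */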
r.neon = vaddq_f64(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512d = _mm512_add_pd(op1.m512d, op2.m512d);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
r.m256d[0] = _mm256_add_pd(op1.m256d[0], op2.m256d[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256d) / sizeof(r.m256d[0])) ; i++) {
r.m256d[i] = _mm256_add_pd(op1.m256d[i], op2.m256d[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128d) / sizeof(r.m128d[0])) ; i++) {
r.m128d[i] = _mm_add_pd(op1.m128d[i], op2.m128d[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r.altivec = vec_add(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = op1.altivec + op2.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_f64x2_add(op1.v128, op2.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = op1.values + op2.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op1.values[i] + op2.values[i];
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_f64_x
#define svadd_f64_x(pg, op1, op2) simde_svadd_f64_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat64_t
simde_svadd_f64_z(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_f64_z(pg, op1, op2);
#else
return simde_x_svsel_f64_z(pg, simde_svadd_f64_x(pg, op1, op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_f64_z
#define svadd_f64_z(pg, op1, op2) simde_svadd_f64_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat64_t
simde_svadd_f64_m(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_f64_m(pg, op1, op2);
#else
return simde_svsel_f64(pg, simde_svadd_f64_x(pg, op1, op2), op1);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_f64_m
#define svadd_f64_m(pg, op1, op2) simde_svadd_f64_m(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat64_t
simde_svadd_n_f64_x(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_f64_x(pg, op1, op2);
#else
return simde_svadd_f64_x(pg, op1, simde_svdup_n_f64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_f64_x
#define svadd_n_f64_x(pg, op1, op2) simde_svadd_n_f64_x(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat64_t
simde_svadd_n_f64_z(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_f64_z(pg, op1, op2);
#else
return simde_svadd_f64_z(pg, op1, simde_svdup_n_f64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_f64_z
#define svadd_n_f64_z(pg, op1, op2) simde_svadd_n_f64_z(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat64_t
simde_svadd_n_f64_m(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svadd_n_f64_m(pg, op1, op2);
#else
return simde_svadd_f64_m(pg, op1, simde_svdup_n_f64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svadd_n_f64_m
#define svadd_n_f64_m(pg, op1, op2) simde_svadd_n_f64_m(pg, op1, op2)
#endif
#if defined(__cplusplus)
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svadd_x(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { return simde_svadd_s8_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svadd_x(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { return simde_svadd_s16_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svadd_x(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { return simde_svadd_s32_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svadd_x(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { return simde_svadd_s64_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svadd_x(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svadd_u8_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svadd_x(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svadd_u16_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svadd_x(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svadd_u32_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svadd_x(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { return simde_svadd_u64_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svadd_x(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) { return simde_svadd_f32_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svadd_x(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { return simde_svadd_f64_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svadd_z(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { return simde_svadd_s8_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svadd_z(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { return simde_svadd_s16_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svadd_z(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { return simde_svadd_s32_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svadd_z(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { return simde_svadd_s64_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svadd_z(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svadd_u8_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svadd_z(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svadd_u16_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svadd_z(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svadd_u32_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svadd_z(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { return simde_svadd_u64_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svadd_z(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) { return simde_svadd_f32_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svadd_z(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { return simde_svadd_f64_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svadd_m(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { return simde_svadd_s8_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svadd_m(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { return simde_svadd_s16_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svadd_m(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { return simde_svadd_s32_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svadd_m(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { return simde_svadd_s64_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svadd_m(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svadd_u8_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svadd_m(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svadd_u16_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svadd_m(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svadd_u32_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svadd_m(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { return simde_svadd_u64_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svadd_m(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) { return simde_svadd_f32_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svadd_m(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { return simde_svadd_f64_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svadd_x(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { return simde_svadd_n_s8_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svadd_x(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { return simde_svadd_n_s16_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svadd_x(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { return simde_svadd_n_s32_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svadd_x(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { return simde_svadd_n_s64_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svadd_x(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { return simde_svadd_n_u8_x (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svadd_x(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { return simde_svadd_n_u16_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svadd_x(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { return simde_svadd_n_u32_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svadd_x(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { return simde_svadd_n_u64_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svadd_x(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) { return simde_svadd_n_f32_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svadd_x(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) { return simde_svadd_n_f64_x(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svadd_z(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { return simde_svadd_n_s8_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svadd_z(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { return simde_svadd_n_s16_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svadd_z(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { return simde_svadd_n_s32_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svadd_z(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { return simde_svadd_n_s64_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svadd_z(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { return simde_svadd_n_u8_z (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svadd_z(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { return simde_svadd_n_u16_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svadd_z(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { return simde_svadd_n_u32_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svadd_z(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { return simde_svadd_n_u64_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svadd_z(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) { return simde_svadd_n_f32_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svadd_z(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) { return simde_svadd_n_f64_z(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svadd_m(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { return simde_svadd_n_s8_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svadd_m(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { return simde_svadd_n_s16_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svadd_m(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { return simde_svadd_n_s32_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svadd_m(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { return simde_svadd_n_s64_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svadd_m(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { return simde_svadd_n_u8_m (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svadd_m(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { return simde_svadd_n_u16_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svadd_m(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { return simde_svadd_n_u32_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svadd_m(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { return simde_svadd_n_u64_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svadd_m(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) { return simde_svadd_n_f32_m(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svadd_m(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) { return simde_svadd_n_f64_m(pg, op1, op2); }
#elif defined(SIMDE_GENERIC_)
#define simde_svadd_x(pg, op1, op2) \
(SIMDE_GENERIC_((op2), \
simde_svint8_t: simde_svadd_s8_x, \
simde_svint16_t: simde_svadd_s16_x, \
simde_svint32_t: simde_svadd_s32_x, \
simde_svint64_t: simde_svadd_s64_x, \
simde_svuint8_t: simde_svadd_u8_x, \
simde_svuint16_t: simde_svadd_u16_x, \
simde_svuint32_t: simde_svadd_u32_x, \
simde_svuint64_t: simde_svadd_u64_x, \
simde_svfloat32_t: simde_svadd_f32_x, \
simde_svfloat64_t: simde_svadd_f64_x, \
int8_t: simde_svadd_n_s8_x, \
int16_t: simde_svadd_n_s16_x, \
int32_t: simde_svadd_n_s32_x, \
int64_t: simde_svadd_n_s64_x, \
uint8_t: simde_svadd_n_u8_x, \
uint16_t: simde_svadd_n_u16_x, \
uint32_t: simde_svadd_n_u32_x, \
uint64_t: simde_svadd_n_u64_x, \
simde_float32: simde_svadd_n_f32_x, \
simde_float64: simde_svadd_n_f64_x)((pg), (op1), (op2)))
#define simde_svadd_z(pg, op1, op2) \
(SIMDE_GENERIC_((op2), \
simde_svint8_t: simde_svadd_s8_z, \
simde_svint16_t: simde_svadd_s16_z, \
simde_svint32_t: simde_svadd_s32_z, \
simde_svint64_t: simde_svadd_s64_z, \
simde_svuint8_t: simde_svadd_u8_z, \
simde_svuint16_t: simde_svadd_u16_z, \
simde_svuint32_t: simde_svadd_u32_z, \
simde_svuint64_t: simde_svadd_u64_z, \
simde_svfloat32_t: simde_svadd_f32_z, \
simde_svfloat64_t: simde_svadd_f64_z, \
int8_t: simde_svadd_n_s8_z, \
int16_t: simde_svadd_n_s16_z, \
int32_t: simde_svadd_n_s32_z, \
int64_t: simde_svadd_n_s64_z, \
uint8_t: simde_svadd_n_u8_z, \
uint16_t: simde_svadd_n_u16_z, \
uint32_t: simde_svadd_n_u32_z, \
uint64_t: simde_svadd_n_u64_z, \
simde_float32: simde_svadd_n_f32_z, \
simde_float64: simde_svadd_n_f64_z)((pg), (op1), (op2)))
#define simde_svadd_m(pg, op1, op2) \
(SIMDE_GENERIC_((op2), \
simde_svint8_t: simde_svadd_s8_m, \
simde_svint16_t: simde_svadd_s16_m, \
simde_svint32_t: simde_svadd_s32_m, \
simde_svint64_t: simde_svadd_s64_m, \
simde_svuint8_t: simde_svadd_u8_m, \
simde_svuint16_t: simde_svadd_u16_m, \
simde_svuint32_t: simde_svadd_u32_m, \
simde_svuint64_t: simde_svadd_u64_m, \
simde_svfloat32_t: simde_svadd_f32_m, \
simde_svfloat64_t: simde_svadd_f64_m, \
int8_t: simde_svadd_n_s8_m, \
int16_t: simde_svadd_n_s16_m, \
int32_t: simde_svadd_n_s32_m, \
int64_t: simde_svadd_n_s64_m, \
uint8_t: simde_svadd_n_u8_m, \
uint16_t: simde_svadd_n_u16_m, \
uint32_t: simde_svadd_n_u32_m, \
uint64_t: simde_svadd_n_u64_m, \
simde_float32: simde_svadd_n_f32_m, \
simde_float64: simde_svadd_n_f64_m)((pg), (op1), (op2)))
#endif
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef svadd_x
#undef svadd_z
#undef svadd_m
#undef svadd_n_x
#undef svadd_n_z
#undef svadd_n_m
#define svadd_x(pg, op1, op2) simde_svadd_x((pg), (op1), (op2))
#define svadd_z(pg, op1, op2) simde_svadd_z((pg), (op1), (op2))
#define svadd_m(pg, op1, op2) simde_svadd_m((pg), (op1), (op2))
#define svadd_n_x(pg, op1, op2) simde_svadd_n_x((pg), (op1), (op2))
#define svadd_n_z(pg, op1, op2) simde_svadd_n_z((pg), (op1), (op2))
#define svadd_n_m(pg, op1, op2) simde_svadd_n_m((pg), (op1), (op2))
#endif
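/* --- Illustrative usage sketch (ours, not part of upstream SIMDe) -------
 * The C++ overloads and the SIMDE_GENERIC_ table above both dispatch on
 * the type of op2, so one spelling covers vector+vector and vector+scalar
 * adds.  A minimal sketch, assuming the simde/arm/sve.h umbrella header so
 * that simde_svptrue_b32() and simde_svdup_n_s32() are in scope:
 *
 *   simde_svbool_t  pg = simde_svptrue_b32();
 *   simde_svint32_t v  = simde_svdup_n_s32(7);
 *   simde_svint32_t a  = simde_svadd_x(pg, v, v);                               // plain form
 *   simde_svint32_t b  = simde_svadd_x(pg, v, HEDLEY_STATIC_CAST(int32_t, 35)); // _n form
 * ------------------------------------------------------------------------ */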
HEDLEY_DIAGNOSTIC_POP
#endif /* SIMDE_ARM_SVE_ADD_H */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/sve/dup.h | .h | 43,805 | 1,134 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_SVE_DUP_H)
#define SIMDE_ARM_SVE_DUP_H
#include "types.h"
#include "reinterpret.h"
#include "sel.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svdup_n_s8(int8_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_s8(op);
#else
simde_svint8_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vdupq_n_s8(op);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_set1_epi8(op);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_set1_epi8(op);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_set1_epi8(op);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = vec_splats(op);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i8x16_splat(op);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op;
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_s8
#define svdup_n_s8(op) simde_svdup_n_s8((op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svdup_s8(int8_t op) {
return simde_svdup_n_s8(op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_s8
#define svdup_s8(op) simde_svdup_n_s8((op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svdup_n_s8_z(simde_svbool_t pg, int8_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_s8_z(pg, op);
#else
return simde_x_svsel_s8_z(pg, simde_svdup_n_s8(op));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_s8_z
#define svdup_n_s8_z(pg, op) simde_svdup_n_s8_z((pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svdup_s8_z(simde_svbool_t pg, int8_t op) {
return simde_svdup_n_s8_z(pg, op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_s8_z
#define svdup_s8_z(pg, op) simde_svdup_n_s8_z((pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svdup_n_s8_m(simde_svint8_t inactive, simde_svbool_t pg, int8_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_s8_m(inactive, pg, op);
#else
return simde_svsel_s8(pg, simde_svdup_n_s8(op), inactive);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_s8_m
#define svdup_n_s8_m(inactive, pg, op) simde_svdup_n_s8_m((inactive), (pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svdup_s8_m(simde_svint8_t inactive, simde_svbool_t pg, int8_t op) {
return simde_svdup_n_s8_m(inactive, pg, op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_s8_m
#define svdup_s8_m(inactive, pg, op) simde_svdup_n_s8_m((inactive), (pg), (op))
#endif
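/* --- Illustrative sketch (ours, not part of upstream SIMDe) -------------
 * Per-lane semantics of the three svdup forms defined above:
 *
 *   plain : r[i] = op
 *   _z    : r[i] = pg[i] ? op : 0
 *   _m    : r[i] = pg[i] ? op : inactive[i]
 *
 * e.g., with a predicate pg whose even lanes are active and a previous
 * vector `old`:
 *
 *   simde_svint8_t z = simde_svdup_n_s8_z(pg, 42);       // 42, 0,      42, 0,      ...
 *   simde_svint8_t m = simde_svdup_n_s8_m(old, pg, 42);  // 42, old[1], 42, old[3], ...
 * ------------------------------------------------------------------------ */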
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svdup_n_s16(int16_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_s16(op);
#else
simde_svint16_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vdupq_n_s16(op);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_set1_epi16(op);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_set1_epi16(op);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_set1_epi16(op);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = vec_splats(op);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i16x8_splat(op);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op;
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_s16
#define svdup_n_s16(op) simde_svdup_n_s16((op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svdup_s16(int16_t op) {
return simde_svdup_n_s16(op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_s16
#define svdup_s16(op) simde_svdup_n_s16((op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svdup_n_s16_z(simde_svbool_t pg, int16_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_s16_z(pg, op);
#else
return simde_x_svsel_s16_z(pg, simde_svdup_n_s16(op));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_s16_z
#define svdup_n_s16_z(pg, op) simde_svdup_n_s16_z((pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svdup_s16_z(simde_svbool_t pg, int16_t op) {
return simde_svdup_n_s16_z(pg, op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_s16_z
#define svdup_s16_z(pg, op) simde_svdup_n_s16_z((pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svdup_n_s16_m(simde_svint16_t inactive, simde_svbool_t pg, int16_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_s16_m(inactive, pg, op);
#else
return simde_svsel_s16(pg, simde_svdup_n_s16(op), inactive);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_s16_m
#define svdup_n_s16_m(inactive, pg, op) simde_svdup_n_s16_m((inactive), (pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svdup_s16_m(simde_svint16_t inactive, simde_svbool_t pg, int16_t op) {
return simde_svdup_n_s16_m(inactive, pg, op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_s16_m
#define svdup_s16_m(inactive, pg, op) simde_svdup_n_s16_m((inactive), (pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svdup_n_s32(int32_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_s32(op);
#else
simde_svint32_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vdupq_n_s32(op);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_set1_epi32(op);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_set1_epi32(op);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_set1_epi32(op);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = vec_splats(op);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i32x4_splat(op);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op;
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_s32
#define svdup_n_s32(op) simde_svdup_n_s32((op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svdup_s32(int32_t op) {
return simde_svdup_n_s32(op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_s32
#define svdup_s32(op) simde_svdup_n_s32((op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svdup_n_s32_z(simde_svbool_t pg, int32_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_s32_z(pg, op);
#else
return simde_x_svsel_s32_z(pg, simde_svdup_n_s32(op));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_s32_z
#define svdup_n_s32_z(pg, op) simde_svdup_n_s32_z((pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svdup_s32_z(simde_svbool_t pg, int32_t op) {
return simde_svdup_n_s32_z(pg, op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_s32_z
#define svdup_s32_z(pg, op) simde_svdup_n_s32_z((pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svdup_n_s32_m(simde_svint32_t inactive, simde_svbool_t pg, int32_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_s32_m(inactive, pg, op);
#else
return simde_svsel_s32(pg, simde_svdup_n_s32(op), inactive);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_s32_m
#define svdup_n_s32_m(inactive, pg, op) simde_svdup_n_s32_m((inactive), (pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svdup_s32_m(simde_svint32_t inactive, simde_svbool_t pg, int32_t op) {
return simde_svdup_n_s32_m(inactive, pg, op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_s32_m
#define svdup_s32_m(inactive, pg, op) simde_svdup_n_s32_m((inactive), (pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svdup_n_s64(int64_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_s64(op);
#else
simde_svint64_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vdupq_n_s64(op);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_set1_epi64(op);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_set1_epi64x(op);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_set1_epi64x(op);
}
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = vec_splats(HEDLEY_STATIC_CAST(signed long long int, op));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i64x2_splat(op);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op;
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_s64
#define svdup_n_s64(op) simde_svdup_n_s64((op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svdup_s64(int64_t op) {
return simde_svdup_n_s64(op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_s64
#define svdup_s64(op) simde_svdup_n_s64((op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svdup_n_s64_z(simde_svbool_t pg, int64_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_s64_z(pg, op);
#else
return simde_x_svsel_s64_z(pg, simde_svdup_n_s64(op));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_s64_z
#define svdup_n_s64_z(pg, op) simde_svdup_n_s64_z((pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svdup_s64_z(simde_svbool_t pg, int64_t op) {
return simde_svdup_n_s64_z(pg, op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_s64_z
  #define svdup_s64_z(pg, op) simde_svdup_n_s64_z((pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svdup_n_s64_m(simde_svint64_t inactive, simde_svbool_t pg, int64_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_s64_m(inactive, pg, op);
#else
return simde_svsel_s64(pg, simde_svdup_n_s64(op), inactive);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_s64_m
#define svdup_n_s64_m(inactive, pg, op) simde_svdup_n_s64_m((inactive), (pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svdup_s64_m(simde_svint64_t inactive, simde_svbool_t pg, int64_t op) {
return simde_svdup_n_s64_m(inactive, pg, op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_s64_m
#define svdup_s64_m(inactive, pg, op) simde_svdup_n_s64_m((inactive), (pg), (op))
#endif
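/* --- Note + sketch (ours, not part of upstream SIMDe) -------------------
 * The AltiVec branch above requires POWER7+ because vec_splats() only
 * gained 64-bit element support there; the cast presumably exists because
 * int64_t may be `long` rather than `long long` on some ABIs, which would
 * not match the vec_splats() prototype.  Scalar model of the cast:
 *
 *   int64_t v = INT64_C(-1);
 *   signed long long int ll = HEDLEY_STATIC_CAST(signed long long int, v);
 * ------------------------------------------------------------------------ */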
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svdup_n_u8(uint8_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_u8(op);
#else
simde_svuint8_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vdupq_n_u8(op);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_set1_epi8(HEDLEY_STATIC_CAST(int8_t, op));
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_set1_epi8(HEDLEY_STATIC_CAST(int8_t, op));
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, op));
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = vec_splats(op);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i8x16_splat(HEDLEY_STATIC_CAST(int8_t, op));
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op;
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_u8
#define svdup_n_u8(op) simde_svdup_n_u8((op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svdup_u8(uint8_t op) {
return simde_svdup_n_u8(op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_u8
#define svdup_u8(op) simde_svdup_n_u8((op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svdup_n_u8_z(simde_svbool_t pg, uint8_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_u8_z(pg, op);
#else
return simde_x_svsel_u8_z(pg, simde_svdup_n_u8(op));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_u8_z
#define svdup_n_u8_z(pg, op) simde_svdup_n_u8_z((pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svdup_u8_z(simde_svbool_t pg, uint8_t op) {
return simde_svdup_n_u8_z(pg, op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_u8_z
#define svdup_u8_z(pg, op) simde_svdup_n_u8_z((pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svdup_n_u8_m(simde_svuint8_t inactive, simde_svbool_t pg, uint8_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_u8_m(inactive, pg, op);
#else
return simde_svsel_u8(pg, simde_svdup_n_u8(op), inactive);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_u8_m
#define svdup_n_u8_m(inactive, pg, op) simde_svdup_n_u8_m((inactive), (pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svdup_u8_m(simde_svuint8_t inactive, simde_svbool_t pg, uint8_t op) {
return simde_svdup_n_u8_m(inactive, pg, op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_u8_m
#define svdup_u8_m(inactive, pg, op) simde_svdup_n_u8_m((inactive), (pg), (op))
#endif
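/* --- Note + sketch (ours, not part of upstream SIMDe) -------------------
 * The x86 branches above cast uint8_t to int8_t because _mm_set1_epi8()
 * and friends take signed arguments; on the two's-complement targets SIMDe
 * supports this preserves the bit pattern, which is all a splat needs:
 *
 *   uint8_t u = 0xF0u;
 *   int8_t  s = HEDLEY_STATIC_CAST(int8_t, u);  // same 8-bit pattern, so
 *   // _mm_set1_epi8(s) fills every byte of the vector with 0xF0.
 * ------------------------------------------------------------------------ */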
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svdup_n_u16(uint16_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_u16(op);
#else
simde_svuint16_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vdupq_n_u16(op);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_set1_epi16(HEDLEY_STATIC_CAST(int16_t, op));
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_set1_epi16(HEDLEY_STATIC_CAST(int16_t, op));
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_set1_epi16(HEDLEY_STATIC_CAST(int16_t, op));
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = vec_splats(op);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i16x8_splat(HEDLEY_STATIC_CAST(int16_t, op));
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op;
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_u16
#define svdup_n_u16(op) simde_svdup_n_u16((op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svdup_u16(uint16_t op) {
return simde_svdup_n_u16(op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_u16
#define svdup_u16(op) simde_svdup_n_u16((op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svdup_n_u16_z(simde_svbool_t pg, uint16_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_u16_z(pg, op);
#else
return simde_x_svsel_u16_z(pg, simde_svdup_n_u16(op));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_u16_z
#define svdup_n_u16_z(pg, op) simde_svdup_n_u16_z((pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svdup_u16_z(simde_svbool_t pg, uint16_t op) {
return simde_svdup_n_u16_z(pg, op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_u16_z
#define svdup_u16_z(pg, op) simde_svdup_n_u16_z((pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svdup_n_u16_m(simde_svuint16_t inactive, simde_svbool_t pg, uint16_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_u16_m(inactive, pg, op);
#else
return simde_svsel_u16(pg, simde_svdup_n_u16(op), inactive);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_u16_m
#define svdup_n_u16_m(inactive, pg, op) simde_svdup_n_u16_m((inactive), (pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svdup_u16_m(simde_svuint16_t inactive, simde_svbool_t pg, uint16_t op) {
return simde_svdup_n_u16_m(inactive, pg, op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_u16_m
#define svdup_u16_m(inactive, pg, op) simde_svdup_n_u16_m((inactive), (pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svdup_n_u32(uint32_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_u32(op);
#else
simde_svuint32_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vdupq_n_u32(op);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_set1_epi32(HEDLEY_STATIC_CAST(int32_t, op));
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_set1_epi32(HEDLEY_STATIC_CAST(int32_t, op));
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_set1_epi32(HEDLEY_STATIC_CAST(int32_t, op));
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = vec_splats(op);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i32x4_splat(HEDLEY_STATIC_CAST(int32_t, op));
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op;
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_u32
#define svdup_n_u32(op) simde_svdup_n_u32((op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svdup_u32(uint32_t op) {
return simde_svdup_n_u32(op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_u32
#define svdup_u32(op) simde_svdup_n_u32((op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svdup_n_u32_z(simde_svbool_t pg, uint32_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_u32_z(pg, op);
#else
return simde_x_svsel_u32_z(pg, simde_svdup_n_u32(op));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_u32_z
#define svdup_n_u32_z(pg, op) simde_svdup_n_u32_z((pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svdup_u32_z(simde_svbool_t pg, uint32_t op) {
return simde_svdup_n_u32_z(pg, op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_u32_z
#define svdup_u32_z(pg, op) simde_svdup_n_u32_z((pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svdup_n_u32_m(simde_svuint32_t inactive, simde_svbool_t pg, uint32_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_u32_m(inactive, pg, op);
#else
return simde_svsel_u32(pg, simde_svdup_n_u32(op), inactive);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_u32_m
#define svdup_n_u32_m(inactive, pg, op) simde_svdup_n_u32_m((inactive), (pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svdup_u32_m(simde_svuint32_t inactive, simde_svbool_t pg, uint32_t op) {
return simde_svdup_n_u32_m(inactive, pg, op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_u32_m
#define svdup_u32_m(inactive, pg, op) simde_svdup_n_u32_m((inactive), (pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svdup_n_u64(uint64_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_u64(op);
#else
simde_svuint64_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vdupq_n_u64(op);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_set1_epi64(HEDLEY_STATIC_CAST(int64_t, op));
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_set1_epi64x(HEDLEY_STATIC_CAST(int64_t, op));
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_set1_epi64x(HEDLEY_STATIC_CAST(int64_t, op));
}
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = vec_splats(HEDLEY_STATIC_CAST(unsigned long long int, op));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i64x2_splat(HEDLEY_STATIC_CAST(int64_t, op));
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op;
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_u64
#define svdup_n_u64(op) simde_svdup_n_u64((op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svdup_u64(uint64_t op) {
return simde_svdup_n_u64(op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_u64
#define svdup_u64(op) simde_svdup_n_u64((op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svdup_n_u64_z(simde_svbool_t pg, uint64_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_u64_z(pg, op);
#else
return simde_x_svsel_u64_z(pg, simde_svdup_n_u64(op));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_u64_z
#define svdup_n_u64_z(pg, op) simde_svdup_n_u64_z((pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svdup_u64_z(simde_svbool_t pg, uint64_t op) {
return simde_svdup_n_u64_z(pg, op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_u64_z
  #define svdup_u64_z(pg, op) simde_svdup_n_u64_z((pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svdup_n_u64_m(simde_svuint64_t inactive, simde_svbool_t pg, uint64_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_u64_m(inactive, pg, op);
#else
return simde_svsel_u64(pg, simde_svdup_n_u64(op), inactive);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_u64_m
#define svdup_n_u64_m(inactive, pg, op) simde_svdup_n_u64_m((inactive), (pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svdup_u64_m(simde_svuint64_t inactive, simde_svbool_t pg, uint64_t op) {
return simde_svdup_n_u64_m(inactive, pg, op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_u64_m
#define svdup_u64_m(inactive, pg, op) simde_svdup_n_u64_m((inactive), (pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat32_t
simde_svdup_n_f32(simde_float32 op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_f32(op);
#else
simde_svfloat32_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vdupq_n_f32(op);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512 = _mm512_set1_ps(op);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256) / sizeof(r.m256[0])) ; i++) {
r.m256[i] = _mm256_set1_ps(op);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128) / sizeof(r.m128[0])) ; i++) {
r.m128[i] = _mm_set1_ps(op);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r.altivec = vec_splats(op);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_f32x4_splat(op);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op;
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_f32
#define svdup_n_f32(op) simde_svdup_n_f32((op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat32_t
simde_svdup_f32(simde_float32 op) {
return simde_svdup_n_f32(op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_f32
#define svdup_f32(op) simde_svdup_n_f32((op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat32_t
simde_svdup_n_f32_z(simde_svbool_t pg, simde_float32 op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_f32_z(pg, op);
#else
return simde_x_svsel_f32_z(pg, simde_svdup_n_f32(op));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_f32_z
#define svdup_n_f32_z(pg, op) simde_svdup_n_f32_z((pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat32_t
simde_svdup_f32_z(simde_svbool_t pg, simde_float32 op) {
return simde_svdup_n_f32_z(pg, op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_f32_z
#define svdup_f32_z(pg, op) simde_svdup_n_f32_z((pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat32_t
simde_svdup_n_f32_m(simde_svfloat32_t inactive, simde_svbool_t pg, simde_float32_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_f32_m(inactive, pg, op);
#else
return simde_svsel_f32(pg, simde_svdup_n_f32(op), inactive);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_f32_m
#define svdup_n_f32_m(inactive, pg, op) simde_svdup_n_f32_m((inactive), (pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat32_t
simde_svdup_f32_m(simde_svfloat32_t inactive, simde_svbool_t pg, simde_float32_t op) {
return simde_svdup_n_f32_m(inactive, pg, op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_f32_m
#define svdup_f32_m(inactive, pg, op) simde_svdup_n_f32_m((inactive), (pg), (op))
#endif
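/* --- Illustrative sketch (ours, not part of upstream SIMDe) -------------
 * simde_svdup_f32() simply forwards to simde_svdup_n_f32(); a minimal
 * usage sketch, assuming SIMDE_FLOAT32_C() from simde-common.h:
 *
 *   simde_svfloat32_t half = simde_svdup_f32(SIMDE_FLOAT32_C(0.5));
 * ------------------------------------------------------------------------ */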
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat64_t
simde_svdup_n_f64(simde_float64 op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_f64(op);
#else
simde_svfloat64_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r.neon = vdupq_n_f64(op);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512d = _mm512_set1_pd(op);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256d) / sizeof(r.m256d[0])) ; i++) {
r.m256d[i] = _mm256_set1_pd(op);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128d) / sizeof(r.m128d[0])) ; i++) {
r.m128d[i] = _mm_set1_pd(op);
}
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = vec_splats(op);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_f64x2_splat(op);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = op;
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_f64
#define svdup_n_f64(op) simde_svdup_n_f64((op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat64_t
simde_svdup_f64(simde_float64 op) {
return simde_svdup_n_f64(op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_f64
#define svdup_f64(op) simde_svdup_n_f64((op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat64_t
simde_svdup_n_f64_z(simde_svbool_t pg, simde_float64 op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_f64_z(pg, op);
#else
return simde_x_svsel_f64_z(pg, simde_svdup_n_f64(op));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_f64_z
#define svdup_n_f64_z(pg, op) simde_svdup_n_f64_z((pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat64_t
simde_svdup_f64_z(simde_svbool_t pg, simde_float64 op) {
return simde_svdup_n_f64_z(pg, op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_f64_z
#define svdup_f64_z(pg, op) simde_svdup_n_f64_z((pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat64_t
simde_svdup_n_f64_m(simde_svfloat64_t inactive, simde_svbool_t pg, simde_float64_t op) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svdup_n_f64_m(inactive, pg, op);
#else
return simde_svsel_f64(pg, simde_svdup_n_f64(op), inactive);
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_n_f64_m
#define svdup_n_f64_m(inactive, pg, op) simde_svdup_n_f64_m((inactive), (pg), (op))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat64_t
simde_svdup_f64_m(simde_svfloat64_t inactive, simde_svbool_t pg, simde_float64_t op) {
return simde_svdup_n_f64_m(inactive, pg, op);
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svdup_f64_m
#define svdup_f64_m(inactive, pg, op) simde_svdup_n_f64_m((inactive), (pg), (op))
#endif
#if defined(__cplusplus)
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svdup_n ( int8_t op) { return simde_svdup_n_s8 ( op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svdup ( int8_t op) { return simde_svdup_n_s8 ( op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svdup_n_z(simde_svbool_t pg, int8_t op) { return simde_svdup_n_s8_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svdup_z (simde_svbool_t pg, int8_t op) { return simde_svdup_n_s8_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svdup_n ( int16_t op) { return simde_svdup_n_s16 ( op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svdup ( int16_t op) { return simde_svdup_n_s16 ( op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svdup_n_z(simde_svbool_t pg, int16_t op) { return simde_svdup_n_s16_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svdup_z (simde_svbool_t pg, int16_t op) { return simde_svdup_n_s16_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svdup_n ( int32_t op) { return simde_svdup_n_s32 ( op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svdup ( int32_t op) { return simde_svdup_n_s32 ( op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svdup_n_z(simde_svbool_t pg, int32_t op) { return simde_svdup_n_s32_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svdup_z (simde_svbool_t pg, int32_t op) { return simde_svdup_n_s32_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svdup_n ( int64_t op) { return simde_svdup_n_s64 ( op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svdup ( int64_t op) { return simde_svdup_n_s64 ( op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svdup_n_z(simde_svbool_t pg, int64_t op) { return simde_svdup_n_s64_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svdup_z (simde_svbool_t pg, int64_t op) { return simde_svdup_n_s64_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svdup_n ( uint8_t op) { return simde_svdup_n_u8 ( op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svdup ( uint8_t op) { return simde_svdup_n_u8 ( op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svdup_n_z(simde_svbool_t pg, uint8_t op) { return simde_svdup_n_u8_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svdup_z (simde_svbool_t pg, uint8_t op) { return simde_svdup_n_u8_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svdup_n ( uint16_t op) { return simde_svdup_n_u16 ( op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svdup ( uint16_t op) { return simde_svdup_n_u16 ( op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svdup_n_z(simde_svbool_t pg, uint16_t op) { return simde_svdup_n_u16_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svdup_z (simde_svbool_t pg, uint16_t op) { return simde_svdup_n_u16_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svdup_n ( uint32_t op) { return simde_svdup_n_u32 ( op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svdup ( uint32_t op) { return simde_svdup_n_u32 ( op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svdup_n_z(simde_svbool_t pg, uint32_t op) { return simde_svdup_n_u32_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svdup_z (simde_svbool_t pg, uint32_t op) { return simde_svdup_n_u32_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svdup_n ( uint64_t op) { return simde_svdup_n_u64 ( op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svdup ( uint64_t op) { return simde_svdup_n_u64 ( op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svdup_n_z(simde_svbool_t pg, uint64_t op) { return simde_svdup_n_u64_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svdup_z (simde_svbool_t pg, uint64_t op) { return simde_svdup_n_u64_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svdup_n ( simde_float32 op) { return simde_svdup_n_f32 ( op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svdup ( simde_float32 op) { return simde_svdup_n_f32 ( op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svdup_n_z(simde_svbool_t pg, simde_float32 op) { return simde_svdup_n_f32_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svdup_z (simde_svbool_t pg, simde_float32 op) { return simde_svdup_n_f32_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svdup_n ( simde_float64 op) { return simde_svdup_n_f64 ( op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svdup ( simde_float64 op) { return simde_svdup_n_f64 ( op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svdup_n_z(simde_svbool_t pg, simde_float64 op) { return simde_svdup_n_f64_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svdup_z (simde_svbool_t pg, simde_float64 op) { return simde_svdup_n_f64_z (pg, op); }
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
SIMDE_FUNCTION_ATTRIBUTES svint8_t svdup_n ( int8_t op) { return svdup_n_s8 ( op); }
SIMDE_FUNCTION_ATTRIBUTES svint8_t svdup ( int8_t op) { return svdup_n_s8 ( op); }
SIMDE_FUNCTION_ATTRIBUTES svint8_t svdup_n_z(svbool_t pg, int8_t op) { return svdup_n_s8_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES svint8_t svdup_z (svbool_t pg, int8_t op) { return svdup_n_s8_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES svint16_t svdup_n ( int16_t op) { return svdup_n_s16 ( op); }
SIMDE_FUNCTION_ATTRIBUTES svint16_t svdup ( int16_t op) { return svdup_n_s16 ( op); }
SIMDE_FUNCTION_ATTRIBUTES svint16_t svdup_n_z(svbool_t pg, int16_t op) { return svdup_n_s16_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES svint16_t svdup_z (svbool_t pg, int16_t op) { return svdup_n_s16_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES svint32_t svdup_n ( int32_t op) { return svdup_n_s32 ( op); }
SIMDE_FUNCTION_ATTRIBUTES svint32_t svdup ( int32_t op) { return svdup_n_s32 ( op); }
SIMDE_FUNCTION_ATTRIBUTES svint32_t svdup_n_z(svbool_t pg, int32_t op) { return svdup_n_s32_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES svint32_t svdup_z (svbool_t pg, int32_t op) { return svdup_n_s32_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES svint64_t svdup_n ( int64_t op) { return svdup_n_s64 ( op); }
SIMDE_FUNCTION_ATTRIBUTES svint64_t svdup ( int64_t op) { return svdup_n_s64 ( op); }
SIMDE_FUNCTION_ATTRIBUTES svint64_t svdup_n_z(svbool_t pg, int64_t op) { return svdup_n_s64_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES svint64_t svdup_z (svbool_t pg, int64_t op) { return svdup_n_s64_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES svuint8_t svdup_n ( uint8_t op) { return svdup_n_u8 ( op); }
SIMDE_FUNCTION_ATTRIBUTES svuint8_t svdup ( uint8_t op) { return svdup_n_u8 ( op); }
SIMDE_FUNCTION_ATTRIBUTES svuint8_t svdup_n_z(svbool_t pg, uint8_t op) { return svdup_n_u8_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES svuint8_t svdup_z (svbool_t pg, uint8_t op) { return svdup_n_u8_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES svuint16_t svdup_n ( uint16_t op) { return svdup_n_u16 ( op); }
SIMDE_FUNCTION_ATTRIBUTES svuint16_t svdup ( uint16_t op) { return svdup_n_u16 ( op); }
SIMDE_FUNCTION_ATTRIBUTES svuint16_t svdup_n_z(svbool_t pg, uint16_t op) { return svdup_n_u16_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES svuint16_t svdup_z (svbool_t pg, uint16_t op) { return svdup_n_u16_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES svuint32_t svdup_n ( uint32_t op) { return svdup_n_u32 ( op); }
SIMDE_FUNCTION_ATTRIBUTES svuint32_t svdup ( uint32_t op) { return svdup_n_u32 ( op); }
SIMDE_FUNCTION_ATTRIBUTES svuint32_t svdup_n_z(svbool_t pg, uint32_t op) { return svdup_n_u32_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES svuint32_t svdup_z (svbool_t pg, uint32_t op) { return svdup_n_u32_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES svuint64_t svdup_n ( uint64_t op) { return svdup_n_u64 ( op); }
SIMDE_FUNCTION_ATTRIBUTES svuint64_t svdup ( uint64_t op) { return svdup_n_u64 ( op); }
SIMDE_FUNCTION_ATTRIBUTES svuint64_t svdup_n_z(svbool_t pg, uint64_t op) { return svdup_n_u64_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES svuint64_t svdup_z (svbool_t pg, uint64_t op) { return svdup_n_u64_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES svfloat32_t svdup_n ( simde_float32 op) { return svdup_n_f32 ( op); }
SIMDE_FUNCTION_ATTRIBUTES svfloat32_t svdup ( simde_float32 op) { return svdup_n_f32 ( op); }
SIMDE_FUNCTION_ATTRIBUTES svfloat32_t svdup_n_z(svbool_t pg, simde_float32 op) { return svdup_n_f32_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES svfloat32_t svdup_z (svbool_t pg, simde_float32 op) { return svdup_n_f32_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES svfloat64_t svdup_n ( simde_float64 op) { return svdup_n_f64 ( op); }
SIMDE_FUNCTION_ATTRIBUTES svfloat64_t svdup ( simde_float64 op) { return svdup_n_f64 ( op); }
SIMDE_FUNCTION_ATTRIBUTES svfloat64_t svdup_n_z(svbool_t pg, simde_float64 op) { return svdup_n_f64_z (pg, op); }
SIMDE_FUNCTION_ATTRIBUTES svfloat64_t svdup_z (svbool_t pg, simde_float64 op) { return svdup_n_f64_z (pg, op); }
#endif
#elif defined(SIMDE_GENERIC_)
#define simde_svdup_n(op) \
(SIMDE_GENERIC_((op), \
int8_t: simde_svdup_n_s8, \
int16_t: simde_svdup_n_s16, \
int32_t: simde_svdup_n_s32, \
int64_t: simde_svdup_n_s64, \
uint8_t: simde_svdup_n_u8, \
uint16_t: simde_svdup_n_u16, \
uint32_t: simde_svdup_n_u32, \
uint64_t: simde_svdup_n_u64, \
        simde_float32: simde_svdup_n_f32, \
        simde_float64: simde_svdup_n_f64)((op)))
#define simde_svdup(op) simde_svdup_n((op))
#define simde_svdup_n_z(pg, op) \
(SIMDE_GENERIC_((op), \
int8_t: simde_svdup_n_s8_z, \
int16_t: simde_svdup_n_s16_z, \
int32_t: simde_svdup_n_s32_z, \
int64_t: simde_svdup_n_s64_z, \
        uint8_t: simde_svdup_n_u8_z, \
uint16_t: simde_svdup_n_u16_z, \
uint32_t: simde_svdup_n_u32_z, \
uint64_t: simde_svdup_n_u64_z, \
        simde_float32: simde_svdup_n_f32_z, \
        simde_float64: simde_svdup_n_f64_z)((pg), (op)))
#define simde_svdup_z(pg, op) simde_svdup_n_z((pg), (op))
#endif
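/* --- Illustrative sketch (ours, not part of upstream SIMDe) -------------
 * With either the C++ overloads or the C11 SIMDE_GENERIC_ table above, the
 * unsuffixed spelling infers the element type from the scalar argument, so
 * the argument's type must match a table entry exactly:
 *
 *   simde_svint32_t   a = simde_svdup_n(HEDLEY_STATIC_CAST(int32_t, 7));
 *   simde_svfloat64_t d = simde_svdup_n(1.0);  // simde_float64 is double by default
 * ------------------------------------------------------------------------ */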
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef svdup
#undef svdup_z
#undef svdup_n
#undef svdup_n_z
#define svdup_n(op) simde_svdup_n((op))
#define svdup_n_z(pg, op) simde_svdup_n_z((pg), (op))
#define svdup(op) simde_svdup((op))
#define svdup_z(pg, op) simde_svdup_z((pg), (op))
#endif
HEDLEY_DIAGNOSTIC_POP
#endif /* SIMDE_ARM_SVE_DUP_H */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/sve/sel.h | .h | 28,614 | 627 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_SVE_SEL_H)
#define SIMDE_ARM_SVE_SEL_H
#include "types.h"
#include "reinterpret.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_x_svsel_s8_z(simde_svbool_t pg, simde_svint8_t op1) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_s8_z(pg, op1, op1);
#else
simde_svint8_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vandq_s8(pg.neon_i8, op1.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m512i = _mm512_maskz_mov_epi8(simde_svbool_to_mmask64(pg), op1.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m256i[0] = _mm256_maskz_mov_epi8(simde_svbool_to_mmask32(pg), op1.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_and_si256(pg.m256i[i], op1.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_and_si128(pg.m128i[i], op1.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_and(pg.altivec_b8, op1.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = pg.values_i8 & op1.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_v128_and(pg.v128, op1.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = pg.values_i8 & op1.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = pg.values_i8[i] & op1.values[i];
}
#endif
return r;
#endif
}
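/* --- Note + sketch (ours, not part of upstream SIMDe) -------------------
 * In this emulation an active predicate lane is all-ones and an inactive
 * lane is all-zeros, so "keep the active lanes of op1, zero the rest"
 * reduces to a bitwise AND.  Per-lane scalar model:
 *
 *   r[i] = (pg[i] ? ~0 : 0) & op1[i];
 * ------------------------------------------------------------------------ */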
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svsel_s8(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsel_s8(pg, op1, op2);
#else
simde_svint8_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vbslq_s8(pg.neon_u8, op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m512i = _mm512_mask_mov_epi8(op2.m512i, simde_svbool_to_mmask64(pg), op1.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m256i[0] = _mm256_mask_mov_epi8(op2.m256i[0], simde_svbool_to_mmask32(pg), op1.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_blendv_epi8(op2.m256i[i], op1.m256i[i], pg.m256i[i]);
}
#elif defined(SIMDE_X86_SSE4_1_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_blendv_epi8(op2.m128i[i], op1.m128i[i], pg.m128i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_or_si128(_mm_and_si128(pg.m128i[i], op1.m128i[i]), _mm_andnot_si128(pg.m128i[i], op2.m128i[i]));
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = vec_sel(op2.altivec, op1.altivec, pg.altivec_b8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_v128_bitselect(op1.v128, op2.v128, pg.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = (pg.values_i8 & op1.values) | (~pg.values_i8 & op2.values);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = (pg.values_i8[i] & op1.values[i]) | (~pg.values_i8[i] & op2.values[i]);
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsel_s8
#define svsel_s8(pg, op1, op2) simde_svsel_s8(pg, op1, op2)
#endif
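/* --- Note + sketch (ours, not part of upstream SIMDe) -------------------
 * Every fallback in svsel above is a spelling of the branch-free select
 * r = (mask & op1) | (~mask & op2), with mask all-ones in active lanes.
 * Per-lane scalar model:
 *
 *   int8_t sel_lane(int8_t m, int8_t a, int8_t b) {  // m is 0 or -1
 *     return HEDLEY_STATIC_CAST(int8_t, (m & a) | (~m & b));
 *   }
 * ------------------------------------------------------------------------ */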
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_x_svsel_s16_z(simde_svbool_t pg, simde_svint16_t op1) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_s16_z(pg, op1, op1);
#else
simde_svint16_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vandq_s16(pg.neon_i16, op1.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m512i = _mm512_maskz_mov_epi16(simde_svbool_to_mmask32(pg), op1.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m256i[0] = _mm256_maskz_mov_epi16(simde_svbool_to_mmask16(pg), op1.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_and_si256(pg.m256i[i], op1.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_and_si128(pg.m128i[i], op1.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_and(pg.altivec_b16, op1.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = pg.values_i16 & op1.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_v128_and(pg.v128, op1.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = pg.values_i16 & op1.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = pg.values_i16[i] & op1.values[i];
}
#endif
return r;
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svsel_s16(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsel_s16(pg, op1, op2);
#else
simde_svint16_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vbslq_s16(pg.neon_u16, op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m512i = _mm512_mask_mov_epi16(op2.m512i, simde_svbool_to_mmask32(pg), op1.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m256i[0] = _mm256_mask_mov_epi16(op2.m256i[0], simde_svbool_to_mmask16(pg), op1.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_blendv_epi8(op2.m256i[i], op1.m256i[i], pg.m256i[i]);
}
#elif defined(SIMDE_X86_SSE4_1_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_blendv_epi8(op2.m128i[i], op1.m128i[i], pg.m128i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_or_si128(_mm_and_si128(pg.m128i[i], op1.m128i[i]), _mm_andnot_si128(pg.m128i[i], op2.m128i[i]));
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = vec_sel(op2.altivec, op1.altivec, pg.altivec_b16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_v128_bitselect(op1.v128, op2.v128, pg.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = (pg.values_i16 & op1.values) | (~pg.values_i16 & op2.values);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = (pg.values_i16[i] & op1.values[i]) | (~pg.values_i16[i] & op2.values[i]);
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsel_s16
#define svsel_s16(pg, op1, op2) simde_svsel_s16(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_x_svsel_s32_z(simde_svbool_t pg, simde_svint32_t op1) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_s32_z(pg, op1, op1);
#else
simde_svint32_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vandq_s32(pg.neon_i32, op1.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m512i = _mm512_maskz_mov_epi32(simde_svbool_to_mmask16(pg), op1.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m256i[0] = _mm256_maskz_mov_epi32(simde_svbool_to_mmask8(pg), op1.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_and_si256(pg.m256i[i], op1.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_and_si128(pg.m128i[i], op1.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_and(pg.altivec_b32, op1.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = pg.values_i32 & op1.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_v128_and(pg.v128, op1.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = pg.values_i32 & op1.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = pg.values_i32[i] & op1.values[i];
}
#endif
return r;
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svsel_s32(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsel_s32(pg, op1, op2);
#else
simde_svint32_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vbslq_s32(pg.neon_u32, op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m512i = _mm512_mask_mov_epi32(op2.m512i, simde_svbool_to_mmask16(pg), op1.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m256i[0] = _mm256_mask_mov_epi32(op2.m256i[0], simde_svbool_to_mmask8(pg), op1.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_blendv_epi8(op2.m256i[i], op1.m256i[i], pg.m256i[i]);
}
#elif defined(SIMDE_X86_SSE4_1_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_blendv_epi8(op2.m128i[i], op1.m128i[i], pg.m128i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_or_si128(_mm_and_si128(pg.m128i[i], op1.m128i[i]), _mm_andnot_si128(pg.m128i[i], op2.m128i[i]));
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = vec_sel(op2.altivec, op1.altivec, pg.altivec_b32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_v128_bitselect(op1.v128, op2.v128, pg.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = (pg.values_i32 & op1.values) | (~pg.values_i32 & op2.values);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = (pg.values_i32[i] & op1.values[i]) | (~pg.values_i32[i] & op2.values[i]);
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsel_s32
#define svsel_s32(pg, op1, op2) simde_svsel_s32(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_x_svsel_s64_z(simde_svbool_t pg, simde_svint64_t op1) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_s64_z(pg, op1, op1);
#else
simde_svint64_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vandq_s64(pg.neon_i64, op1.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m512i = _mm512_maskz_mov_epi64(simde_svbool_to_mmask8(pg), op1.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m256i[0] = _mm256_maskz_mov_epi64(simde_svbool_to_mmask4(pg), op1.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_and_si256(pg.m256i[i], op1.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_and_si128(pg.m128i[i], op1.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r.altivec = vec_and(pg.altivec_b64, op1.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec = HEDLEY_REINTERPRET_CAST(__typeof__(op1.altivec), pg.values_i64) & op1.altivec;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_v128_and(pg.v128, op1.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = pg.values_i64 & op1.values;
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = pg.values_i64[i] & op1.values[i];
}
#endif
return r;
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svsel_s64(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsel_s64(pg, op1, op2);
#else
simde_svint64_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vbslq_s64(pg.neon_u64, op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m512i = _mm512_mask_mov_epi64(op2.m512i, simde_svbool_to_mmask8(pg), op1.m512i);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
r.m256i[0] = _mm256_mask_mov_epi64(op2.m256i[0], simde_svbool_to_mmask4(pg), op1.m256i[0]);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_blendv_epi8(op2.m256i[i], op1.m256i[i], pg.m256i[i]);
}
#elif defined(SIMDE_X86_SSE4_1_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_blendv_epi8(op2.m128i[i], op1.m128i[i], pg.m128i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_or_si128(_mm_and_si128(pg.m128i[i], op1.m128i[i]), _mm_andnot_si128(pg.m128i[i], op2.m128i[i]));
}
#elif (defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)) && !defined(SIMDE_BUG_CLANG_46770)
r.altivec = vec_sel(op2.altivec, op1.altivec, pg.altivec_b64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_v128_bitselect(op1.v128, op2.v128, pg.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r.values = (pg.values_i64 & op1.values) | (~pg.values_i64 & op2.values);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = (pg.values_i64[i] & op1.values[i]) | (~pg.values_i64[i] & op2.values[i]);
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsel_s64
#define svsel_s64(pg, op1, op2) simde_svsel_s64(pg, op1, op2)
#endif
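/* The unsigned (and, further below, floating-point) variants reinterpret to
 * the signed element type and reuse the signed implementations; this is safe
 * because select is a pure bitwise operation that never inspects element
 * values.  Where a direct masked-move intrinsic exists (AVX-512), it is used
 * instead to avoid the round trip. */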
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_x_svsel_u8_z(simde_svbool_t pg, simde_svuint8_t op1) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_u8_z(pg, op1, op1);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && ((SIMDE_ARM_SVE_VECTOR_SIZE >= 512) || defined(SIMDE_X86_AVX512VL_NATIVE)) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
simde_svuint8_t r;
#if SIMDE_ARM_SVE_VECTOR_SIZE >= 512
r.m512i = _mm512_maskz_mov_epi8(simde_svbool_to_mmask64(pg), op1.m512i);
#else
r.m256i[0] = _mm256_maskz_mov_epi8(simde_svbool_to_mmask32(pg), op1.m256i[0]);
#endif
return r;
#else
return simde_svreinterpret_u8_s8(simde_x_svsel_s8_z(pg, simde_svreinterpret_s8_u8(op1)));
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svsel_u8(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsel_u8(pg, op1, op2);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && ((SIMDE_ARM_SVE_VECTOR_SIZE >= 512) || defined(SIMDE_X86_AVX512VL_NATIVE)) \
&& (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
simde_svuint8_t r;
#if SIMDE_ARM_SVE_VECTOR_SIZE >= 512
r.m512i = _mm512_mask_mov_epi8(op2.m512i, simde_svbool_to_mmask64(pg), op1.m512i);
#else
r.m256i[0] = _mm256_mask_mov_epi8(op2.m256i[0], simde_svbool_to_mmask32(pg), op1.m256i[0]);
#endif
return r;
#else
return simde_svreinterpret_u8_s8(simde_svsel_s8(pg, simde_svreinterpret_s8_u8(op1), simde_svreinterpret_s8_u8(op2)));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsel_u8
#define svsel_u8(pg, op1, op2) simde_svsel_u8(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_x_svsel_u16_z(simde_svbool_t pg, simde_svuint16_t op1) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_u16_z(pg, op1, op1);
#else
return simde_svreinterpret_u16_s16(simde_x_svsel_s16_z(pg, simde_svreinterpret_s16_u16(op1)));
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svsel_u16(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsel_u16(pg, op1, op2);
#else
return simde_svreinterpret_u16_s16(simde_svsel_s16(pg, simde_svreinterpret_s16_u16(op1), simde_svreinterpret_s16_u16(op2)));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsel_u16
#define svsel_u16(pg, op1, op2) simde_svsel_u16(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_x_svsel_u32_z(simde_svbool_t pg, simde_svuint32_t op1) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_u32_z(pg, op1, op1);
#else
return simde_svreinterpret_u32_s32(simde_x_svsel_s32_z(pg, simde_svreinterpret_s32_u32(op1)));
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svsel_u32(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsel_u32(pg, op1, op2);
#else
return simde_svreinterpret_u32_s32(simde_svsel_s32(pg, simde_svreinterpret_s32_u32(op1), simde_svreinterpret_s32_u32(op2)));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsel_u32
#define svsel_u32(pg, op1, op2) simde_svsel_u32(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_x_svsel_u64_z(simde_svbool_t pg, simde_svuint64_t op1) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svand_u64_z(pg, op1, op1);
#else
return simde_svreinterpret_u64_s64(simde_x_svsel_s64_z(pg, simde_svreinterpret_s64_u64(op1)));
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svsel_u64(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsel_u64(pg, op1, op2);
#else
return simde_svreinterpret_u64_s64(simde_svsel_s64(pg, simde_svreinterpret_s64_u64(op1), simde_svreinterpret_s64_u64(op2)));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsel_u64
#define svsel_u64(pg, op1, op2) simde_svsel_u64(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat32_t
simde_x_svsel_f32_z(simde_svbool_t pg, simde_svfloat32_t op1) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return simde_svreinterpret_f32_s32(svand_s32_z(pg, simde_svreinterpret_s32_f32(op1), simde_svreinterpret_s32_f32(op1)));
#else
return simde_svreinterpret_f32_s32(simde_x_svsel_s32_z(pg, simde_svreinterpret_s32_f32(op1)));
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat32_t
simde_svsel_f32(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsel_f32(pg, op1, op2);
#else
return simde_svreinterpret_f32_s32(simde_svsel_s32(pg, simde_svreinterpret_s32_f32(op1), simde_svreinterpret_s32_f32(op2)));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsel_f32
#define svsel_f32(pg, op1, op2) simde_svsel_f32(pg, op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat64_t
simde_x_svsel_f64_z(simde_svbool_t pg, simde_svfloat64_t op1) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return simde_svreinterpret_f64_s64(svand_s64_z(pg, simde_svreinterpret_s64_f64(op1), simde_svreinterpret_s64_f64(op1)));
#else
return simde_svreinterpret_f64_s64(simde_x_svsel_s64_z(pg, simde_svreinterpret_s64_f64(op1)));
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde_svfloat64_t
simde_svsel_f64(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svsel_f64(pg, op1, op2);
#else
return simde_svreinterpret_f64_s64(simde_svsel_s64(pg, simde_svreinterpret_s64_f64(op1), simde_svreinterpret_s64_f64(op2)));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svsel_f64
#define svsel_f64(pg, op1, op2) simde_svsel_f64(pg, op1, op2)
#endif
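/* Type-generic entry points: C++ gets a set of overloads, C11 gets a
 * SIMDE_GENERIC_ (_Generic) dispatch on the type of op1.  Both forward to
 * the typed functions above. */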
#if defined(__cplusplus)
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_x_svsel_z(simde_svbool_t pg, simde_svint8_t op1) { return simde_x_svsel_s8_z (pg, op1); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_x_svsel_z(simde_svbool_t pg, simde_svint16_t op1) { return simde_x_svsel_s16_z(pg, op1); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_x_svsel_z(simde_svbool_t pg, simde_svint32_t op1) { return simde_x_svsel_s32_z(pg, op1); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_x_svsel_z(simde_svbool_t pg, simde_svint64_t op1) { return simde_x_svsel_s64_z(pg, op1); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_x_svsel_z(simde_svbool_t pg, simde_svuint8_t op1) { return simde_x_svsel_u8_z (pg, op1); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_x_svsel_z(simde_svbool_t pg, simde_svuint16_t op1) { return simde_x_svsel_u16_z(pg, op1); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_x_svsel_z(simde_svbool_t pg, simde_svuint32_t op1) { return simde_x_svsel_u32_z(pg, op1); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_x_svsel_z(simde_svbool_t pg, simde_svuint64_t op1) { return simde_x_svsel_u64_z(pg, op1); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_x_svsel_z(simde_svbool_t pg, simde_svfloat32_t op1) { return simde_x_svsel_f32_z(pg, op1); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_x_svsel_z(simde_svbool_t pg, simde_svfloat64_t op1) { return simde_x_svsel_f64_z(pg, op1); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svsel(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { return simde_svsel_s8 (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svsel(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { return simde_svsel_s16(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svsel(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { return simde_svsel_s32(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svsel(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { return simde_svsel_s64(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svsel(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svsel_u8 (pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svsel(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svsel_u16(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svsel(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svsel_u32(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svsel(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { return simde_svsel_u64(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svsel(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) { return simde_svsel_f32(pg, op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svsel(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { return simde_svsel_f64(pg, op1, op2); }
#elif defined(SIMDE_GENERIC_)
#define simde_x_svsel_z(pg, op1) \
(SIMDE_GENERIC_((op1), \
simde_svint8_t: simde_x_svsel_s8_z, \
simde_svint16_t: simde_x_svsel_s16_z, \
simde_svint32_t: simde_x_svsel_s32_z, \
simde_svint64_t: simde_x_svsel_s64_z, \
simde_svuint8_t: simde_x_svsel_u8_z, \
simde_svuint16_t: simde_x_svsel_u16_z, \
simde_svuint32_t: simde_x_svsel_u32_z, \
simde_svuint64_t: simde_x_svsel_u64_z, \
simde_svfloat32_t: simde_x_svsel_f32_z, \
simde_svfloat64_t: simde_x_svsel_f64_z)((pg), (op1)))
#define simde_svsel(pg, op1, op2) \
(SIMDE_GENERIC_((op1), \
simde_svint8_t: simde_svsel_s8, \
simde_svint16_t: simde_svsel_s16, \
simde_svint32_t: simde_svsel_s32, \
simde_svint64_t: simde_svsel_s64, \
simde_svuint8_t: simde_svsel_u8, \
simde_svuint16_t: simde_svsel_u16, \
simde_svuint32_t: simde_svsel_u32, \
simde_svuint64_t: simde_svsel_u64, \
simde_svfloat32_t: simde_svsel_f32, \
simde_svfloat64_t: simde_svsel_f64)((pg), (op1), (op2)))
#endif
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef svsel
  #define svsel(pg, op1, op2) simde_svsel((pg), (op1), (op2))
#endif
HEDLEY_DIAGNOSTIC_POP
#endif /* SIMDE_ARM_SVE_SEL_H */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/sve/qadd.h | .h | 18,612 | 499 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_SVE_QADD_H)
#define SIMDE_ARM_SVE_QADD_H
#include "types.h"
#include "sel.h"
#include "dup.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
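/* svqadd: saturating addition.  Instead of wrapping on overflow, the result
 * clamps to the representable range of the element type; e.g. for int8_t
 * 100 + 100 yields 127 (INT8_MAX), and for uint8_t 200 + 100 yields 255. */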
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svqadd_s8(simde_svint8_t op1, simde_svint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svqadd_s8(op1, op2);
#else
simde_svint8_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vqaddq_s8(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_adds_epi8(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_adds_epi8(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_adds_epi8(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_adds(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
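      /* No direct saturating add here: widen each half to the next larger
       * element type, add without risk of overflow, then narrow back with a
       * saturating pack. */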
r.altivec =
vec_packs(
vec_unpackh(op1.altivec) + vec_unpackh(op2.altivec),
vec_unpackl(op1.altivec) + vec_unpackl(op2.altivec)
);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i8x16_add_sat(op1.v128, op2.v128);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = simde_math_adds_i8(op1.values[i], op2.values[i]);
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svqadd_s8
#define svqadd_s8(op1, op2) simde_svqadd_s8(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint8_t
simde_svqadd_n_s8(simde_svint8_t op1, int8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svqadd_n_s8(op1, op2);
#else
return simde_svqadd_s8(op1, simde_svdup_n_s8(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svqadd_n_s8
#define svqadd_n_s8(op1, op2) simde_svqadd_n_s8(op1, op2)
#endif
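/* The _n_ variants add a scalar to every lane; the fallback simply
 * broadcasts the scalar with svdup and defers to the vector-vector form. */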
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svqadd_s16(simde_svint16_t op1, simde_svint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svqadd_s16(op1, op2);
#else
simde_svint16_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vqaddq_s16(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_adds_epi16(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_adds_epi16(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_adds_epi16(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_adds(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec =
vec_packs(
vec_unpackh(op1.altivec) + vec_unpackh(op2.altivec),
vec_unpackl(op1.altivec) + vec_unpackl(op2.altivec)
);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_i16x8_add_sat(op1.v128, op2.v128);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = simde_math_adds_i16(op1.values[i], op2.values[i]);
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svqadd_s16
#define svqadd_s16(op1, op2) simde_svqadd_s16(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint16_t
simde_svqadd_n_s16(simde_svint16_t op1, int16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svqadd_n_s16(op1, op2);
#else
return simde_svqadd_s16(op1, simde_svdup_n_s16(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svqadd_n_s16
#define svqadd_n_s16(op1, op2) simde_svqadd_n_s16(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svqadd_s32(simde_svint32_t op1, simde_svint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svqadd_s32(op1, op2);
#else
simde_svint32_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vqaddq_s32(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512VL_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
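      /* x86 has no saturating 32-bit add, so widen to 64 bits, add, and
       * narrow back with a saturating conversion (cvtsepi64_epi32). */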
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm512_cvtsepi64_epi32(_mm512_add_epi64(_mm512_cvtepi32_epi64(op1.m256i[i]), _mm512_cvtepi32_epi64(op2.m256i[i])));
}
#elif defined(SIMDE_X86_AVX512VL_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm256_cvtsepi64_epi32(_mm256_add_epi64(_mm256_cvtepi32_epi64(op1.m128i[i]), _mm256_cvtepi32_epi64(op2.m128i[i])));
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_adds(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec =
vec_packs(
vec_unpackh(op1.altivec) + vec_unpackh(op2.altivec),
vec_unpackl(op1.altivec) + vec_unpackl(op2.altivec)
);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = simde_math_adds_i32(op1.values[i], op2.values[i]);
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svqadd_s32
#define svqadd_s32(op1, op2) simde_svqadd_s32(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint32_t
simde_svqadd_n_s32(simde_svint32_t op1, int32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svqadd_n_s32(op1, op2);
#else
return simde_svqadd_s32(op1, simde_svdup_n_s32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svqadd_n_s32
#define svqadd_n_s32(op1, op2) simde_svqadd_n_s32(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svqadd_s64(simde_svint64_t op1, simde_svint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svqadd_s64(op1, op2);
#else
simde_svint64_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vqaddq_s64(op1.neon, op2.neon);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = simde_math_adds_i64(op1.values[i], op2.values[i]);
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svqadd_s64
#define svqadd_s64(op1, op2) simde_svqadd_s64(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svint64_t
simde_svqadd_n_s64(simde_svint64_t op1, int64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svqadd_n_s64(op1, op2);
#else
return simde_svqadd_s64(op1, simde_svdup_n_s64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svqadd_n_s64
#define svqadd_n_s64(op1, op2) simde_svqadd_n_s64(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svqadd_u8(simde_svuint8_t op1, simde_svuint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svqadd_u8(op1, op2);
#else
simde_svuint8_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vqaddq_u8(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_adds_epu8(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_adds_epu8(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_adds_epu8(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_adds(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec =
vec_packs(
vec_unpackh(op1.altivec) + vec_unpackh(op2.altivec),
vec_unpackl(op1.altivec) + vec_unpackl(op2.altivec)
);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_u8x16_add_sat(op1.v128, op2.v128);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = simde_math_adds_u8(op1.values[i], op2.values[i]);
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svqadd_u8
#define svqadd_u8(op1, op2) simde_svqadd_u8(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint8_t
simde_svqadd_n_u8(simde_svuint8_t op1, uint8_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svqadd_n_u8(op1, op2);
#else
return simde_svqadd_u8(op1, simde_svdup_n_u8(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svqadd_n_u8
#define svqadd_n_u8(op1, op2) simde_svqadd_n_u8(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svqadd_u16(simde_svuint16_t op1, simde_svuint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svqadd_u16(op1, op2);
#else
simde_svuint16_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vqaddq_u16(op1.neon, op2.neon);
#elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
r.m512i = _mm512_adds_epu16(op1.m512i, op2.m512i);
#elif defined(SIMDE_X86_AVX2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) {
r.m256i[i] = _mm256_adds_epu16(op1.m256i[i], op2.m256i[i]);
}
#elif defined(SIMDE_X86_SSE2_NATIVE)
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) {
r.m128i[i] = _mm_adds_epu16(op1.m128i[i], op2.m128i[i]);
}
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_adds(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec =
vec_packs(
vec_unpackh(op1.altivec) + vec_unpackh(op2.altivec),
vec_unpackl(op1.altivec) + vec_unpackl(op2.altivec)
);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r.v128 = wasm_u16x8_add_sat(op1.v128, op2.v128);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = simde_math_adds_u16(op1.values[i], op2.values[i]);
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svqadd_u16
#define svqadd_u16(op1, op2) simde_svqadd_u16(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint16_t
simde_svqadd_n_u16(simde_svuint16_t op1, uint16_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svqadd_n_u16(op1, op2);
#else
return simde_svqadd_u16(op1, simde_svdup_n_u16(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svqadd_n_u16
#define svqadd_n_u16(op1, op2) simde_svqadd_n_u16(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svqadd_u32(simde_svuint32_t op1, simde_svuint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svqadd_u32(op1, op2);
#else
simde_svuint32_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vqaddq_u32(op1.neon, op2.neon);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r.altivec = vec_adds(op1.altivec, op2.altivec);
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r.altivec =
vec_packs(
vec_unpackh(op1.altivec) + vec_unpackh(op2.altivec),
vec_unpackl(op1.altivec) + vec_unpackl(op2.altivec)
);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = simde_math_adds_u32(op1.values[i], op2.values[i]);
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svqadd_u32
#define svqadd_u32(op1, op2) simde_svqadd_u32(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint32_t
simde_svqadd_n_u32(simde_svuint32_t op1, uint32_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svqadd_n_u32(op1, op2);
#else
return simde_svqadd_u32(op1, simde_svdup_n_u32(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svqadd_n_u32
#define svqadd_n_u32(op1, op2) simde_svqadd_n_u32(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svqadd_u64(simde_svuint64_t op1, simde_svuint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svqadd_u64(op1, op2);
#else
simde_svuint64_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r.neon = vqaddq_u64(op1.neon, op2.neon);
#else
SIMDE_VECTORIZE
for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) {
r.values[i] = simde_math_adds_u64(op1.values[i], op2.values[i]);
}
#endif
return r;
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svqadd_u64
#define svqadd_u64(op1, op2) simde_svqadd_u64(op1, op2)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_svuint64_t
simde_svqadd_n_u64(simde_svuint64_t op1, uint64_t op2) {
#if defined(SIMDE_ARM_SVE_NATIVE)
return svqadd_n_u64(op1, op2);
#else
return simde_svqadd_u64(op1, simde_svdup_n_u64(op2));
#endif
}
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef simde_svqadd_n_u64
#define svqadd_n_u64(op1, op2) simde_svqadd_n_u64(op1, op2)
#endif
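/* Type-generic svqadd: C++ overloads below, or a C11 _Generic dispatch on
 * op2 (vector or scalar) that selects the matching typed function. */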
#if defined(__cplusplus)
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svqadd( simde_svint8_t op1, simde_svint8_t op2) { return simde_svqadd_s8 (op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svqadd( simde_svint16_t op1, simde_svint16_t op2) { return simde_svqadd_s16 (op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svqadd( simde_svint32_t op1, simde_svint32_t op2) { return simde_svqadd_s32 (op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svqadd( simde_svint64_t op1, simde_svint64_t op2) { return simde_svqadd_s64 (op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svqadd( simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svqadd_u8 (op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svqadd( simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svqadd_u16 (op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svqadd( simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svqadd_u32 (op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svqadd( simde_svuint64_t op1, simde_svuint64_t op2) { return simde_svqadd_u64 (op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svqadd( simde_svint8_t op1, int8_t op2) { return simde_svqadd_n_s8 (op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svqadd( simde_svint16_t op1, int16_t op2) { return simde_svqadd_n_s16(op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svqadd( simde_svint32_t op1, int32_t op2) { return simde_svqadd_n_s32(op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svqadd( simde_svint64_t op1, int64_t op2) { return simde_svqadd_n_s64(op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svqadd( simde_svuint8_t op1, uint8_t op2) { return simde_svqadd_n_u8 (op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svqadd( simde_svuint16_t op1, uint16_t op2) { return simde_svqadd_n_u16(op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svqadd( simde_svuint32_t op1, uint32_t op2) { return simde_svqadd_n_u32(op1, op2); }
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svqadd( simde_svuint64_t op1, uint64_t op2) { return simde_svqadd_n_u64(op1, op2); }
#elif defined(SIMDE_GENERIC_)
  #define simde_svqadd(op1, op2) \
(SIMDE_GENERIC_((op2), \
simde_svint8_t: simde_svqadd_s8, \
simde_svint16_t: simde_svqadd_s16, \
simde_svint32_t: simde_svqadd_s32, \
simde_svint64_t: simde_svqadd_s64, \
simde_svuint8_t: simde_svqadd_u8, \
simde_svuint16_t: simde_svqadd_u16, \
simde_svuint32_t: simde_svqadd_u32, \
simde_svuint64_t: simde_svqadd_u64, \
int8_t: simde_svqadd_n_s8, \
int16_t: simde_svqadd_n_s16, \
int32_t: simde_svqadd_n_s32, \
int64_t: simde_svqadd_n_s64, \
uint8_t: simde_svqadd_n_u8, \
uint16_t: simde_svqadd_n_u16, \
uint32_t: simde_svqadd_n_u32, \
      uint64_t:          simde_svqadd_n_u64)((op1), (op2)))
#endif
#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
#undef svqadd
  #define svqadd(op1, op2) simde_svqadd((op1), (op2))
#endif
HEDLEY_DIAGNOSTIC_POP
#endif /* SIMDE_ARM_SVE_QADD_H */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/st4.h | .h | 16,018 | 446 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com>
*/
#if !defined(SIMDE_ARM_NEON_ST4_H)
#define SIMDE_ARM_NEON_ST4_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
#if !defined(SIMDE_BUG_INTEL_857088)
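/* vst4*: store four vectors interleaved.  With N lanes per vector, the
 * memory layout is A0 B0 C0 D0 A1 B1 C1 D1 ... A(N-1) B(N-1) C(N-1) D(N-1);
 * the fallbacks build that layout in a temporary buffer via
 * buf[i] = val[i % 4].values[i / 4] and copy it out with a single memcpy. */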
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4_f32(simde_float32_t *ptr, simde_float32x2x4_t val) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst4_f32(ptr, val);
#else
simde_float32_t buf[8];
simde_float32x2_private a_[4] = { simde_float32x2_to_private(val.val[0]), simde_float32x2_to_private(val.val[1]),
simde_float32x2_to_private(val.val[2]), simde_float32x2_to_private(val.val[3]) };
for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 4 ; i++) {
buf[i] = a_[i % 4].values[i / 4];
}
simde_memcpy(ptr, buf, sizeof(buf));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst4_f32
#define vst4_f32(a, b) simde_vst4_f32((a), (b))
#endif
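/* A minimal usage sketch (illustrative only; va..vd are hypothetical
 * simde_float32x2_t values, not part of this header):
 *
 *   simde_float32_t out[8];
 *   simde_float32x2x4_t v = { { va, vb, vc, vd } };
 *   simde_vst4_f32(out, v);
 *   // out == { va[0], vb[0], vc[0], vd[0], va[1], vb[1], vc[1], vd[1] }
 */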
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4_f64(simde_float64_t *ptr, simde_float64x1x4_t val) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
vst4_f64(ptr, val);
#else
simde_float64_t buf[4];
simde_float64x1_private a_[4] = { simde_float64x1_to_private(val.val[0]), simde_float64x1_to_private(val.val[1]),
simde_float64x1_to_private(val.val[2]), simde_float64x1_to_private(val.val[3]) };
for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 4 ; i++) {
buf[i] = a_[i % 4].values[i / 4];
}
simde_memcpy(ptr, buf, sizeof(buf));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vst4_f64
#define vst4_f64(a, b) simde_vst4_f64((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4_s8(int8_t *ptr, simde_int8x8x4_t val) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst4_s8(ptr, val);
#else
int8_t buf[32];
simde_int8x8_private a_[4] = { simde_int8x8_to_private(val.val[0]), simde_int8x8_to_private(val.val[1]),
simde_int8x8_to_private(val.val[2]), simde_int8x8_to_private(val.val[3]) };
for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 4 ; i++) {
buf[i] = a_[i % 4].values[i / 4];
}
simde_memcpy(ptr, buf, sizeof(buf));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst4_s8
#define vst4_s8(a, b) simde_vst4_s8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4_s16(int16_t *ptr, simde_int16x4x4_t val) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst4_s16(ptr, val);
#else
int16_t buf[16];
simde_int16x4_private a_[4] = { simde_int16x4_to_private(val.val[0]), simde_int16x4_to_private(val.val[1]),
simde_int16x4_to_private(val.val[2]), simde_int16x4_to_private(val.val[3]) };
for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 4 ; i++) {
buf[i] = a_[i % 4].values[i / 4];
}
simde_memcpy(ptr, buf, sizeof(buf));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst4_s16
#define vst4_s16(a, b) simde_vst4_s16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4_s32(int32_t *ptr, simde_int32x2x4_t val) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst4_s32(ptr, val);
#else
int32_t buf[8];
simde_int32x2_private a_[4] = { simde_int32x2_to_private(val.val[0]), simde_int32x2_to_private(val.val[1]),
simde_int32x2_to_private(val.val[2]), simde_int32x2_to_private(val.val[3]) };
for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 4 ; i++) {
buf[i] = a_[i % 4].values[i / 4];
}
simde_memcpy(ptr, buf, sizeof(buf));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst4_s32
#define vst4_s32(a, b) simde_vst4_s32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4_s64(int64_t *ptr, simde_int64x1x4_t val) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst4_s64(ptr, val);
#else
int64_t buf[4];
simde_int64x1_private a_[4] = { simde_int64x1_to_private(val.val[0]), simde_int64x1_to_private(val.val[1]),
simde_int64x1_to_private(val.val[2]), simde_int64x1_to_private(val.val[3]) };
for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 4 ; i++) {
buf[i] = a_[i % 4].values[i / 4];
}
simde_memcpy(ptr, buf, sizeof(buf));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vst4_s64
#define vst4_s64(a, b) simde_vst4_s64((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4_u8(uint8_t *ptr, simde_uint8x8x4_t val) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst4_u8(ptr, val);
#else
uint8_t buf[32];
simde_uint8x8_private a_[4] = { simde_uint8x8_to_private(val.val[0]), simde_uint8x8_to_private(val.val[1]),
simde_uint8x8_to_private(val.val[2]), simde_uint8x8_to_private(val.val[3]) };
for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 4 ; i++) {
buf[i] = a_[i % 4].values[i / 4];
}
simde_memcpy(ptr, buf, sizeof(buf));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst4_u8
#define vst4_u8(a, b) simde_vst4_u8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4_u16(uint16_t *ptr, simde_uint16x4x4_t val) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst4_u16(ptr, val);
#else
uint16_t buf[16];
simde_uint16x4_private a_[4] = { simde_uint16x4_to_private(val.val[0]), simde_uint16x4_to_private(val.val[1]),
simde_uint16x4_to_private(val.val[2]), simde_uint16x4_to_private(val.val[3]) };
for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 4 ; i++) {
buf[i] = a_[i % 4].values[i / 4];
}
simde_memcpy(ptr, buf, sizeof(buf));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst4_u16
#define vst4_u16(a, b) simde_vst4_u16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4_u32(uint32_t *ptr, simde_uint32x2x4_t val) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst4_u32(ptr, val);
#else
uint32_t buf[8];
simde_uint32x2_private a_[4] = { simde_uint32x2_to_private(val.val[0]), simde_uint32x2_to_private(val.val[1]),
simde_uint32x2_to_private(val.val[2]), simde_uint32x2_to_private(val.val[3]) };
for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 4 ; i++) {
buf[i] = a_[i % 4].values[i / 4];
}
simde_memcpy(ptr, buf, sizeof(buf));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst4_u32
#define vst4_u32(a, b) simde_vst4_u32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4_u64(uint64_t *ptr, simde_uint64x1x4_t val) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst4_u64(ptr, val);
#else
uint64_t buf[4];
simde_uint64x1_private a_[4] = { simde_uint64x1_to_private(val.val[0]), simde_uint64x1_to_private(val.val[1]),
simde_uint64x1_to_private(val.val[2]), simde_uint64x1_to_private(val.val[3]) };
for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 4 ; i++) {
buf[i] = a_[i % 4].values[i / 4];
}
simde_memcpy(ptr, buf, sizeof(buf));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vst4_u64
#define vst4_u64(a, b) simde_vst4_u64((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4q_f32(simde_float32_t *ptr, simde_float32x4x4_t val) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst4q_f32(ptr, val);
#else
simde_float32_t buf[16];
simde_float32x4_private a_[4] = { simde_float32x4_to_private(val.val[0]), simde_float32x4_to_private(val.val[1]),
simde_float32x4_to_private(val.val[2]), simde_float32x4_to_private(val.val[3]) };
for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 4 ; i++) {
buf[i] = a_[i % 4].values[i / 4];
}
simde_memcpy(ptr, buf, sizeof(buf));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst4q_f32
#define vst4q_f32(a, b) simde_vst4q_f32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4q_f64(simde_float64_t *ptr, simde_float64x2x4_t val) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
vst4q_f64(ptr, val);
#else
simde_float64_t buf[8];
simde_float64x2_private a_[4] = { simde_float64x2_to_private(val.val[0]), simde_float64x2_to_private(val.val[1]),
simde_float64x2_to_private(val.val[2]), simde_float64x2_to_private(val.val[3]) };
for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 4 ; i++) {
buf[i] = a_[i % 4].values[i / 4];
}
simde_memcpy(ptr, buf, sizeof(buf));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vst4q_f64
#define vst4q_f64(a, b) simde_vst4q_f64((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4q_s8(int8_t *ptr, simde_int8x16x4_t val) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst4q_s8(ptr, val);
#else
int8_t buf[64];
simde_int8x16_private a_[4] = { simde_int8x16_to_private(val.val[0]), simde_int8x16_to_private(val.val[1]),
simde_int8x16_to_private(val.val[2]), simde_int8x16_to_private(val.val[3]) };
for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 4 ; i++) {
buf[i] = a_[i % 4].values[i / 4];
}
simde_memcpy(ptr, buf, sizeof(buf));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst4q_s8
#define vst4q_s8(a, b) simde_vst4q_s8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4q_s16(int16_t *ptr, simde_int16x8x4_t val) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst4q_s16(ptr, val);
#else
int16_t buf[32];
simde_int16x8_private a_[4] = { simde_int16x8_to_private(val.val[0]), simde_int16x8_to_private(val.val[1]),
simde_int16x8_to_private(val.val[2]), simde_int16x8_to_private(val.val[3]) };
for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 4 ; i++) {
buf[i] = a_[i % 4].values[i / 4];
}
simde_memcpy(ptr, buf, sizeof(buf));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst4q_s16
#define vst4q_s16(a, b) simde_vst4q_s16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4q_s32(int32_t *ptr, simde_int32x4x4_t val) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst4q_s32(ptr, val);
#else
int32_t buf[16];
simde_int32x4_private a_[4] = { simde_int32x4_to_private(val.val[0]), simde_int32x4_to_private(val.val[1]),
simde_int32x4_to_private(val.val[2]), simde_int32x4_to_private(val.val[3]) };
for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 4 ; i++) {
buf[i] = a_[i % 4].values[i / 4];
}
simde_memcpy(ptr, buf, sizeof(buf));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst4q_s32
#define vst4q_s32(a, b) simde_vst4q_s32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4q_s64(int64_t *ptr, simde_int64x2x4_t val) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
vst4q_s64(ptr, val);
#else
int64_t buf[8];
simde_int64x2_private a_[4] = { simde_int64x2_to_private(val.val[0]), simde_int64x2_to_private(val.val[1]),
simde_int64x2_to_private(val.val[2]), simde_int64x2_to_private(val.val[3]) };
for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 4 ; i++) {
buf[i] = a_[i % 4].values[i / 4];
}
simde_memcpy(ptr, buf, sizeof(buf));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vst4q_s64
#define vst4q_s64(a, b) simde_vst4q_s64((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4q_u8(uint8_t *ptr, simde_uint8x16x4_t val) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst4q_u8(ptr, val);
#else
uint8_t buf[64];
simde_uint8x16_private a_[4] = { simde_uint8x16_to_private(val.val[0]), simde_uint8x16_to_private(val.val[1]),
simde_uint8x16_to_private(val.val[2]), simde_uint8x16_to_private(val.val[3]) };
for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 4 ; i++) {
buf[i] = a_[i % 4].values[i / 4];
}
simde_memcpy(ptr, buf, sizeof(buf));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst4q_u8
#define vst4q_u8(a, b) simde_vst4q_u8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4q_u16(uint16_t *ptr, simde_uint16x8x4_t val) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst4q_u16(ptr, val);
#else
uint16_t buf[32];
simde_uint16x8_private a_[4] = { simde_uint16x8_to_private(val.val[0]), simde_uint16x8_to_private(val.val[1]),
simde_uint16x8_to_private(val.val[2]), simde_uint16x8_to_private(val.val[3]) };
for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 4 ; i++) {
buf[i] = a_[i % 4].values[i / 4];
}
simde_memcpy(ptr, buf, sizeof(buf));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst4q_u16
#define vst4q_u16(a, b) simde_vst4q_u16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4q_u32(uint32_t *ptr, simde_uint32x4x4_t val) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst4q_u32(ptr, val);
#else
uint32_t buf[16];
simde_uint32x4_private a_[4] = { simde_uint32x4_to_private(val.val[0]), simde_uint32x4_to_private(val.val[1]),
simde_uint32x4_to_private(val.val[2]), simde_uint32x4_to_private(val.val[3]) };
for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 4 ; i++) {
buf[i] = a_[i % 4].values[i / 4];
}
simde_memcpy(ptr, buf, sizeof(buf));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst4q_u32
#define vst4q_u32(a, b) simde_vst4q_u32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4q_u64(uint64_t *ptr, simde_uint64x2x4_t val) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
vst4q_u64(ptr, val);
#else
uint64_t buf[8];
simde_uint64x2_private a_[4] = { simde_uint64x2_to_private(val.val[0]), simde_uint64x2_to_private(val.val[1]),
simde_uint64x2_to_private(val.val[2]), simde_uint64x2_to_private(val.val[3]) };
for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 4 ; i++) {
buf[i] = a_[i % 4].values[i / 4];
}
simde_memcpy(ptr, buf, sizeof(buf));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vst4q_u64
#define vst4q_u64(a, b) simde_vst4q_u64((a), (b))
#endif
#endif /* !defined(SIMDE_BUG_INTEL_857088) */
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_ST4_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/dot.h | .h | 6,728 | 172 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_DOT_H)
#define SIMDE_ARM_NEON_DOT_H
#include "types.h"
#include "add.h"
#include "combine.h"
#include "dup_n.h"
#include "get_low.h"
#include "get_high.h"
#include "paddl.h"
#include "movn.h"
#include "mull.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
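/* vdot: 8-bit dot product.  Each 32-bit result lane accumulates
 * r[i] + a[4i]*b[4i] + a[4i+1]*b[4i+1] + a[4i+2]*b[4i+2] + a[4i+3]*b[4i+3],
 * matching the Armv8.2 SDOT/UDOT instructions. */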
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vdot_s32(simde_int32x2_t r, simde_int8x8_t a, simde_int8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(__ARM_FEATURE_DOTPROD)
return vdot_s32(r, a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
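    /* Without the dot-product extension: a widening multiply (vmull) gives
     * 16-bit products, two pairwise-add-long steps (vpaddl) sum groups of
     * four products into 64 bits, and vmovn narrows back to 32-bit lanes. */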
return simde_vadd_s32(r, simde_vmovn_s64(simde_vpaddlq_s32(simde_vpaddlq_s16(simde_vmull_s8(a, b)))));
#else
simde_int32x2_private r_;
simde_int8x8_private
a_ = simde_int8x8_to_private(a),
b_ = simde_int8x8_to_private(b);
for (int i = 0 ; i < 2 ; i++) {
int32_t acc = 0;
SIMDE_VECTORIZE_REDUCTION(+:acc)
for (int j = 0 ; j < 4 ; j++) {
const int idx = j + (i << 2);
acc += HEDLEY_STATIC_CAST(int32_t, a_.values[idx]) * HEDLEY_STATIC_CAST(int32_t, b_.values[idx]);
}
r_.values[i] = acc;
}
return simde_vadd_s32(r, simde_int32x2_from_private(r_));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD))
#undef vdot_s32
#define vdot_s32(r, a, b) simde_vdot_s32((r), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vdot_u32(simde_uint32x2_t r, simde_uint8x8_t a, simde_uint8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(__ARM_FEATURE_DOTPROD)
return vdot_u32(r, a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return simde_vadd_u32(r, simde_vmovn_u64(simde_vpaddlq_u32(simde_vpaddlq_u16(simde_vmull_u8(a, b)))));
#else
simde_uint32x2_private r_;
simde_uint8x8_private
a_ = simde_uint8x8_to_private(a),
b_ = simde_uint8x8_to_private(b);
for (int i = 0 ; i < 2 ; i++) {
uint32_t acc = 0;
SIMDE_VECTORIZE_REDUCTION(+:acc)
for (int j = 0 ; j < 4 ; j++) {
const int idx = j + (i << 2);
acc += HEDLEY_STATIC_CAST(uint32_t, a_.values[idx]) * HEDLEY_STATIC_CAST(uint32_t, b_.values[idx]);
}
r_.values[i] = acc;
}
return simde_vadd_u32(r, simde_uint32x2_from_private(r_));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD))
#undef vdot_u32
#define vdot_u32(r, a, b) simde_vdot_u32((r), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vdotq_s32(simde_int32x4_t r, simde_int8x16_t a, simde_int8x16_t b) {
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(__ARM_FEATURE_DOTPROD)
return vdotq_s32(r, a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return simde_vaddq_s32(r,
simde_vcombine_s32(simde_vmovn_s64(simde_vpaddlq_s32(simde_vpaddlq_s16(simde_vmull_s8(simde_vget_low_s8(a), simde_vget_low_s8(b))))),
simde_vmovn_s64(simde_vpaddlq_s32(simde_vpaddlq_s16(simde_vmull_s8(simde_vget_high_s8(a), simde_vget_high_s8(b)))))));
#else
simde_int32x4_private r_;
simde_int8x16_private
a_ = simde_int8x16_to_private(a),
b_ = simde_int8x16_to_private(b);
for (int i = 0 ; i < 4 ; i++) {
int32_t acc = 0;
SIMDE_VECTORIZE_REDUCTION(+:acc)
for (int j = 0 ; j < 4 ; j++) {
const int idx = j + (i << 2);
acc += HEDLEY_STATIC_CAST(int32_t, a_.values[idx]) * HEDLEY_STATIC_CAST(int32_t, b_.values[idx]);
}
r_.values[i] = acc;
}
return simde_vaddq_s32(r, simde_int32x4_from_private(r_));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD))
#undef vdotq_s32
#define vdotq_s32(r, a, b) simde_vdotq_s32((r), (a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vdotq_u32(simde_uint32x4_t r, simde_uint8x16_t a, simde_uint8x16_t b) {
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(__ARM_FEATURE_DOTPROD)
return vdotq_u32(r, a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return simde_vaddq_u32(r,
simde_vcombine_u32(simde_vmovn_u64(simde_vpaddlq_u32(simde_vpaddlq_u16(simde_vmull_u8(simde_vget_low_u8(a), simde_vget_low_u8(b))))),
simde_vmovn_u64(simde_vpaddlq_u32(simde_vpaddlq_u16(simde_vmull_u8(simde_vget_high_u8(a), simde_vget_high_u8(b)))))));
#else
simde_uint32x4_private r_;
simde_uint8x16_private
a_ = simde_uint8x16_to_private(a),
b_ = simde_uint8x16_to_private(b);
for (int i = 0 ; i < 4 ; i++) {
uint32_t acc = 0;
SIMDE_VECTORIZE_REDUCTION(+:acc)
for (int j = 0 ; j < 4 ; j++) {
const int idx = j + (i << 2);
acc += HEDLEY_STATIC_CAST(uint32_t, a_.values[idx]) * HEDLEY_STATIC_CAST(uint32_t, b_.values[idx]);
}
r_.values[i] = acc;
}
return simde_vaddq_u32(r, simde_uint32x4_from_private(r_));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD))
#undef vdotq_u32
#define vdotq_u32(r, a, b) simde_vdotq_u32((r), (a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_DOT_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/tbx.h | .h | 9,359 | 256 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Christopher Moore <moore@free.fr>
*/
#if !defined(SIMDE_ARM_NEON_TBX_H)
#define SIMDE_ARM_NEON_TBX_H
#include "reinterpret.h"
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
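/* vtbx*: table lookup with pass-through.  Each output lane is b[c[i]] when
 * the index c[i] is in range for the (1-, 2-, 3-, or 4-vector) table, and
 * is left as the corresponding lane of a otherwise. */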
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vtbx1_u8(simde_uint8x8_t a, simde_uint8x8_t b, simde_uint8x8_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vtbx1_u8(a, b, c);
#else
simde_uint8x8_private
r_,
a_ = simde_uint8x8_to_private(a),
b_ = simde_uint8x8_to_private(b),
c_ = simde_uint8x8_to_private(c);
#if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
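      /* _mm_shuffle_epi8 zeroes lanes whose index byte has the high bit set,
       * so OR-ing in the compare result forces out-of-range indices to 0xFF;
       * the final blendv then substitutes a's lanes wherever that high bit
       * is set. */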
__m128i a128 = _mm_set1_epi64(a_.m64);
__m128i b128 = _mm_set1_epi64(b_.m64);
__m128i c128 = _mm_set1_epi64(c_.m64);
c128 = _mm_or_si128(c128, _mm_cmpgt_epi8(c128, _mm_set1_epi8(7)));
__m128i r128 = _mm_shuffle_epi8(b128, c128);
r128 = _mm_blendv_epi8(r128, a128, c128);
r_.m64 = _mm_movepi64_pi64(r128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (c_.values[i] < 8) ? b_.values[c_.values[i]] : a_.values[i];
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vtbx1_u8
#define vtbx1_u8(a, b, c) simde_vtbx1_u8((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vtbx1_s8(simde_int8x8_t a, simde_int8x8_t b, simde_int8x8_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vtbx1_s8(a, b, c);
#else
return simde_vreinterpret_s8_u8(simde_vtbx1_u8(simde_vreinterpret_u8_s8(a), simde_vreinterpret_u8_s8(b), simde_vreinterpret_u8_s8(c)));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vtbx1_s8
#define vtbx1_s8(a, b, c) simde_vtbx1_s8((a), (b), (c))
#endif
#if !defined(SIMDE_BUG_INTEL_857088)
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vtbx2_u8(simde_uint8x8_t a, simde_uint8x8x2_t b, simde_uint8x8_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vtbx2_u8(a, b, c);
#else
simde_uint8x8_private
r_,
a_ = simde_uint8x8_to_private(a),
b_[2] = { simde_uint8x8_to_private(b.val[0]), simde_uint8x8_to_private(b.val[1]) },
c_ = simde_uint8x8_to_private(c);
#if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
__m128i a128 = _mm_set1_epi64(a_.m64);
__m128i b128 = _mm_set_epi64(b_[1].m64, b_[0].m64);
__m128i c128 = _mm_set1_epi64(c_.m64);
c128 = _mm_or_si128(c128, _mm_cmpgt_epi8(c128, _mm_set1_epi8(15)));
__m128i r128 = _mm_shuffle_epi8(b128, c128);
r128 = _mm_blendv_epi8(r128, a128, c128);
r_.m64 = _mm_movepi64_pi64(r128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (c_.values[i] < 16) ? b_[c_.values[i] / 8].values[c_.values[i] & 7] : a_.values[i];
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vtbx2_u8
#define vtbx2_u8(a, b, c) simde_vtbx2_u8((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vtbx2_s8(simde_int8x8_t a, simde_int8x8x2_t b, simde_int8x8_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vtbx2_s8(a, b, c);
#else
simde_uint8x8x2_t b_;
simde_memcpy(&b_, &b, sizeof(b_));
return simde_vreinterpret_s8_u8(simde_vtbx2_u8(simde_vreinterpret_u8_s8(a),
b_,
simde_vreinterpret_u8_s8(c)));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vtbx2_s8
#define vtbx2_s8(a, b, c) simde_vtbx2_s8((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vtbx3_u8(simde_uint8x8_t a, simde_uint8x8x3_t b, simde_uint8x8_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vtbx3_u8(a, b, c);
#else
simde_uint8x8_private
r_,
a_ = simde_uint8x8_to_private(a),
b_[3] = { simde_uint8x8_to_private(b.val[0]), simde_uint8x8_to_private(b.val[1]), simde_uint8x8_to_private(b.val[2]) },
c_ = simde_uint8x8_to_private(c);
#if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
__m128i a128 = _mm_set1_epi64(a_.m64);
__m128i c128 = _mm_set1_epi64(c_.m64);
c128 = _mm_or_si128(c128, _mm_cmpgt_epi8(c128, _mm_set1_epi8(23)));
__m128i r128_01 = _mm_shuffle_epi8(_mm_set_epi64(b_[1].m64, b_[0].m64), c128);
__m128i r128_2 = _mm_shuffle_epi8(_mm_set1_epi64(b_[2].m64), c128);
__m128i r128 = _mm_blendv_epi8(r128_01, r128_2, _mm_slli_epi32(c128, 3));
r128 = _mm_blendv_epi8(r128, a128, c128);
r_.m64 = _mm_movepi64_pi64(r128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (c_.values[i] < 24) ? b_[c_.values[i] / 8].values[c_.values[i] & 7] : a_.values[i];
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vtbx3_u8
#define vtbx3_u8(a, b, c) simde_vtbx3_u8((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vtbx3_s8(simde_int8x8_t a, simde_int8x8x3_t b, simde_int8x8_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vtbx3_s8(a, b, c);
#else
simde_uint8x8x3_t b_;
simde_memcpy(&b_, &b, sizeof(b_));
return simde_vreinterpret_s8_u8(simde_vtbx3_u8(simde_vreinterpret_u8_s8(a),
b_,
simde_vreinterpret_u8_s8(c)));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vtbx3_s8
#define vtbx3_s8(a, b, c) simde_vtbx3_s8((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vtbx4_u8(simde_uint8x8_t a, simde_uint8x8x4_t b, simde_uint8x8_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vtbx4_u8(a, b, c);
#else
simde_uint8x8_private
r_,
a_ = simde_uint8x8_to_private(a),
b_[4] = { simde_uint8x8_to_private(b.val[0]), simde_uint8x8_to_private(b.val[1]), simde_uint8x8_to_private(b.val[2]), simde_uint8x8_to_private(b.val[3]) },
c_ = simde_uint8x8_to_private(c);
#if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
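      /* As in vtbx3, but each of the two shuffles covers a 16-byte pair and
       * bit 4 of each index selects between the {0,1} and {2,3} halves. */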
__m128i a128 = _mm_set1_epi64(a_.m64);
__m128i c128 = _mm_set1_epi64(c_.m64);
c128 = _mm_or_si128(c128, _mm_cmpgt_epi8(c128, _mm_set1_epi8(31)));
__m128i r128_01 = _mm_shuffle_epi8(_mm_set_epi64(b_[1].m64, b_[0].m64), c128);
__m128i r128_23 = _mm_shuffle_epi8(_mm_set_epi64(b_[3].m64, b_[2].m64), c128);
__m128i r128 = _mm_blendv_epi8(r128_01, r128_23, _mm_slli_epi32(c128, 3));
r128 = _mm_blendv_epi8(r128, a128, c128);
r_.m64 = _mm_movepi64_pi64(r128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (c_.values[i] < 32) ? b_[c_.values[i] / 8].values[c_.values[i] & 7] : a_.values[i];
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vtbx4_u8
#define vtbx4_u8(a, b, c) simde_vtbx4_u8((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vtbx4_s8(simde_int8x8_t a, simde_int8x8x4_t b, simde_int8x8_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vtbx4_s8(a, b, c);
#else
simde_uint8x8x4_t b_;
simde_memcpy(&b_, &b, sizeof(b_));
return simde_vreinterpret_s8_u8(simde_vtbx4_u8(simde_vreinterpret_u8_s8(a),
b_,
simde_vreinterpret_u8_s8(c)));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vtbx4_s8
#define vtbx4_s8(a, b, c) simde_vtbx4_s8((a), (b), (c))
#endif
#endif /* !defined(SIMDE_BUG_INTEL_857088) */
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_TBX_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/maxnm.h | .h | 7,025 | 218 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_MAXNM_H)
#define SIMDE_ARM_NEON_MAXNM_H
#include "types.h"
#include "cge.h"
#include "bsl.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vmaxnm_f32(simde_float32x2_t a, simde_float32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && (__ARM_NEON_FP >= 6)
return vmaxnm_f32(a, b);
#else
simde_float32x2_private
r_,
a_ = simde_float32x2_to_private(a),
b_ = simde_float32x2_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
#if defined(simde_math_fmaxf)
r_.values[i] = simde_math_fmaxf(a_.values[i], b_.values[i]);
#else
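        /* No fmaxf: this branch chain reproduces fmax semantics, returning
         * the larger value, or the other operand when exactly one input is
         * NaN (a == a is false only when a is NaN). */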
if (a_.values[i] > b_.values[i]) {
r_.values[i] = a_.values[i];
} else if (a_.values[i] < b_.values[i]) {
r_.values[i] = b_.values[i];
} else if (a_.values[i] == a_.values[i]) {
r_.values[i] = a_.values[i];
} else {
r_.values[i] = b_.values[i];
}
#endif
}
return simde_float32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES)
#undef vmaxnm_f32
#define vmaxnm_f32(a, b) simde_vmaxnm_f32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vmaxnm_f64(simde_float64x1_t a, simde_float64x1_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmaxnm_f64(a, b);
#else
simde_float64x1_private
r_,
a_ = simde_float64x1_to_private(a),
b_ = simde_float64x1_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
#if defined(simde_math_fmax)
r_.values[i] = simde_math_fmax(a_.values[i], b_.values[i]);
#else
if (a_.values[i] > b_.values[i]) {
r_.values[i] = a_.values[i];
} else if (a_.values[i] < b_.values[i]) {
r_.values[i] = b_.values[i];
} else if (a_.values[i] == a_.values[i]) {
r_.values[i] = a_.values[i];
} else {
r_.values[i] = b_.values[i];
}
#endif
}
return simde_float64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmaxnm_f64
#define vmaxnm_f64(a, b) simde_vmaxnm_f64((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vmaxnmq_f32(simde_float32x4_t a, simde_float32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && (__ARM_NEON_FP >= 6)
return vmaxnmq_f32(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_max(a, b);
#else
simde_float32x4_private
r_,
a_ = simde_float32x4_to_private(a),
b_ = simde_float32x4_to_private(b);
#if defined(SIMDE_X86_SSE_NATIVE)
#if !defined(SIMDE_FAST_NANS)
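      /* _mm_max_ps returns its second operand when either input is NaN, which
       * already yields b when only a is NaN; the cmpunord/andnot/or sequence
       * then substitutes a wherever b is NaN, giving vmaxnm semantics. */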
__m128 r = _mm_max_ps(a_.m128, b_.m128);
__m128 bnan = _mm_cmpunord_ps(b_.m128, b_.m128);
r = _mm_andnot_ps(bnan, r);
r = _mm_or_ps(r, _mm_and_ps(a_.m128, bnan));
r_.m128 = r;
#else
r_.m128 = _mm_max_ps(a_.m128, b_.m128);
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS)
r_.v128 = wasm_f32x4_max(a_.v128, b_.v128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
#if defined(simde_math_fmaxf)
r_.values[i] = simde_math_fmaxf(a_.values[i], b_.values[i]);
#else
if (a_.values[i] > b_.values[i]) {
r_.values[i] = a_.values[i];
} else if (a_.values[i] < b_.values[i]) {
r_.values[i] = b_.values[i];
} else if (a_.values[i] == a_.values[i]) {
r_.values[i] = a_.values[i];
} else {
r_.values[i] = b_.values[i];
}
#endif
}
#endif
return simde_float32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES)
#undef vmaxnmq_f32
#define vmaxnmq_f32(a, b) simde_vmaxnmq_f32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vmaxnmq_f64(simde_float64x2_t a, simde_float64x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmaxnmq_f64(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
return vec_max(a, b);
#else
simde_float64x2_private
r_,
a_ = simde_float64x2_to_private(a),
b_ = simde_float64x2_to_private(b);
#if defined(SIMDE_X86_SSE2_NATIVE)
#if !defined(SIMDE_FAST_NANS)
__m128d r = _mm_max_pd(a_.m128d, b_.m128d);
__m128d bnan = _mm_cmpunord_pd(b_.m128d, b_.m128d);
r = _mm_andnot_pd(bnan, r);
r = _mm_or_pd(r, _mm_and_pd(a_.m128d, bnan));
r_.m128d = r;
#else
r_.m128d = _mm_max_pd(a_.m128d, b_.m128d);
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS)
r_.v128 = wasm_f64x2_max(a_.v128, b_.v128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
#if defined(simde_math_fmax)
r_.values[i] = simde_math_fmax(a_.values[i], b_.values[i]);
#else
if (a_.values[i] > b_.values[i]) {
r_.values[i] = a_.values[i];
} else if (a_.values[i] < b_.values[i]) {
r_.values[i] = b_.values[i];
} else if (a_.values[i] == a_.values[i]) {
r_.values[i] = a_.values[i];
} else {
r_.values[i] = b_.values[i];
}
#endif
}
#endif
return simde_float64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmaxnmq_f64
#define vmaxnmq_f64(a, b) simde_vmaxnmq_f64((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_MAXNM_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/ld1_x4.h | .h | 11,455 | 299 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2021 Zhi An Ng <zhin@google.com> (Copyright owned by Google, LLC)
* 2021 Décio Luiz Gazzoni Filho <decio@decpp.net>
*/
#if !defined(SIMDE_ARM_NEON_LD1_X4_H)
#define SIMDE_ARM_NEON_LD1_X4_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
#if HEDLEY_GCC_VERSION_CHECK(7,0,0)
SIMDE_DIAGNOSTIC_DISABLE_MAYBE_UNINITIAZILED_
#endif
SIMDE_BEGIN_DECLS_
#if !defined(SIMDE_BUG_INTEL_857088)
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2x4_t
simde_vld1_f32_x4(simde_float32 const ptr[HEDLEY_ARRAY_PARAM(8)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
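    /* The vld1_*_x4 intrinsics are only available with reasonably recent
     * compilers (GCC 8+, clang 7+) and, for those compilers, only when
     * targeting AArch64; the checks above encode that. */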
return vld1_f32_x4(ptr);
#else
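    /* Portable fallback: scatter the 8 consecutive elements into four 2-lane
     * vectors.  The same pattern repeats below for the other element types. */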
simde_float32x2_private a_[4];
for (size_t i = 0; i < 8; i++) {
a_[i / 2].values[i % 2] = ptr[i];
}
simde_float32x2x4_t s_ = { { simde_float32x2_from_private(a_[0]),
simde_float32x2_from_private(a_[1]),
simde_float32x2_from_private(a_[2]),
simde_float32x2_from_private(a_[3]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_f32_x4
#define vld1_f32_x4(a) simde_vld1_f32_x4((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1x4_t
simde_vld1_f64_x4(simde_float64 const ptr[HEDLEY_ARRAY_PARAM(4)]) {
#if \
defined(SIMDE_ARM_NEON_A64V8_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,0,0)) && \
(!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0))
return vld1_f64_x4(ptr);
#else
simde_float64x1_private a_[4];
for (size_t i = 0; i < 4; i++) {
a_[i].values[0] = ptr[i];
}
simde_float64x1x4_t s_ = { { simde_float64x1_from_private(a_[0]),
simde_float64x1_from_private(a_[1]),
simde_float64x1_from_private(a_[2]),
simde_float64x1_from_private(a_[3]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vld1_f64_x4
#define vld1_f64_x4(a) simde_vld1_f64_x4((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8x4_t
simde_vld1_s8_x4(int8_t const ptr[HEDLEY_ARRAY_PARAM(32)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1_s8_x4(ptr);
#else
simde_int8x8_private a_[4];
for (size_t i = 0; i < 32; i++) {
a_[i / 8].values[i % 8] = ptr[i];
}
simde_int8x8x4_t s_ = { { simde_int8x8_from_private(a_[0]),
simde_int8x8_from_private(a_[1]),
simde_int8x8_from_private(a_[2]),
simde_int8x8_from_private(a_[3]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_s8_x4
#define vld1_s8_x4(a) simde_vld1_s8_x4((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4x4_t
simde_vld1_s16_x4(int16_t const ptr[HEDLEY_ARRAY_PARAM(16)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1_s16_x4(ptr);
#else
simde_int16x4_private a_[4];
for (size_t i = 0; i < 16; i++) {
a_[i / 4].values[i % 4] = ptr[i];
}
simde_int16x4x4_t s_ = { { simde_int16x4_from_private(a_[0]),
simde_int16x4_from_private(a_[1]),
simde_int16x4_from_private(a_[2]),
simde_int16x4_from_private(a_[3]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_s16_x4
#define vld1_s16_x4(a) simde_vld1_s16_x4((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2x4_t
simde_vld1_s32_x4(int32_t const ptr[HEDLEY_ARRAY_PARAM(8)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1_s32_x4(ptr);
#else
simde_int32x2_private a_[4];
for (size_t i = 0; i < 8; i++) {
a_[i / 2].values[i % 2] = ptr[i];
}
simde_int32x2x4_t s_ = { { simde_int32x2_from_private(a_[0]),
simde_int32x2_from_private(a_[1]),
simde_int32x2_from_private(a_[2]),
simde_int32x2_from_private(a_[3]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_s32_x4
#define vld1_s32_x4(a) simde_vld1_s32_x4((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1x4_t
simde_vld1_s64_x4(int64_t const ptr[HEDLEY_ARRAY_PARAM(4)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1_s64_x4(ptr);
#else
simde_int64x1_private a_[4];
for (size_t i = 0; i < 4; i++) {
a_[i].values[0] = ptr[i];
}
simde_int64x1x4_t s_ = { { simde_int64x1_from_private(a_[0]),
simde_int64x1_from_private(a_[1]),
simde_int64x1_from_private(a_[2]),
simde_int64x1_from_private(a_[3]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_s64_x4
#define vld1_s64_x4(a) simde_vld1_s64_x4((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8x4_t
simde_vld1_u8_x4(uint8_t const ptr[HEDLEY_ARRAY_PARAM(32)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1_u8_x4(ptr);
#else
simde_uint8x8_private a_[4];
for (size_t i = 0; i < 32; i++) {
a_[i / 8].values[i % 8] = ptr[i];
}
simde_uint8x8x4_t s_ = { { simde_uint8x8_from_private(a_[0]),
simde_uint8x8_from_private(a_[1]),
simde_uint8x8_from_private(a_[2]),
simde_uint8x8_from_private(a_[3]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_u8_x4
#define vld1_u8_x4(a) simde_vld1_u8_x4((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4x4_t
simde_vld1_u16_x4(uint16_t const ptr[HEDLEY_ARRAY_PARAM(16)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1_u16_x4(ptr);
#else
simde_uint16x4_private a_[4];
for (size_t i = 0; i < 16; i++) {
a_[i / 4].values[i % 4] = ptr[i];
}
simde_uint16x4x4_t s_ = { { simde_uint16x4_from_private(a_[0]),
simde_uint16x4_from_private(a_[1]),
simde_uint16x4_from_private(a_[2]),
simde_uint16x4_from_private(a_[3]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_u16_x4
#define vld1_u16_x4(a) simde_vld1_u16_x4((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2x4_t
simde_vld1_u32_x4(uint32_t const ptr[HEDLEY_ARRAY_PARAM(8)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1_u32_x4(ptr);
#else
simde_uint32x2_private a_[4];
for (size_t i = 0; i < 8; i++) {
a_[i / 2].values[i % 2] = ptr[i];
}
simde_uint32x2x4_t s_ = { { simde_uint32x2_from_private(a_[0]),
simde_uint32x2_from_private(a_[1]),
simde_uint32x2_from_private(a_[2]),
simde_uint32x2_from_private(a_[3]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_u32_x4
#define vld1_u32_x4(a) simde_vld1_u32_x4((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1x4_t
simde_vld1_u64_x4(uint64_t const ptr[HEDLEY_ARRAY_PARAM(4)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1_u64_x4(ptr);
#else
simde_uint64x1_private a_[4];
for (size_t i = 0; i < 4; i++) {
a_[i].values[0] = ptr[i];
}
simde_uint64x1x4_t s_ = { { simde_uint64x1_from_private(a_[0]),
simde_uint64x1_from_private(a_[1]),
simde_uint64x1_from_private(a_[2]),
simde_uint64x1_from_private(a_[3]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_u64_x4
#define vld1_u64_x4(a) simde_vld1_u64_x4((a))
#endif
#endif /* !defined(SIMDE_BUG_INTEL_857088) */
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_LD1_X4_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/fma.h | .h | 4,189 | 127 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Atharva Nimbalkar <atharvakn@gmail.com>
*/
#if !defined(SIMDE_ARM_NEON_FMA_H)
#define SIMDE_ARM_NEON_FMA_H
#include "add.h"
#include "mul.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vfma_f32(simde_float32x2_t a, simde_float32x2_t b, simde_float32x2_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA)
return vfma_f32(a, b, c);
#else
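    /* No native FMA: emulate with a separate multiply and add.  This rounds
     * twice, so results can differ from a true fused multiply-add in the
     * last ULP. */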
return simde_vadd_f32(a, simde_vmul_f32(b, c));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vfma_f32
#define vfma_f32(a, b, c) simde_vfma_f32(a, b, c)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vfma_f64(simde_float64x1_t a, simde_float64x1_t b, simde_float64x1_t c) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA)
return vfma_f64(a, b, c);
#else
return simde_vadd_f64(a, simde_vmul_f64(b, c));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vfma_f64
#define vfma_f64(a, b, c) simde_vfma_f64(a, b, c)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vfmaq_f32(simde_float32x4_t a, simde_float32x4_t b, simde_float32x4_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA)
return vfmaq_f32(a, b, c);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_madd(b, c, a);
  #elif defined(SIMDE_X86_FMA_NATIVE)
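    /* _mm_fmadd_ps(b, c, a) computes (b * c) + a with a single rounding,
     * matching vfma's fused semantics. */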
simde_float32x4_private
r_,
a_ = simde_float32x4_to_private(a),
b_ = simde_float32x4_to_private(b),
c_ = simde_float32x4_to_private(c);
    r_.m128 = _mm_fmadd_ps(b_.m128, c_.m128, a_.m128);
return simde_float32x4_from_private(r_);
#else
return simde_vaddq_f32(a, simde_vmulq_f32(b, c));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vfmaq_f32
#define vfmaq_f32(a, b, c) simde_vfmaq_f32(a, b, c)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vfmaq_f64(simde_float64x2_t a, simde_float64x2_t b, simde_float64x2_t c) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA)
return vfmaq_f64(a, b, c);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
return vec_madd(b, c, a);
  #elif defined(SIMDE_X86_FMA_NATIVE)
simde_float64x2_private
r_,
a_ = simde_float64x2_to_private(a),
b_ = simde_float64x2_to_private(b),
c_ = simde_float64x2_to_private(c);
    r_.m128d = _mm_fmadd_pd(b_.m128d, c_.m128d, a_.m128d);
return simde_float64x2_from_private(r_);
#else
return simde_vaddq_f64(a, simde_vmulq_f64(b, c));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vfmaq_f64
#define vfmaq_f64(a, b, c) simde_vfmaq_f64(a, b, c)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_FMA_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/qdmulh_lane.h | .h | 6,291 | 164 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
* 2021 Zhi An Ng <zhin@google.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_QDMULH_LANE_H)
#define SIMDE_ARM_NEON_QDMULH_LANE_H
#include "types.h"
#include "qdmulh_n.h"
#include "get_lane.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
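/* Lane variants of vqdmulh.  Where native intrinsics are unavailable, the
 * requested lane is extracted with vget(q)_lane_* and the work is delegated
 * to the corresponding vqdmulh(q)_n_* (broadcast-scalar) implementation. */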
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vqdmulh_lane_s16(a, v, lane) vqdmulh_lane_s16((a), (v), (lane))
#else
#define simde_vqdmulh_lane_s16(a, v, lane) \
simde_vqdmulh_n_s16((a), simde_vget_lane_s16((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqdmulh_lane_s16
#define vqdmulh_lane_s16(a, v, lane) simde_vqdmulh_lane_s16((a), (v), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vqdmulh_lane_s32(a, v, lane) vqdmulh_lane_s32((a), (v), (lane))
#else
#define simde_vqdmulh_lane_s32(a, v, lane) \
simde_vqdmulh_n_s32((a), simde_vget_lane_s32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqdmulh_lane_s32
#define vqdmulh_lane_s32(a, v, lane) simde_vqdmulh_lane_s32((a), (v), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vqdmulhq_lane_s16(a, v, lane) vqdmulhq_lane_s16((a), (v), (lane))
#else
#define simde_vqdmulhq_lane_s16(a, v, lane) \
simde_vqdmulhq_n_s16((a), simde_vget_lane_s16((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqdmulhq_lane_s16
#define vqdmulhq_lane_s16(a, v, lane) simde_vqdmulhq_lane_s16((a), (v), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vqdmulhq_lane_s32(a, v, lane) vqdmulhq_lane_s32((a), (v), (lane))
#else
#define simde_vqdmulhq_lane_s32(a, v, lane) \
simde_vqdmulhq_n_s32((a), simde_vget_lane_s32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqdmulhq_lane_s32
#define vqdmulhq_lane_s32(a, v, lane) simde_vqdmulhq_lane_s32((a), (v), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vqdmulh_laneq_s16(a, v, lane) vqdmulh_laneq_s16((a), (v), (lane))
#else
#define simde_vqdmulh_laneq_s16(a, v, lane) \
simde_vqdmulh_n_s16((a), simde_vgetq_lane_s16((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqdmulh_laneq_s16
#define vqdmulh_laneq_s16(a, v, lane) simde_vqdmulh_laneq_s16((a), (v), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vqdmulh_laneq_s32(a, v, lane) vqdmulh_laneq_s32((a), (v), (lane))
#else
#define simde_vqdmulh_laneq_s32(a, v, lane) \
simde_vqdmulh_n_s32((a), simde_vgetq_lane_s32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqdmulh_laneq_s32
#define vqdmulh_laneq_s32(a, v, lane) simde_vqdmulh_laneq_s32((a), (v), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vqdmulhq_laneq_s16(a, v, lane) vqdmulhq_laneq_s16((a), (v), (lane))
#else
#define simde_vqdmulhq_laneq_s16(a, v, lane) \
simde_vqdmulhq_n_s16((a), simde_vgetq_lane_s16((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqdmulhq_laneq_s16
#define vqdmulhq_laneq_s16(a, v, lane) simde_vqdmulhq_laneq_s16((a), (v), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vqdmulhq_laneq_s32(a, v, lane) vqdmulhq_laneq_s32((a), (v), (lane))
#else
#define simde_vqdmulhq_laneq_s32(a, v, lane) \
simde_vqdmulhq_n_s32((a), simde_vgetq_lane_s32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqdmulhq_laneq_s32
#define vqdmulhq_laneq_s32(a, v, lane) simde_vqdmulhq_laneq_s32((a), (v), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0)
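    /* clang before 11 raises a spurious vector-conversion diagnostic on this
     * intrinsic, so the call is wrapped to suppress that warning. */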
#define simde_vqdmulhs_lane_s32(a, v, lane) \
SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vqdmulhs_lane_s32((a), (v), (lane)))
#else
    #define simde_vqdmulhs_lane_s32(a, v, lane) vqdmulhs_lane_s32((a), (v), (lane))
#endif
#else
#define simde_vqdmulhs_lane_s32(a, v, lane) \
simde_vqdmulhs_s32((a), simde_vget_lane_s32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqdmulhs_lane_s32
#define vqdmulhs_lane_s32(a, v, lane) simde_vqdmulhs_lane_s32((a), (v), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0)
#define simde_vqdmulhs_laneq_s32(a, v, lane) \
SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vqdmulhs_laneq_s32((a), (v), (lane)))
#else
    #define simde_vqdmulhs_laneq_s32(a, v, lane) vqdmulhs_laneq_s32((a), (v), (lane))
#endif
#else
#define simde_vqdmulhs_laneq_s32(a, v, lane) \
simde_vqdmulhs_s32((a), simde_vgetq_lane_s32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqdmulhs_laneq_s32
#define vqdmulhs_laneq_s32(a, v, lane) simde_vqdmulhs_laneq_s32((a), (v), (lane))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_QDMULH_LANE_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/ext.h | .h | 41,893 | 851 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_EXT_H)
#define SIMDE_ARM_NEON_EXT_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vext_f32(simde_float32x2_t a, simde_float32x2_t b, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 1) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
simde_float32x2_t r;
SIMDE_CONSTIFY_2_(vext_f32, r, (HEDLEY_UNREACHABLE(), a), n, a, b);
return r;
#else
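    /* Portable vext: treat a and b as one concatenated vector and extract a
     * full vector starting at lane n.  Lanes n..end come from a, the rest
     * wrap into b (the mask is lane_count - 1, which is 0 for one-lane
     * vectors). */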
simde_float32x2_private
a_ = simde_float32x2_to_private(a),
b_ = simde_float32x2_to_private(b),
r_ = a_;
const size_t n_ = HEDLEY_STATIC_CAST(size_t, n);
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
size_t src = i + n_;
r_.values[i] = (src < (sizeof(r_.values) / sizeof(r_.values[0]))) ? a_.values[src] : b_.values[src & 1];
}
return simde_float32x2_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
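/* SSSE3's palignr implements vext directly: it concatenates b:a and shifts
 * the pair right by n bytes, so a single _mm_alignr_pi8 suffices. */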
#define simde_vext_f32(a, b, n) simde_float32x2_from_m64(_mm_alignr_pi8(simde_float32x2_to_m64(b), simde_float32x2_to_m64(a), n * sizeof(simde_float32)))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) && !defined(SIMDE_BUG_GCC_100760)
#define simde_vext_f32(a, b, n) (__extension__ ({ \
simde_float32x2_private simde_vext_f32_r_; \
simde_vext_f32_r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, simde_float32x2_to_private(a).values, simde_float32x2_to_private(b).values, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1))); \
simde_float32x2_from_private(simde_vext_f32_r_); \
}))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vext_f32
#define vext_f32(a, b, n) simde_vext_f32((a), (b), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vext_f64(simde_float64x1_t a, simde_float64x1_t b, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 0) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
(void) n;
return vext_f64(a, b, 0);
#else
simde_float64x1_private
a_ = simde_float64x1_to_private(a),
b_ = simde_float64x1_to_private(b),
r_ = a_;
const size_t n_ = HEDLEY_STATIC_CAST(size_t, n);
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
size_t src = i + n_;
r_.values[i] = (src < (sizeof(r_.values) / sizeof(r_.values[0]))) ? a_.values[src] : b_.values[src & 0];
}
return simde_float64x1_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
#define simde_vext_f64(a, b, n) simde_float64x1_from_m64(_mm_alignr_pi8(simde_float64x1_to_m64(b), simde_float64x1_to_m64(a), n * sizeof(simde_float64)))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32)
#define simde_vext_f64(a, b, n) (__extension__ ({ \
simde_float64x1_private simde_vext_f64_r_; \
simde_vext_f64_r_.values = SIMDE_SHUFFLE_VECTOR_(64, 8, simde_float64x1_to_private(a).values, simde_float64x1_to_private(b).values, \
HEDLEY_STATIC_CAST(int8_t, (n))); \
simde_float64x1_from_private(simde_vext_f64_r_); \
}))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vext_f64
#define vext_f64(a, b, n) simde_vext_f64((a), (b), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vext_s8(simde_int8x8_t a, simde_int8x8_t b, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 7) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
simde_int8x8_t r;
SIMDE_CONSTIFY_8_(vext_s8, r, (HEDLEY_UNREACHABLE(), a), n, a, b);
return r;
#else
simde_int8x8_private
a_ = simde_int8x8_to_private(a),
b_ = simde_int8x8_to_private(b),
r_ = a_;
const size_t n_ = HEDLEY_STATIC_CAST(size_t, n);
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
size_t src = i + n_;
r_.values[i] = (src < (sizeof(r_.values) / sizeof(r_.values[0]))) ? a_.values[src] : b_.values[src & 7];
}
return simde_int8x8_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
#define simde_vext_s8(a, b, n) simde_int8x8_from_m64(_mm_alignr_pi8(simde_int8x8_to_m64(b), simde_int8x8_to_m64(a), n * sizeof(int8_t)))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) && !defined(SIMDE_BUG_GCC_100760)
#define simde_vext_s8(a, b, n) (__extension__ ({ \
simde_int8x8_private simde_vext_s8_r_; \
simde_vext_s8_r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, simde_int8x8_to_private(a).values, simde_int8x8_to_private(b).values, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 4)), HEDLEY_STATIC_CAST(int8_t, ((n) + 5)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 6)), HEDLEY_STATIC_CAST(int8_t, ((n) + 7))); \
simde_int8x8_from_private(simde_vext_s8_r_); \
}))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vext_s8
#define vext_s8(a, b, n) simde_vext_s8((a), (b), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vext_s16(simde_int16x4_t a, simde_int16x4_t b, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 3) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
simde_int16x4_t r;
SIMDE_CONSTIFY_4_(vext_s16, r, (HEDLEY_UNREACHABLE(), a), n, a, b);
return r;
#else
simde_int16x4_private
a_ = simde_int16x4_to_private(a),
b_ = simde_int16x4_to_private(b),
r_ = a_;
const size_t n_ = HEDLEY_STATIC_CAST(size_t, n);
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
size_t src = i + n_;
r_.values[i] = (src < (sizeof(r_.values) / sizeof(r_.values[0]))) ? a_.values[src] : b_.values[src & 3];
}
return simde_int16x4_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
#define simde_vext_s16(a, b, n) simde_int16x4_from_m64(_mm_alignr_pi8(simde_int16x4_to_m64(b), simde_int16x4_to_m64(a), n * sizeof(int16_t)))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) && !defined(SIMDE_BUG_GCC_100760)
#define simde_vext_s16(a, b, n) (__extension__ ({ \
simde_int16x4_private simde_vext_s16_r_; \
simde_vext_s16_r_.values = SIMDE_SHUFFLE_VECTOR_(16, 8, simde_int16x4_to_private(a).values, simde_int16x4_to_private(b).values, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3))); \
simde_int16x4_from_private(simde_vext_s16_r_); \
}))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vext_s16
#define vext_s16(a, b, n) simde_vext_s16((a), (b), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vext_s32(simde_int32x2_t a, simde_int32x2_t b, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 1) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
simde_int32x2_t r;
SIMDE_CONSTIFY_2_(vext_s32, r, (HEDLEY_UNREACHABLE(), a), n, a, b);
return r;
#else
simde_int32x2_private
a_ = simde_int32x2_to_private(a),
b_ = simde_int32x2_to_private(b),
r_ = a_;
const size_t n_ = HEDLEY_STATIC_CAST(size_t, n);
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
size_t src = i + n_;
r_.values[i] = (src < (sizeof(r_.values) / sizeof(r_.values[0]))) ? a_.values[src] : b_.values[src & 1];
}
return simde_int32x2_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
#define simde_vext_s32(a, b, n) simde_int32x2_from_m64(_mm_alignr_pi8(simde_int32x2_to_m64(b), simde_int32x2_to_m64(a), n * sizeof(int32_t)))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) && !defined(SIMDE_BUG_GCC_100760)
#define simde_vext_s32(a, b, n) (__extension__ ({ \
simde_int32x2_private simde_vext_s32_r_; \
simde_vext_s32_r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, simde_int32x2_to_private(a).values, simde_int32x2_to_private(b).values, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1))); \
simde_int32x2_from_private(simde_vext_s32_r_); \
}))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vext_s32
#define vext_s32(a, b, n) simde_vext_s32((a), (b), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vext_s64(simde_int64x1_t a, simde_int64x1_t b, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 0) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
(void) n;
return vext_s64(a, b, 0);
#else
simde_int64x1_private
a_ = simde_int64x1_to_private(a),
b_ = simde_int64x1_to_private(b),
r_ = a_;
const size_t n_ = HEDLEY_STATIC_CAST(size_t, n);
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
size_t src = i + n_;
r_.values[i] = (src < (sizeof(r_.values) / sizeof(r_.values[0]))) ? a_.values[src] : b_.values[src & 0];
}
return simde_int64x1_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
#define simde_vext_s64(a, b, n) simde_int64x1_from_m64(_mm_alignr_pi8(simde_int64x1_to_m64(b), simde_int64x1_to_m64(a), n * sizeof(int64_t)))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32)
#define simde_vext_s64(a, b, n) (__extension__ ({ \
simde_int64x1_private simde_vext_s64_r_; \
simde_vext_s64_r_.values = SIMDE_SHUFFLE_VECTOR_(64, 8, simde_int64x1_to_private(a).values, simde_int64x1_to_private(b).values, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0))); \
simde_int64x1_from_private(simde_vext_s64_r_); \
}))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vext_s64
#define vext_s64(a, b, n) simde_vext_s64((a), (b), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vext_u8(simde_uint8x8_t a, simde_uint8x8_t b, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 7) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
simde_uint8x8_t r;
SIMDE_CONSTIFY_8_(vext_u8, r, (HEDLEY_UNREACHABLE(), a), n, a, b);
return r;
#else
simde_uint8x8_private
a_ = simde_uint8x8_to_private(a),
b_ = simde_uint8x8_to_private(b),
r_ = a_;
const size_t n_ = HEDLEY_STATIC_CAST(size_t, n);
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
size_t src = i + n_;
r_.values[i] = (src < (sizeof(r_.values) / sizeof(r_.values[0]))) ? a_.values[src] : b_.values[src & 7];
}
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
#define simde_vext_u8(a, b, n) simde_uint8x8_from_m64(_mm_alignr_pi8(simde_uint8x8_to_m64(b), simde_uint8x8_to_m64(a), n * sizeof(uint8_t)))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) && !defined(SIMDE_BUG_GCC_100760)
#define simde_vext_u8(a, b, n) (__extension__ ({ \
simde_uint8x8_private simde_vext_u8_r_; \
simde_vext_u8_r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, simde_uint8x8_to_private(a).values, simde_uint8x8_to_private(b).values, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 4)), HEDLEY_STATIC_CAST(int8_t, ((n) + 5)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 6)), HEDLEY_STATIC_CAST(int8_t, ((n) + 7))); \
simde_uint8x8_from_private(simde_vext_u8_r_); \
}))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vext_u8
#define vext_u8(a, b, n) simde_vext_u8((a), (b), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vext_u16(simde_uint16x4_t a, simde_uint16x4_t b, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 3) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
simde_uint16x4_t r;
SIMDE_CONSTIFY_4_(vext_u16, r, (HEDLEY_UNREACHABLE(), a), n, a, b);
return r;
#else
simde_uint16x4_private
a_ = simde_uint16x4_to_private(a),
b_ = simde_uint16x4_to_private(b),
r_ = a_;
const size_t n_ = HEDLEY_STATIC_CAST(size_t, n);
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
size_t src = i + n_;
r_.values[i] = (src < (sizeof(r_.values) / sizeof(r_.values[0]))) ? a_.values[src] : b_.values[src & 3];
}
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
#define simde_vext_u16(a, b, n) simde_uint16x4_from_m64(_mm_alignr_pi8(simde_uint16x4_to_m64(b), simde_uint16x4_to_m64(a), n * sizeof(uint16_t)))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) && !defined(SIMDE_BUG_GCC_100760)
#define simde_vext_u16(a, b, n) (__extension__ ({ \
simde_uint16x4_private simde_vext_u16_r_; \
simde_vext_u16_r_.values = SIMDE_SHUFFLE_VECTOR_(16, 8, simde_uint16x4_to_private(a).values, simde_uint16x4_to_private(b).values, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3))); \
simde_uint16x4_from_private(simde_vext_u16_r_); \
}))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vext_u16
#define vext_u16(a, b, n) simde_vext_u16((a), (b), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vext_u32(simde_uint32x2_t a, simde_uint32x2_t b, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 1) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
simde_uint32x2_t r;
SIMDE_CONSTIFY_2_(vext_u32, r, (HEDLEY_UNREACHABLE(), a), n, a, b);
return r;
#else
simde_uint32x2_private
a_ = simde_uint32x2_to_private(a),
b_ = simde_uint32x2_to_private(b),
r_ = a_;
const size_t n_ = HEDLEY_STATIC_CAST(size_t, n);
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
size_t src = i + n_;
r_.values[i] = (src < (sizeof(r_.values) / sizeof(r_.values[0]))) ? a_.values[src] : b_.values[src & 1];
}
return simde_uint32x2_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
#define simde_vext_u32(a, b, n) simde_uint32x2_from_m64(_mm_alignr_pi8(simde_uint32x2_to_m64(b), simde_uint32x2_to_m64(a), n * sizeof(uint32_t)))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) && !defined(SIMDE_BUG_GCC_100760)
#define simde_vext_u32(a, b, n) (__extension__ ({ \
simde_uint32x2_private simde_vext_u32_r_; \
simde_vext_u32_r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, simde_uint32x2_to_private(a).values, simde_uint32x2_to_private(b).values, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1))); \
simde_uint32x2_from_private(simde_vext_u32_r_); \
}))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vext_u32
#define vext_u32(a, b, n) simde_vext_u32((a), (b), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vext_u64(simde_uint64x1_t a, simde_uint64x1_t b, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 0) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
(void) n;
return vext_u64(a, b, 0);
#else
simde_uint64x1_private
a_ = simde_uint64x1_to_private(a),
b_ = simde_uint64x1_to_private(b),
r_ = a_;
const size_t n_ = HEDLEY_STATIC_CAST(size_t, n);
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
size_t src = i + n_;
r_.values[i] = (src < (sizeof(r_.values) / sizeof(r_.values[0]))) ? a_.values[src] : b_.values[src & 0];
}
return simde_uint64x1_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
#define simde_vext_u64(a, b, n) simde_uint64x1_from_m64(_mm_alignr_pi8(simde_uint64x1_to_m64(b), simde_uint64x1_to_m64(a), n * sizeof(uint64_t)))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32)
#define simde_vext_u64(a, b, n) (__extension__ ({ \
simde_uint64x1_private simde_vext_u64_r_; \
simde_vext_u64_r_.values = SIMDE_SHUFFLE_VECTOR_(64, 8, simde_uint64x1_to_private(a).values, simde_uint64x1_to_private(b).values, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0))); \
simde_uint64x1_from_private(simde_vext_u64_r_); \
}))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vext_u64
#define vext_u64(a, b, n) simde_vext_u64((a), (b), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vextq_f32(simde_float32x4_t a, simde_float32x4_t b, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 3) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
simde_float32x4_t r;
SIMDE_CONSTIFY_4_(vextq_f32, r, (HEDLEY_UNREACHABLE(), a), n, a, b);
return r;
#else
simde_float32x4_private
a_ = simde_float32x4_to_private(a),
b_ = simde_float32x4_to_private(b),
r_ = a_;
const size_t n_ = HEDLEY_STATIC_CAST(size_t, n);
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
size_t src = i + n_;
r_.values[i] = (src < (sizeof(r_.values) / sizeof(r_.values[0]))) ? a_.values[src] : b_.values[src & 3];
}
return simde_float32x4_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
#define simde_vextq_f32(a, b, n) simde_float32x4_from_m128(_mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(simde_float32x4_to_m128(b)), _mm_castps_si128(simde_float32x4_to_m128(a)), (n) * sizeof(simde_float32))))
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
#define simde_vextq_f32(a, b, n) (__extension__ ({ \
simde_float32x4_private simde_vextq_f32_r_; \
simde_vextq_f32_r_.v128 = wasm_i32x4_shuffle(simde_float32x4_to_private(a).v128, simde_float32x4_to_private(b).v128, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3))); \
simde_float32x4_from_private(simde_vextq_f32_r_); \
}))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32)
#define simde_vextq_f32(a, b, n) (__extension__ ({ \
simde_float32x4_private simde_vextq_f32_r_; \
simde_vextq_f32_r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, simde_float32x4_to_private(a).values, simde_float32x4_to_private(b).values, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3))); \
simde_float32x4_from_private(simde_vextq_f32_r_); \
}))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vextq_f32
#define vextq_f32(a, b, n) simde_vextq_f32((a), (b), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vextq_f64(simde_float64x2_t a, simde_float64x2_t b, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 1) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
simde_float64x2_t r;
SIMDE_CONSTIFY_2_(vextq_f64, r, (HEDLEY_UNREACHABLE(), a), n, a, b);
return r;
#else
simde_float64x2_private
a_ = simde_float64x2_to_private(a),
b_ = simde_float64x2_to_private(b),
r_ = a_;
const size_t n_ = HEDLEY_STATIC_CAST(size_t, n);
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
size_t src = i + n_;
r_.values[i] = (src < (sizeof(r_.values) / sizeof(r_.values[0]))) ? a_.values[src] : b_.values[src & 1];
}
return simde_float64x2_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
#define simde_vextq_f64(a, b, n) simde_float64x2_from_m128d(_mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(simde_float64x2_to_m128d(b)), _mm_castpd_si128(simde_float64x2_to_m128d(a)), (n) * sizeof(simde_float64))))
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
#define simde_vextq_f64(a, b, n) (__extension__ ({ \
simde_float64x2_private simde_vextq_f64_r_; \
simde_vextq_f64_r_.v128 = wasm_i64x2_shuffle(simde_float64x2_to_private(a).v128, simde_float64x2_to_private(b).v128, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1))); \
simde_float64x2_from_private(simde_vextq_f64_r_); \
}))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32)
#define simde_vextq_f64(a, b, n) (__extension__ ({ \
simde_float64x2_private simde_vextq_f64_r_; \
simde_vextq_f64_r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, simde_float64x2_to_private(a).values, simde_float64x2_to_private(b).values, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1))); \
simde_float64x2_from_private(simde_vextq_f64_r_); \
}))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vextq_f64
#define vextq_f64(a, b, n) simde_vextq_f64((a), (b), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vextq_s8(simde_int8x16_t a, simde_int8x16_t b, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 15) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
simde_int8x16_t r;
SIMDE_CONSTIFY_16_(vextq_s8, r, (HEDLEY_UNREACHABLE(), a), n, a, b);
return r;
#else
simde_int8x16_private
a_ = simde_int8x16_to_private(a),
b_ = simde_int8x16_to_private(b),
r_ = a_;
const size_t n_ = HEDLEY_STATIC_CAST(size_t, n);
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
size_t src = i + n_;
r_.values[i] = (src < (sizeof(r_.values) / sizeof(r_.values[0]))) ? a_.values[src] : b_.values[src & 15];
}
return simde_int8x16_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
#define simde_vextq_s8(a, b, n) simde_int8x16_from_m128i(_mm_alignr_epi8(simde_int8x16_to_m128i(b), simde_int8x16_to_m128i(a), n * sizeof(int8_t)))
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
#define simde_vextq_s8(a, b, n) (__extension__ ({ \
simde_int8x16_private simde_vextq_s8_r_; \
simde_vextq_s8_r_.v128 = wasm_i8x16_shuffle(simde_int8x16_to_private(a).v128, simde_int8x16_to_private(b).v128, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 4)), HEDLEY_STATIC_CAST(int8_t, ((n) + 5)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 6)), HEDLEY_STATIC_CAST(int8_t, ((n) + 7)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 8)), HEDLEY_STATIC_CAST(int8_t, ((n) + 9)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 10)), HEDLEY_STATIC_CAST(int8_t, ((n) + 11)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 12)), HEDLEY_STATIC_CAST(int8_t, ((n) + 13)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 14)), HEDLEY_STATIC_CAST(int8_t, ((n) + 15))); \
simde_int8x16_from_private(simde_vextq_s8_r_); \
}))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32)
#define simde_vextq_s8(a, b, n) (__extension__ ({ \
simde_int8x16_private simde_vextq_s8_r_; \
simde_vextq_s8_r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, simde_int8x16_to_private(a).values, simde_int8x16_to_private(b).values, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 4)), HEDLEY_STATIC_CAST(int8_t, ((n) + 5)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 6)), HEDLEY_STATIC_CAST(int8_t, ((n) + 7)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 8)), HEDLEY_STATIC_CAST(int8_t, ((n) + 9)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 10)), HEDLEY_STATIC_CAST(int8_t, ((n) + 11)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 12)), HEDLEY_STATIC_CAST(int8_t, ((n) + 13)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 14)), HEDLEY_STATIC_CAST(int8_t, ((n) + 15))); \
simde_int8x16_from_private(simde_vextq_s8_r_); \
}))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vextq_s8
#define vextq_s8(a, b, n) simde_vextq_s8((a), (b), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vextq_s16(simde_int16x8_t a, simde_int16x8_t b, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 7) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
simde_int16x8_t r;
SIMDE_CONSTIFY_8_(vextq_s16, r, (HEDLEY_UNREACHABLE(), a), n, a, b);
return r;
#else
simde_int16x8_private
a_ = simde_int16x8_to_private(a),
b_ = simde_int16x8_to_private(b),
r_ = a_;
const size_t n_ = HEDLEY_STATIC_CAST(size_t, n);
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
size_t src = i + n_;
r_.values[i] = (src < (sizeof(r_.values) / sizeof(r_.values[0]))) ? a_.values[src] : b_.values[src & 7];
}
return simde_int16x8_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
#define simde_vextq_s16(a, b, n) simde_int16x8_from_m128i(_mm_alignr_epi8(simde_int16x8_to_m128i(b), simde_int16x8_to_m128i(a), n * sizeof(int16_t)))
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
#define simde_vextq_s16(a, b, n) (__extension__ ({ \
simde_int16x8_private simde_vextq_s16_r_; \
simde_vextq_s16_r_.v128 = wasm_i16x8_shuffle(simde_int16x8_to_private(a).v128, simde_int16x8_to_private(b).v128, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 4)), HEDLEY_STATIC_CAST(int8_t, ((n) + 5)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 6)), HEDLEY_STATIC_CAST(int8_t, ((n) + 7))); \
simde_int16x8_from_private(simde_vextq_s16_r_); \
}))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32)
#define simde_vextq_s16(a, b, n) (__extension__ ({ \
simde_int16x8_private simde_vextq_s16_r_; \
simde_vextq_s16_r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, simde_int16x8_to_private(a).values, simde_int16x8_to_private(b).values, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 4)), HEDLEY_STATIC_CAST(int8_t, ((n) + 5)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 6)), HEDLEY_STATIC_CAST(int8_t, ((n) + 7))); \
simde_int16x8_from_private(simde_vextq_s16_r_); \
}))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vextq_s16
#define vextq_s16(a, b, n) simde_vextq_s16((a), (b), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vextq_s32(simde_int32x4_t a, simde_int32x4_t b, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 3) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
simde_int32x4_t r;
SIMDE_CONSTIFY_4_(vextq_s32, r, (HEDLEY_UNREACHABLE(), a), n, a, b);
return r;
#else
simde_int32x4_private
a_ = simde_int32x4_to_private(a),
b_ = simde_int32x4_to_private(b),
r_ = a_;
const size_t n_ = HEDLEY_STATIC_CAST(size_t, n);
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
size_t src = i + n_;
r_.values[i] = (src < (sizeof(r_.values) / sizeof(r_.values[0]))) ? a_.values[src] : b_.values[src & 3];
}
return simde_int32x4_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
#define simde_vextq_s32(a, b, n) simde_int32x4_from_m128i(_mm_alignr_epi8(simde_int32x4_to_m128i(b), simde_int32x4_to_m128i(a), n * sizeof(int32_t)))
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
#define simde_vextq_s32(a, b, n) (__extension__ ({ \
simde_int32x4_private simde_vextq_s32_r_; \
simde_vextq_s32_r_.v128 = wasm_i32x4_shuffle(simde_int32x4_to_private(a).v128, simde_int32x4_to_private(b).v128, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3))); \
simde_int32x4_from_private(simde_vextq_s32_r_); \
}))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32)
#define simde_vextq_s32(a, b, n) (__extension__ ({ \
simde_int32x4_private simde_vextq_s32_r_; \
simde_vextq_s32_r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, simde_int32x4_to_private(a).values, simde_int32x4_to_private(b).values, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3))); \
simde_int32x4_from_private(simde_vextq_s32_r_); \
}))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vextq_s32
#define vextq_s32(a, b, n) simde_vextq_s32((a), (b), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vextq_s64(simde_int64x2_t a, simde_int64x2_t b, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 1) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
simde_int64x2_t r;
SIMDE_CONSTIFY_2_(vextq_s64, r, (HEDLEY_UNREACHABLE(), a), n, a, b);
return r;
#else
simde_int64x2_private
a_ = simde_int64x2_to_private(a),
b_ = simde_int64x2_to_private(b),
r_ = a_;
const size_t n_ = HEDLEY_STATIC_CAST(size_t, n);
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
size_t src = i + n_;
r_.values[i] = (src < (sizeof(r_.values) / sizeof(r_.values[0]))) ? a_.values[src] : b_.values[src & 1];
}
return simde_int64x2_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
#define simde_vextq_s64(a, b, n) simde_int64x2_from_m128i(_mm_alignr_epi8(simde_int64x2_to_m128i(b), simde_int64x2_to_m128i(a), (n) * sizeof(int64_t)))
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
#define simde_vextq_s64(a, b, n) (__extension__ ({ \
simde_int64x2_private simde_vextq_s64_r_; \
simde_vextq_s64_r_.v128 = wasm_i64x2_shuffle(simde_int64x2_to_private(a).v128, simde_int64x2_to_private(b).v128, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1))); \
simde_int64x2_from_private(simde_vextq_s64_r_); \
}))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32)
#define simde_vextq_s64(a, b, n) (__extension__ ({ \
simde_int64x2_private simde_vextq_s64_r_; \
simde_vextq_s64_r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, simde_int64x2_to_private(a).values, simde_int64x2_to_private(b).values, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1))); \
simde_int64x2_from_private(simde_vextq_s64_r_); \
}))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vextq_s64
#define vextq_s64(a, b, n) simde_vextq_s64((a), (b), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vextq_u8(simde_uint8x16_t a, simde_uint8x16_t b, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 15) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
simde_uint8x16_t r;
SIMDE_CONSTIFY_16_(vextq_u8, r, (HEDLEY_UNREACHABLE(), a), n, a, b);
return r;
#else
simde_uint8x16_private
a_ = simde_uint8x16_to_private(a),
b_ = simde_uint8x16_to_private(b),
r_ = a_;
const size_t n_ = HEDLEY_STATIC_CAST(size_t, n);
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
size_t src = i + n_;
r_.values[i] = (src < (sizeof(r_.values) / sizeof(r_.values[0]))) ? a_.values[src] : b_.values[src & 15];
}
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
#define simde_vextq_u8(a, b, n) simde_uint8x16_from_m128i(_mm_alignr_epi8(simde_uint8x16_to_m128i(b), simde_uint8x16_to_m128i(a), (n) * sizeof(uint8_t)))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32)
#define simde_vextq_u8(a, b, n) (__extension__ ({ \
simde_uint8x16_private simde_vextq_u8_r_; \
simde_vextq_u8_r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, simde_uint8x16_to_private(a).values, simde_uint8x16_to_private(b).values, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 4)), HEDLEY_STATIC_CAST(int8_t, ((n) + 5)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 6)), HEDLEY_STATIC_CAST(int8_t, ((n) + 7)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 8)), HEDLEY_STATIC_CAST(int8_t, ((n) + 9)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 10)), HEDLEY_STATIC_CAST(int8_t, ((n) + 11)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 12)), HEDLEY_STATIC_CAST(int8_t, ((n) + 13)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 14)), HEDLEY_STATIC_CAST(int8_t, ((n) + 15))); \
simde_uint8x16_from_private(simde_vextq_u8_r_); \
}))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vextq_u8
#define vextq_u8(a, b, n) simde_vextq_u8((a), (b), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vextq_u16(simde_uint16x8_t a, simde_uint16x8_t b, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 7) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
simde_uint16x8_t r;
SIMDE_CONSTIFY_8_(vextq_u16, r, (HEDLEY_UNREACHABLE(), a), n, a, b);
return r;
#else
simde_uint16x8_private
a_ = simde_uint16x8_to_private(a),
b_ = simde_uint16x8_to_private(b),
r_ = a_;
const size_t n_ = HEDLEY_STATIC_CAST(size_t, n);
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
size_t src = i + n_;
r_.values[i] = (src < (sizeof(r_.values) / sizeof(r_.values[0]))) ? a_.values[src] : b_.values[src & 7];
}
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
#define simde_vextq_u16(a, b, n) simde_uint16x8_from_m128i(_mm_alignr_epi8(simde_uint16x8_to_m128i(b), simde_uint16x8_to_m128i(a), (n) * sizeof(uint16_t)))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32)
#define simde_vextq_u16(a, b, n) (__extension__ ({ \
simde_uint16x8_private simde_vextq_u16_r_; \
simde_vextq_u16_r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, simde_uint16x8_to_private(a).values, simde_uint16x8_to_private(b).values, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 4)), HEDLEY_STATIC_CAST(int8_t, ((n) + 5)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 6)), HEDLEY_STATIC_CAST(int8_t, ((n) + 7))); \
simde_uint16x8_from_private(simde_vextq_u16_r_); \
}))
#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vextq_u16(a, b, n) (__extension__ ({ \
simde_uint16x8_private simde_vextq_u16_r_; \
simde_vextq_u16_r_.values = __builtin_shufflevector( \
simde_uint16x8_to_private(a).values, \
simde_uint16x8_to_private(b).values, \
((n) + 0), ((n) + 1), ((n) + 2), ((n) + 3), \
((n) + 4), ((n) + 5), ((n) + 6), ((n) + 7)); \
simde_uint16x8_from_private(simde_vextq_u16_r_); \
}))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vextq_u16
#define vextq_u16(a, b, n) simde_vextq_u16((a), (b), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vextq_u32(simde_uint32x4_t a, simde_uint32x4_t b, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 3) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
simde_uint32x4_t r;
SIMDE_CONSTIFY_4_(vextq_u32, r, (HEDLEY_UNREACHABLE(), a), n, a, b);
return r;
#else
simde_uint32x4_private
a_ = simde_uint32x4_to_private(a),
b_ = simde_uint32x4_to_private(b),
r_ = a_;
const size_t n_ = HEDLEY_STATIC_CAST(size_t, n);
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
size_t src = i + n_;
r_.values[i] = (src < (sizeof(r_.values) / sizeof(r_.values[0]))) ? a_.values[src] : b_.values[src & 3];
}
return simde_uint32x4_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
#define simde_vextq_u32(a, b, n) simde_uint32x4_from_m128i(_mm_alignr_epi8(simde_uint32x4_to_m128i(b), simde_uint32x4_to_m128i(a), (n) * sizeof(uint32_t)))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32)
#define simde_vextq_u32(a, b, n) (__extension__ ({ \
simde_uint32x4_private simde_vextq_u32_r_; \
simde_vextq_u32_r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, simde_uint32x4_to_private(a).values, simde_uint32x4_to_private(b).values, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \
HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3))); \
simde_uint32x4_from_private(simde_vextq_u32_r_); \
}))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vextq_u32
#define vextq_u32(a, b, n) simde_vextq_u32((a), (b), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vextq_u64(simde_uint64x2_t a, simde_uint64x2_t b, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 1) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
simde_uint64x2_t r;
SIMDE_CONSTIFY_2_(vextq_u64, r, (HEDLEY_UNREACHABLE(), a), n, a, b);
return r;
#else
simde_uint64x2_private
a_ = simde_uint64x2_to_private(a),
b_ = simde_uint64x2_to_private(b),
r_ = a_;
const size_t n_ = HEDLEY_STATIC_CAST(size_t, n);
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
size_t src = i + n_;
r_.values[i] = (src < (sizeof(r_.values) / sizeof(r_.values[0]))) ? a_.values[src] : b_.values[src & 1];
}
return simde_uint64x2_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE)
#define simde_vextq_u64(a, b, n) simde_uint64x2_from_m128i(_mm_alignr_epi8(simde_uint64x2_to_m128i(b), simde_uint64x2_to_m128i(a), (n) * sizeof(uint64_t)))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32)
#define simde_vextq_u64(a, b, n) (__extension__ ({ \
simde_uint64x2_private simde_vextq_u64_r_; \
simde_vextq_u64_r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, simde_uint64x2_to_private(a).values, simde_uint64x2_to_private(b).values, \
HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1))); \
simde_uint64x2_from_private(simde_vextq_u64_r_); \
}))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vextq_u64
#define vextq_u64(a, b, n) simde_vextq_u64((a), (b), (n))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_EXT_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/qabs.h | .h | 8,314 | 315 |
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_QABS_H)
#define SIMDE_ARM_NEON_QABS_H
#include "types.h"
#include "abs.h"
#include "add.h"
#include "bsl.h"
#include "dup_n.h"
#include "mvn.h"
#include "reinterpret.h"
#include "shr_n.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
int8_t
simde_vqabsb_s8(int8_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqabsb_s8(a);
#else
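/* Negating INT8_MIN would overflow, so saturate it to INT8_MAX; the wider
 * scalar variants below follow the same pattern. */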
return a == INT8_MIN ? INT8_MAX : (a < 0 ? -a : a);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqabsb_s8
#define vqabsb_s8(a) simde_vqabsb_s8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_vqabsh_s16(int16_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqabsh_s16(a);
#else
return a == INT16_MIN ? INT16_MAX : (a < 0 ? -a : a);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqabsh_s16
#define vqabsh_s16(a) simde_vqabsh_s16(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_vqabss_s32(int32_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqabss_s32(a);
#else
return a == INT32_MIN ? INT32_MAX : (a < 0 ? -a : a);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqabss_s32
#define vqabss_s32(a) simde_vqabss_s32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_vqabsd_s64(int64_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqabsd_s64(a);
#else
return a == INT64_MIN ? INT64_MAX : (a < 0 ? -a : a);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqabsd_s64
#define vqabsd_s64(a) simde_vqabsd_s64(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vqabs_s8(simde_int8x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqabs_s8(a);
#else
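/* simde_vabs_s8 leaves INT8_MIN unchanged (0x80). Its arithmetic shift by 7
 * is -1 for exactly that lane and 0 elsewhere, so the add wraps INT8_MIN
 * around to INT8_MAX while leaving every other lane alone. */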
simde_int8x8_t tmp = simde_vabs_s8(a);
return simde_vadd_s8(tmp, simde_vshr_n_s8(tmp, 7));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqabs_s8
#define vqabs_s8(a) simde_vqabs_s8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vqabs_s16(simde_int16x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqabs_s16(a);
#else
simde_int16x4_t tmp = simde_vabs_s16(a);
return simde_vadd_s16(tmp, simde_vshr_n_s16(tmp, 15));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqabs_s16
#define vqabs_s16(a) simde_vqabs_s16(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vqabs_s32(simde_int32x2_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqabs_s32(a);
#else
simde_int32x2_t tmp = simde_vabs_s32(a);
return simde_vadd_s32(tmp, simde_vshr_n_s32(tmp, 31));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqabs_s32
#define vqabs_s32(a) simde_vqabs_s32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vqabs_s64(simde_int64x1_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqabs_s64(a);
#else
simde_int64x1_t tmp = simde_vabs_s64(a);
return simde_vadd_s64(tmp, simde_vshr_n_s64(tmp, 63));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqabs_s64
#define vqabs_s64(a) simde_vqabs_s64(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vqabsq_s8(simde_int8x16_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqabsq_s8(a);
#elif defined(SIMDE_X86_SSE2_NATIVE)
simde_int8x16_private
r_,
a_ = simde_int8x16_to_private(simde_vabsq_s8(a));
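/* After vabsq_s8 the only out-of-range lane value is INT8_MIN, which
 * survives as 0x80 (128 unsigned): an unsigned min against 127 (SSE4.1) or
 * adding the -1 mask from cmpgt(0, lane) (SSE2) clamps exactly that lane. */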
#if defined(SIMDE_X86_SSE4_1_NATIVE)
r_.m128i = _mm_min_epu8(a_.m128i, _mm_set1_epi8(INT8_MAX));
#else
r_.m128i =
_mm_add_epi8(
a_.m128i,
_mm_cmpgt_epi8(_mm_setzero_si128(), a_.m128i)
);
#endif
return simde_int8x16_from_private(r_);
#else
simde_int8x16_t tmp = simde_vabsq_s8(a);
return
simde_vbslq_s8(
simde_vreinterpretq_u8_s8(simde_vshrq_n_s8(tmp, 7)),
simde_vmvnq_s8(tmp),
tmp
);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqabsq_s8
#define vqabsq_s8(a) simde_vqabsq_s8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vqabsq_s16(simde_int16x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqabsq_s16(a);
#elif defined(SIMDE_X86_SSE2_NATIVE)
simde_int16x8_private
r_,
a_ = simde_int16x8_to_private(simde_vabsq_s16(a));
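/* Same saturation idea as the 8-bit case: only INT16_MIN survives the abs,
 * and either the unsigned min or the arithmetic sign shift corrects it. */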
#if defined(SIMDE_X86_SSE4_1_NATIVE)
r_.m128i = _mm_min_epu16(a_.m128i, _mm_set1_epi16(INT16_MAX));
#else
r_.m128i =
_mm_add_epi16(
a_.m128i,
_mm_srai_epi16(a_.m128i, 15)
);
#endif
return simde_int16x8_from_private(r_);
#else
simde_int16x8_t tmp = simde_vabsq_s16(a);
return
simde_vbslq_s16(
simde_vreinterpretq_u16_s16(simde_vshrq_n_s16(tmp, 15)),
simde_vmvnq_s16(tmp),
tmp
);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqabsq_s16
#define vqabsq_s16(a) simde_vqabsq_s16(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vqabsq_s32(simde_int32x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqabsq_s32(a);
#elif defined(SIMDE_X86_SSE2_NATIVE)
simde_int32x4_private
r_,
a_ = simde_int32x4_to_private(simde_vabsq_s32(a));
#if defined(SIMDE_X86_SSE4_1_NATIVE)
r_.m128i = _mm_min_epu32(a_.m128i, _mm_set1_epi32(INT32_MAX));
#else
r_.m128i =
_mm_add_epi32(
a_.m128i,
_mm_srai_epi32(a_.m128i, 31)
);
#endif
return simde_int32x4_from_private(r_);
#else
simde_int32x4_t tmp = simde_vabsq_s32(a);
return
simde_vbslq_s32(
simde_vreinterpretq_u32_s32(simde_vshrq_n_s32(tmp, 31)),
simde_vmvnq_s32(tmp),
tmp
);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqabsq_s32
#define vqabsq_s32(a) simde_vqabsq_s32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vqabsq_s64(simde_int64x2_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqabsq_s64(a);
#elif defined(SIMDE_X86_SSE2_NATIVE)
simde_int64x2_private
r_,
a_ = simde_int64x2_to_private(simde_vabsq_s64(a));
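/* SSE2 lacks a 64-bit arithmetic shift and compare: duplicating each lane's
 * high-dword sign (srai + shuffle) rebuilds the per-lane -1/0 mask that the
 * SSE4.2 cmpgt path computes directly. */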
#if defined(SIMDE_X86_SSE4_2_NATIVE)
r_.m128i =
_mm_add_epi64(
a_.m128i,
_mm_cmpgt_epi64(_mm_setzero_si128(), a_.m128i)
);
#else
r_.m128i =
_mm_add_epi64(
a_.m128i,
_mm_shuffle_epi32(
_mm_srai_epi32(a_.m128i, 31),
_MM_SHUFFLE(3, 3, 1, 1)
)
);
#endif
return simde_int64x2_from_private(r_);
#else
simde_int64x2_t tmp = simde_vabsq_s64(a);
return
simde_vbslq_s64(
simde_vreinterpretq_u64_s64(simde_vshrq_n_s64(tmp, 63)),
simde_vreinterpretq_s64_s32(simde_vmvnq_s32(simde_vreinterpretq_s32_s64(tmp))),
tmp
);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqabsq_s64
#define vqabsq_s64(a) simde_vqabsq_s64(a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_QABS_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/mull.h | .h | 7,990 | 237 |
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_MULL_H)
#define SIMDE_ARM_NEON_MULL_H
#include "types.h"
#include "mul.h"
#include "movl.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vmull_s8(simde_int8x8_t a, simde_int8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmull_s8(a, b);
#elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
return simde_vmulq_s16(simde_vmovl_s8(a), simde_vmovl_s8(b));
#else
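/* Long multiply: widen both operands to twice the element width before
 * multiplying, so the full 16-bit product of each 8-bit pair is kept. */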
simde_int16x8_private r_;
simde_int8x8_private
a_ = simde_int8x8_to_private(a),
b_ = simde_int8x8_to_private(b);
#if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100761)
__typeof__(r_.values) av, bv;
SIMDE_CONVERT_VECTOR_(av, a_.values);
SIMDE_CONVERT_VECTOR_(bv, b_.values);
r_.values = av * bv;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(int16_t, a_.values[i]) * HEDLEY_STATIC_CAST(int16_t, b_.values[i]);
}
#endif
return simde_int16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmull_s8
#define vmull_s8(a, b) simde_vmull_s8((a), (b))
#endif
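/* Worked example: vmull_s8 on lanes 100 and 100 yields the full product
 * 10000 as an int16_t, where an 8-bit multiply would have wrapped. */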
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vmull_s16(simde_int16x4_t a, simde_int16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmull_s16(a, b);
#elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
return simde_vmulq_s32(simde_vmovl_s16(a), simde_vmovl_s16(b));
#else
simde_int32x4_private r_;
simde_int16x4_private
a_ = simde_int16x4_to_private(a),
b_ = simde_int16x4_to_private(b);
#if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100761)
__typeof__(r_.values) av, bv;
SIMDE_CONVERT_VECTOR_(av, a_.values);
SIMDE_CONVERT_VECTOR_(bv, b_.values);
r_.values = av * bv;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(int32_t, a_.values[i]) * HEDLEY_STATIC_CAST(int32_t, b_.values[i]);
}
#endif
return simde_int32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmull_s16
#define vmull_s16(a, b) simde_vmull_s16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vmull_s32(simde_int32x2_t a, simde_int32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmull_s32(a, b);
#else
simde_int64x2_private r_;
simde_int32x2_private
a_ = simde_int32x2_to_private(a),
b_ = simde_int32x2_to_private(b);
#if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
__typeof__(r_.values) av, bv;
SIMDE_CONVERT_VECTOR_(av, a_.values);
SIMDE_CONVERT_VECTOR_(bv, b_.values);
r_.values = av * bv;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(int64_t, a_.values[i]) * HEDLEY_STATIC_CAST(int64_t, b_.values[i]);
}
#endif
return simde_int64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmull_s32
#define vmull_s32(a, b) simde_vmull_s32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vmull_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmull_u8(a, b);
#elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
return simde_vmulq_u16(simde_vmovl_u8(a), simde_vmovl_u8(b));
#else
simde_uint16x8_private r_;
simde_uint8x8_private
a_ = simde_uint8x8_to_private(a),
b_ = simde_uint8x8_to_private(b);
#if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100761)
__typeof__(r_.values) av, bv;
SIMDE_CONVERT_VECTOR_(av, a_.values);
SIMDE_CONVERT_VECTOR_(bv, b_.values);
r_.values = av * bv;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, a_.values[i]) * HEDLEY_STATIC_CAST(uint16_t, b_.values[i]);
}
#endif
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmull_u8
#define vmull_u8(a, b) simde_vmull_u8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vmull_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmull_u16(a, b);
#elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
return simde_vmulq_u32(simde_vmovl_u16(a), simde_vmovl_u16(b));
#else
simde_uint32x4_private r_;
simde_uint16x4_private
a_ = simde_uint16x4_to_private(a),
b_ = simde_uint16x4_to_private(b);
#if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100761)
__typeof__(r_.values) av, bv;
SIMDE_CONVERT_VECTOR_(av, a_.values);
SIMDE_CONVERT_VECTOR_(bv, b_.values);
r_.values = av * bv;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(uint32_t, a_.values[i]) * HEDLEY_STATIC_CAST(uint32_t, b_.values[i]);
}
#endif
return simde_uint32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmull_u16
#define vmull_u16(a, b) simde_vmull_u16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vmull_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmull_u32(a, b);
#else
simde_uint64x2_private r_;
simde_uint32x2_private
a_ = simde_uint32x2_to_private(a),
b_ = simde_uint32x2_to_private(b);
#if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
__typeof__(r_.values) av, bv;
SIMDE_CONVERT_VECTOR_(av, a_.values);
SIMDE_CONVERT_VECTOR_(bv, b_.values);
r_.values = av * bv;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(uint64_t, a_.values[i]) * HEDLEY_STATIC_CAST(uint64_t, b_.values[i]);
}
#endif
return simde_uint64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmull_u32
#define vmull_u32(a, b) simde_vmull_u32((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_MULL_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/mlsl_high.h | .h | 4,092 | 125 |
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_MLSL_HIGH_H)
#define SIMDE_ARM_NEON_MLSL_HIGH_H
#include "mull_high.h"
#include "sub.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vmlsl_high_s8(simde_int16x8_t a, simde_int8x16_t b, simde_int8x16_t c) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmlsl_high_s8(a, b, c);
#else
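/* Composed from primitives: widening multiply of the high halves of b and c,
 * subtracted from the accumulator a. The remaining variants below are built
 * the same way. */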
return simde_vsubq_s16(a, simde_vmull_high_s8(b, c));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmlsl_high_s8
#define vmlsl_high_s8(a, b, c) simde_vmlsl_high_s8((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vmlsl_high_s16(simde_int32x4_t a, simde_int16x8_t b, simde_int16x8_t c) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmlsl_high_s16(a, b, c);
#else
return simde_vsubq_s32(a, simde_vmull_high_s16(b, c));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmlsl_high_s16
#define vmlsl_high_s16(a, b, c) simde_vmlsl_high_s16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vmlsl_high_s32(simde_int64x2_t a, simde_int32x4_t b, simde_int32x4_t c) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmlsl_high_s32(a, b, c);
#else
return simde_vsubq_s64(a, simde_vmull_high_s32(b, c));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmlsl_high_s32
#define vmlsl_high_s32(a, b, c) simde_vmlsl_high_s32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vmlsl_high_u8(simde_uint16x8_t a, simde_uint8x16_t b, simde_uint8x16_t c) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmlsl_high_u8(a, b, c);
#else
return simde_vsubq_u16(a, simde_vmull_high_u8(b, c));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmlsl_high_u8
#define vmlsl_high_u8(a, b, c) simde_vmlsl_high_u8((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vmlsl_high_u16(simde_uint32x4_t a, simde_uint16x8_t b, simde_uint16x8_t c) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmlsl_high_u16(a, b, c);
#else
return simde_vsubq_u32(a, simde_vmull_high_u16(b, c));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmlsl_high_u16
#define vmlsl_high_u16(a, b, c) simde_vmlsl_high_u16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vmlsl_high_u32(simde_uint64x2_t a, simde_uint32x4_t b, simde_uint32x4_t c) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmlsl_high_u32(a, b, c);
#else
return simde_vsubq_u64(a, simde_vmull_high_u32(b, c));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmlsl_high_u32
#define vmlsl_high_u32(a, b, c) simde_vmlsl_high_u32((a), (b), (c))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_MLSL_HIGH_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/padd.h | .h | 11,574 | 389 |
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020-2021 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_PADD_H)
#define SIMDE_ARM_NEON_PADD_H
#include "add.h"
#include "uzp1.h"
#include "uzp2.h"
#include "types.h"
#include "get_lane.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_vpaddd_s64(simde_int64x2_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vpaddd_s64(a);
#else
return simde_vaddd_s64(simde_vgetq_lane_s64(a, 0), simde_vgetq_lane_s64(a, 1));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vpaddd_s64
#define vpaddd_s64(a) simde_vpaddd_s64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vpaddd_u64(simde_uint64x2_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vpaddd_u64(a);
#else
return simde_vaddd_u64(simde_vgetq_lane_u64(a, 0), simde_vgetq_lane_u64(a, 1));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vpaddd_u64
#define vpaddd_u64(a) simde_vpaddd_u64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64_t
simde_vpaddd_f64(simde_float64x2_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vpaddd_f64(a);
#else
simde_float64x2_private a_ = simde_float64x2_to_private(a);
return a_.values[0] + a_.values[1];
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vpaddd_f64
#define vpaddd_f64(a) simde_vpaddd_f64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32_t
simde_vpadds_f32(simde_float32x2_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vpadds_f32(a);
#else
simde_float32x2_private a_ = simde_float32x2_to_private(a);
return a_.values[0] + a_.values[1];
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vpadds_f32
#define vpadds_f32(a) simde_vpadds_f32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vpadd_f32(simde_float32x2_t a, simde_float32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !SIMDE_DETECT_CLANG_VERSION_NOT(9,0,0)
return vpadd_f32(a, b);
#else
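/* Pairwise add via deinterleave: uzp1 gathers the even-indexed lanes of
 * {a, b} and uzp2 the odd-indexed ones, so their lane-wise sum is
 * {a0+a1, b0+b1}. The integer variants below use the same decomposition. */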
return simde_vadd_f32(simde_vuzp1_f32(a, b), simde_vuzp2_f32(a, b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpadd_f32
#define vpadd_f32(a, b) simde_vpadd_f32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vpadd_s8(simde_int8x8_t a, simde_int8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpadd_s8(a, b);
#else
return simde_vadd_s8(simde_vuzp1_s8(a, b), simde_vuzp2_s8(a, b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpadd_s8
#define vpadd_s8(a, b) simde_vpadd_s8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vpadd_s16(simde_int16x4_t a, simde_int16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpadd_s16(a, b);
#elif defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
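/* _mm_hadd_pi16(a, b) computes {a0+a1, a2+a3, b0+b1, b2+b3}, which is this
 * pairwise add exactly. */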
return simde_int16x4_from_m64(_mm_hadd_pi16(simde_int16x4_to_m64(a), simde_int16x4_to_m64(b)));
#else
return simde_vadd_s16(simde_vuzp1_s16(a, b), simde_vuzp2_s16(a, b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpadd_s16
#define vpadd_s16(a, b) simde_vpadd_s16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vpadd_s32(simde_int32x2_t a, simde_int32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpadd_s32(a, b);
#elif defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return simde_int32x2_from_m64(_mm_hadd_pi32(simde_int32x2_to_m64(a), simde_int32x2_to_m64(b)));
#else
return simde_vadd_s32(simde_vuzp1_s32(a, b), simde_vuzp2_s32(a, b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpadd_s32
#define vpadd_s32(a, b) simde_vpadd_s32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vpadd_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpadd_u8(a, b);
#else
return simde_vadd_u8(simde_vuzp1_u8(a, b), simde_vuzp2_u8(a, b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpadd_u8
#define vpadd_u8(a, b) simde_vpadd_u8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vpadd_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpadd_u16(a, b);
#else
return simde_vadd_u16(simde_vuzp1_u16(a, b), simde_vuzp2_u16(a, b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpadd_u16
#define vpadd_u16(a, b) simde_vpadd_u16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vpadd_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpadd_u32(a, b);
#else
return simde_vadd_u32(simde_vuzp1_u32(a, b), simde_vuzp2_u32(a, b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpadd_u32
#define vpadd_u32(a, b) simde_vpadd_u32((a), (b))
#endif
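/* The 128-bit q-forms below reuse the uzp1/uzp2 decomposition, with SSE3/
 * SSSE3 horizontal-add fast paths where a single instruction matches. */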
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vpaddq_f32(simde_float32x4_t a, simde_float32x4_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vpaddq_f32(a, b);
#elif defined(SIMDE_X86_SSE3_NATIVE)
simde_float32x4_private
r_,
a_ = simde_float32x4_to_private(a),
b_ = simde_float32x4_to_private(b);
r_.m128 = _mm_hadd_ps(a_.m128, b_.m128);
return simde_float32x4_from_private(r_);
#else
return simde_vaddq_f32(simde_vuzp1q_f32(a, b), simde_vuzp2q_f32(a, b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpaddq_f32
#define vpaddq_f32(a, b) simde_vpaddq_f32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vpaddq_f64(simde_float64x2_t a, simde_float64x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vpaddq_f64(a, b);
#elif defined(SIMDE_X86_SSE3_NATIVE)
simde_float64x2_private
r_,
a_ = simde_float64x2_to_private(a),
b_ = simde_float64x2_to_private(b);
r_.m128d = _mm_hadd_pd(a_.m128d, b_.m128d);
return simde_float64x2_from_private(r_);
#else
return simde_vaddq_f64(simde_vuzp1q_f64(a, b), simde_vuzp2q_f64(a, b));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vpaddq_f64
#define vpaddq_f64(a, b) simde_vpaddq_f64((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vpaddq_s8(simde_int8x16_t a, simde_int8x16_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vpaddq_s8(a, b);
#else
return simde_vaddq_s8(simde_vuzp1q_s8(a, b), simde_vuzp2q_s8(a, b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpaddq_s8
#define vpaddq_s8(a, b) simde_vpaddq_s8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vpaddq_s16(simde_int16x8_t a, simde_int16x8_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vpaddq_s16(a, b);
#elif defined(SIMDE_X86_SSSE3_NATIVE)
simde_int16x8_private
r_,
a_ = simde_int16x8_to_private(a),
b_ = simde_int16x8_to_private(b);
r_.m128i = _mm_hadd_epi16(a_.m128i, b_.m128i);
return simde_int16x8_from_private(r_);
#else
return simde_vaddq_s16(simde_vuzp1q_s16(a, b), simde_vuzp2q_s16(a, b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpaddq_s16
#define vpaddq_s16(a, b) simde_vpaddq_s16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vpaddq_s32(simde_int32x4_t a, simde_int32x4_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vpaddq_s32(a, b);
#elif defined(SIMDE_X86_SSSE3_NATIVE)
simde_int32x4_private
r_,
a_ = simde_int32x4_to_private(a),
b_ = simde_int32x4_to_private(b);
r_.m128i = _mm_hadd_epi32(a_.m128i, b_.m128i);
return simde_int32x4_from_private(r_);
#else
return simde_vaddq_s32(simde_vuzp1q_s32(a, b), simde_vuzp2q_s32(a, b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpaddq_s32
#define vpaddq_s32(a, b) simde_vpaddq_s32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vpaddq_s64(simde_int64x2_t a, simde_int64x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vpaddq_s64(a, b);
#else
return simde_vaddq_s64(simde_vuzp1q_s64(a, b), simde_vuzp2q_s64(a, b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpaddq_s64
#define vpaddq_s64(a, b) simde_vpaddq_s64((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vpaddq_u8(simde_uint8x16_t a, simde_uint8x16_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vpaddq_u8(a, b);
#else
return simde_vaddq_u8(simde_vuzp1q_u8(a, b), simde_vuzp2q_u8(a, b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpaddq_u8
#define vpaddq_u8(a, b) simde_vpaddq_u8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vpaddq_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vpaddq_u16(a, b);
#else
return simde_vaddq_u16(simde_vuzp1q_u16(a, b), simde_vuzp2q_u16(a, b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpaddq_u16
#define vpaddq_u16(a, b) simde_vpaddq_u16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vpaddq_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vpaddq_u32(a, b);
#else
return simde_vaddq_u32(simde_vuzp1q_u32(a, b), simde_vuzp2q_u32(a, b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpaddq_u32
#define vpaddq_u32(a, b) simde_vpaddq_u32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vpaddq_u64(simde_uint64x2_t a, simde_uint64x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vpaddq_u64(a, b);
#else
return simde_vaddq_u64(simde_vuzp1q_u64(a, b), simde_vuzp2q_u64(a, b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpaddq_u64
#define vpaddq_u64(a, b) simde_vpaddq_u64((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_PADD_H) */
| Unknown |