keyword | repo_name | file_path | file_extension | file_size | line_count | content | language
|---|---|---|---|---|---|---|---|
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/rshr_n.h | .h | 18,114 | 514 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Christopher Moore <moore@free.fr>
*/
#if !defined(SIMDE_ARM_NEON_RSHR_N_H)
#define SIMDE_ARM_NEON_RSHR_N_H
#include "combine.h"
#include "dup_n.h"
#include "get_low.h"
#include "reinterpret.h"
#include "shr_n.h"
#include "sub.h"
#include "tst.h"
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_x_vrshrs_n_s32(int32_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 32) {
return (a >> ((n == 32) ? 31 : n)) + ((a & HEDLEY_STATIC_CAST(int32_t, UINT32_C(1) << (n - 1))) != 0);
}
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_x_vrshrs_n_u32(uint32_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 32) {
return ((n == 32) ? 0 : (a >> n)) + ((a & (UINT32_C(1) << (n - 1))) != 0);
}
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_vrshrd_n_s64(int64_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 64) {
return (a >> ((n == 64) ? 63 : n)) + ((a & HEDLEY_STATIC_CAST(int64_t, UINT64_C(1) << (n - 1))) != 0);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vrshrd_n_s64(a, n) vrshrd_n_s64((a), (n))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vrshrd_n_s64
#define vrshrd_n_s64(a, n) simde_vrshrd_n_s64((a), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vrshrd_n_u64(uint64_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 64) {
return ((n == 64) ? 0 : (a >> n)) + ((a & (UINT64_C(1) << (n - 1))) != 0);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vrshrd_n_u64(a, n) vrshrd_n_u64((a), (n))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vrshrd_n_u64
#define vrshrd_n_u64(a, n) simde_vrshrd_n_u64((a), (n))
#endif
/* 128-bit ("q") vector rounding shifts right, vrshrq_n_{s,u}{8,16,32,64}.
 *
 * Each intrinsic has up to three tiers, selected at preprocessing time:
 *   1. the portable element-by-element loop defined below;
 *   2. a direct mapping to the native NEON intrinsic (A32V7_NATIVE);
 *   3. a generic-vector fallback built from vshrq + vtstq + vsubq:
 *      vtstq produces an all-ones lane wherever the rounding bit
 *      (bit n-1) is set, and subtracting an all-ones lane is the same
 *      as adding 1 -- so vshrq(a, n) - mask == rounding shift. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vrshrq_n_s8 (const simde_int8x16_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 8) {
simde_int8x16_private
r_,
a_ = simde_int8x16_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
/* Add-then-shift is safe for 8/16-bit lanes: operands are promoted to
 * int, so the (1 << (n - 1)) bias cannot overflow before the shift. */
r_.values[i] = HEDLEY_STATIC_CAST(int8_t, (a_.values[i] + (1 << (n - 1))) >> n);
}
return simde_int8x16_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrshrq_n_s8(a, n) vrshrq_n_s8((a), (n))
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
#define simde_vrshrq_n_s8(a, n) simde_vsubq_s8(simde_vshrq_n_s8((a), (n)), simde_vreinterpretq_s8_u8( \
simde_vtstq_u8(simde_vreinterpretq_u8_s8(a), \
simde_vdupq_n_u8(HEDLEY_STATIC_CAST(uint8_t, 1 << ((n) - 1))))))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshrq_n_s8
#define vrshrq_n_s8(a, n) simde_vrshrq_n_s8((a), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vrshrq_n_s16 (const simde_int16x8_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 16) {
simde_int16x8_private
r_,
a_ = simde_int16x8_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(int16_t, (a_.values[i] + (1 << (n - 1))) >> n);
}
return simde_int16x8_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrshrq_n_s16(a, n) vrshrq_n_s16((a), (n))
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
#define simde_vrshrq_n_s16(a, n) simde_vsubq_s16(simde_vshrq_n_s16((a), (n)), simde_vreinterpretq_s16_u16( \
simde_vtstq_u16(simde_vreinterpretq_u16_s16(a), \
simde_vdupq_n_u16(HEDLEY_STATIC_CAST(uint16_t, 1 << ((n) - 1))))))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshrq_n_s16
#define vrshrq_n_s16(a, n) simde_vrshrq_n_s16((a), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vrshrq_n_s32 (const simde_int32x4_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 32) {
simde_int32x4_private
r_,
a_ = simde_int32x4_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
/* 32/64-bit lanes use the shift-then-add-carry form so the rounding
 * bias cannot overflow the lane type; n == 32 clamps to 31 to avoid
 * an undefined full-width shift. */
r_.values[i] = (a_.values[i] >> ((n == 32) ? 31 : n)) + ((a_.values[i] & HEDLEY_STATIC_CAST(int32_t, UINT32_C(1) << (n - 1))) != 0);
}
return simde_int32x4_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrshrq_n_s32(a, n) vrshrq_n_s32((a), (n))
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
#define simde_vrshrq_n_s32(a, n) simde_vsubq_s32(simde_vshrq_n_s32((a), (n)), \
simde_vreinterpretq_s32_u32(simde_vtstq_u32(simde_vreinterpretq_u32_s32(a), \
simde_vdupq_n_u32(UINT32_C(1) << ((n) - 1)))))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshrq_n_s32
#define vrshrq_n_s32(a, n) simde_vrshrq_n_s32((a), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vrshrq_n_s64 (const simde_int64x2_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 64) {
simde_int64x2_private
r_,
a_ = simde_int64x2_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] >> ((n == 64) ? 63 : n)) + ((a_.values[i] & HEDLEY_STATIC_CAST(int64_t, UINT64_C(1) << (n - 1))) != 0);
}
return simde_int64x2_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrshrq_n_s64(a, n) vrshrq_n_s64((a), (n))
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
#define simde_vrshrq_n_s64(a, n) simde_vsubq_s64(simde_vshrq_n_s64((a), (n)), \
simde_vreinterpretq_s64_u64(simde_vtstq_u64(simde_vreinterpretq_u64_s64(a), \
simde_vdupq_n_u64(UINT64_C(1) << ((n) - 1)))))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshrq_n_s64
#define vrshrq_n_s64(a, n) simde_vrshrq_n_s64((a), (n))
#endif
/* Unsigned variants: same tiers, but the unsigned fallback needs no
 * reinterpret casts around vtstq since the element type already matches. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vrshrq_n_u8 (const simde_uint8x16_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 8) {
simde_uint8x16_private
r_,
a_ = simde_uint8x16_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, (a_.values[i] + (1 << (n - 1))) >> n);
}
return simde_uint8x16_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrshrq_n_u8(a, n) vrshrq_n_u8((a), (n))
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
#define simde_vrshrq_n_u8(a, n) simde_vsubq_u8(simde_vshrq_n_u8((a), (n)), \
simde_vtstq_u8((a), simde_vdupq_n_u8(HEDLEY_STATIC_CAST(uint8_t, 1 << ((n) - 1)))))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshrq_n_u8
#define vrshrq_n_u8(a, n) simde_vrshrq_n_u8((a), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vrshrq_n_u16 (const simde_uint16x8_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 16) {
simde_uint16x8_private
r_,
a_ = simde_uint16x8_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, (a_.values[i] + (1 << (n - 1))) >> n);
}
return simde_uint16x8_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrshrq_n_u16(a, n) vrshrq_n_u16((a), (n))
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
#define simde_vrshrq_n_u16(a, n) simde_vsubq_u16(simde_vshrq_n_u16((a), (n)), \
simde_vtstq_u16((a), simde_vdupq_n_u16(HEDLEY_STATIC_CAST(uint16_t, 1 << ((n) - 1)))))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshrq_n_u16
#define vrshrq_n_u16(a, n) simde_vrshrq_n_u16((a), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vrshrq_n_u32 (const simde_uint32x4_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 32) {
simde_uint32x4_private
r_,
a_ = simde_uint32x4_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
/* Unsigned full-width shift (n == 32) is defined here as 0; only the
 * rounding carry remains. */
r_.values[i] = ((n == 32) ? 0 : (a_.values[i] >> n)) + ((a_.values[i] & (UINT32_C(1) << (n - 1))) != 0);
}
return simde_uint32x4_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrshrq_n_u32(a, n) vrshrq_n_u32((a), (n))
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
#define simde_vrshrq_n_u32(a, n) simde_vsubq_u32(simde_vshrq_n_u32((a), (n)), \
simde_vtstq_u32((a), simde_vdupq_n_u32(UINT32_C(1) << ((n) - 1))))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshrq_n_u32
#define vrshrq_n_u32(a, n) simde_vrshrq_n_u32((a), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vrshrq_n_u64 (const simde_uint64x2_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 64) {
simde_uint64x2_private
r_,
a_ = simde_uint64x2_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = ((n == 64) ? 0 : (a_.values[i] >> n)) + ((a_.values[i] & (UINT64_C(1) << (n - 1))) != 0);
}
return simde_uint64x2_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrshrq_n_u64(a, n) vrshrq_n_u64((a), (n))
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
#define simde_vrshrq_n_u64(a, n) simde_vsubq_u64(simde_vshrq_n_u64((a), (n)), \
simde_vtstq_u64((a), simde_vdupq_n_u64(UINT64_C(1) << ((n) - 1))))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshrq_n_u64
#define vrshrq_n_u64(a, n) simde_vrshrq_n_u64((a), (n))
#endif
/* 64-bit vector rounding shifts right, vrshr_n_{s,u}{8,16,32,64}.
 * Structurally identical to the vrshrq_n_* family above, operating on
 * half-width (64-bit) vector types and the non-"q" helper intrinsics. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vrshr_n_s8 (const simde_int8x8_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 8) {
simde_int8x8_private
r_,
a_ = simde_int8x8_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
/* Promotion to int makes the add-then-shift bias safe for narrow lanes. */
r_.values[i] = HEDLEY_STATIC_CAST(int8_t, (a_.values[i] + (1 << (n - 1))) >> n);
}
return simde_int8x8_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrshr_n_s8(a, n) vrshr_n_s8((a), (n))
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
#define simde_vrshr_n_s8(a, n) simde_vsub_s8(simde_vshr_n_s8((a), (n)), simde_vreinterpret_s8_u8( \
simde_vtst_u8(simde_vreinterpret_u8_s8(a), \
simde_vdup_n_u8(HEDLEY_STATIC_CAST(uint8_t, 1 << ((n) - 1))))))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshr_n_s8
#define vrshr_n_s8(a, n) simde_vrshr_n_s8((a), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vrshr_n_s16 (const simde_int16x4_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 16) {
simde_int16x4_private
r_,
a_ = simde_int16x4_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(int16_t, (a_.values[i] + (1 << (n - 1))) >> n);
}
return simde_int16x4_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrshr_n_s16(a, n) vrshr_n_s16((a), (n))
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
#define simde_vrshr_n_s16(a, n) simde_vsub_s16(simde_vshr_n_s16((a), (n)), simde_vreinterpret_s16_u16( \
simde_vtst_u16(simde_vreinterpret_u16_s16(a), \
simde_vdup_n_u16(HEDLEY_STATIC_CAST(uint16_t, 1 << ((n) - 1))))))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshr_n_s16
#define vrshr_n_s16(a, n) simde_vrshr_n_s16((a), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vrshr_n_s32 (const simde_int32x2_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 32) {
simde_int32x2_private
r_,
a_ = simde_int32x2_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
/* Shift-then-add-carry form; n == 32 clamps to 31 to stay defined. */
r_.values[i] = (a_.values[i] >> ((n == 32) ? 31 : n)) + ((a_.values[i] & HEDLEY_STATIC_CAST(int32_t, UINT32_C(1) << (n - 1))) != 0);
}
return simde_int32x2_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrshr_n_s32(a, n) vrshr_n_s32((a), (n))
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
#define simde_vrshr_n_s32(a, n) simde_vsub_s32(simde_vshr_n_s32((a), (n)), \
simde_vreinterpret_s32_u32(simde_vtst_u32(simde_vreinterpret_u32_s32(a), \
simde_vdup_n_u32(UINT32_C(1) << ((n) - 1)))))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshr_n_s32
#define vrshr_n_s32(a, n) simde_vrshr_n_s32((a), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vrshr_n_s64 (const simde_int64x1_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 64) {
simde_int64x1_private
r_,
a_ = simde_int64x1_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] >> ((n == 64) ? 63 : n)) + ((a_.values[i] & HEDLEY_STATIC_CAST(int64_t, UINT64_C(1) << (n - 1))) != 0);
}
return simde_int64x1_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrshr_n_s64(a, n) vrshr_n_s64((a), (n))
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
#define simde_vrshr_n_s64(a, n) simde_vsub_s64(simde_vshr_n_s64((a), (n)), \
simde_vreinterpret_s64_u64(simde_vtst_u64(simde_vreinterpret_u64_s64(a), \
simde_vdup_n_u64(UINT64_C(1) << ((n) - 1)))))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshr_n_s64
#define vrshr_n_s64(a, n) simde_vrshr_n_s64((a), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vrshr_n_u8 (const simde_uint8x8_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 8) {
simde_uint8x8_private
r_,
a_ = simde_uint8x8_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, (a_.values[i] + (1 << (n - 1))) >> n);
}
return simde_uint8x8_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrshr_n_u8(a, n) vrshr_n_u8((a), (n))
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
#define simde_vrshr_n_u8(a, n) simde_vsub_u8(simde_vshr_n_u8((a), (n)), \
simde_vtst_u8((a), simde_vdup_n_u8(HEDLEY_STATIC_CAST(uint8_t, 1 << ((n) - 1)))))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshr_n_u8
#define vrshr_n_u8(a, n) simde_vrshr_n_u8((a), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vrshr_n_u16 (const simde_uint16x4_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 16) {
simde_uint16x4_private
r_,
a_ = simde_uint16x4_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, (a_.values[i] + (1 << (n - 1))) >> n);
}
return simde_uint16x4_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrshr_n_u16(a, n) vrshr_n_u16((a), (n))
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
#define simde_vrshr_n_u16(a, n) simde_vsub_u16(simde_vshr_n_u16((a), (n)), \
simde_vtst_u16((a), simde_vdup_n_u16(HEDLEY_STATIC_CAST(uint16_t, 1 << ((n) - 1)))))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshr_n_u16
#define vrshr_n_u16(a, n) simde_vrshr_n_u16((a), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vrshr_n_u32 (const simde_uint32x2_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 32) {
simde_uint32x2_private
r_,
a_ = simde_uint32x2_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = ((n == 32) ? 0 : (a_.values[i] >> n)) + ((a_.values[i] & (UINT32_C(1) << (n - 1))) != 0);
}
return simde_uint32x2_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrshr_n_u32(a, n) vrshr_n_u32((a), (n))
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
#define simde_vrshr_n_u32(a, n) simde_vsub_u32(simde_vshr_n_u32((a), (n)), \
simde_vtst_u32((a), simde_vdup_n_u32(UINT32_C(1) << ((n) - 1))))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshr_n_u32
#define vrshr_n_u32(a, n) simde_vrshr_n_u32((a), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vrshr_n_u64 (const simde_uint64x1_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 64) {
simde_uint64x1_private
r_,
a_ = simde_uint64x1_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = ((n == 64) ? 0 : (a_.values[i] >> n)) + ((a_.values[i] & (UINT64_C(1) << (n - 1))) != 0);
}
return simde_uint64x1_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrshr_n_u64(a, n) vrshr_n_u64((a), (n))
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
#define simde_vrshr_n_u64(a, n) simde_vsub_u64(simde_vshr_n_u64((a), (n)), \
simde_vtst_u64((a), simde_vdup_n_u64(UINT64_C(1) << ((n) - 1))))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshr_n_u64
#define vrshr_n_u64(a, n) simde_vrshr_n_u64((a), (n))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_RSHR_N_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/sra_n.h | .h | 7,798 | 223 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Christopher Moore <moore@free.fr>
*/
#if !defined(SIMDE_ARM_NEON_SRA_N_H)
#define SIMDE_ARM_NEON_SRA_N_H
#include "add.h"
#include "shr_n.h"
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vsrad_n_s64(a, b, n) vsrad_n_s64((a), (b), (n))
#else
#define simde_vsrad_n_s64(a, b, n) simde_vaddd_s64((a), simde_vshrd_n_s64((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vsrad_n_s64
#define vsrad_n_s64(a, b, n) simde_vsrad_n_s64((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vsrad_n_u64(a, b, n) vsrad_n_u64((a), (b), (n))
#else
#define simde_vsrad_n_u64(a, b, n) simde_vaddd_u64((a), simde_vshrd_n_u64((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vsrad_n_u64
#define vsrad_n_u64(a, b, n) simde_vsrad_n_u64((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vsra_n_s8(a, b, n) vsra_n_s8((a), (b), (n))
#else
#define simde_vsra_n_s8(a, b, n) simde_vadd_s8((a), simde_vshr_n_s8((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsra_n_s8
#define vsra_n_s8(a, b, n) simde_vsra_n_s8((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vsra_n_s16(a, b, n) vsra_n_s16((a), (b), (n))
#else
#define simde_vsra_n_s16(a, b, n) simde_vadd_s16((a), simde_vshr_n_s16((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsra_n_s16
#define vsra_n_s16(a, b, n) simde_vsra_n_s16((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vsra_n_s32(a, b, n) vsra_n_s32((a), (b), (n))
#else
#define simde_vsra_n_s32(a, b, n) simde_vadd_s32((a), simde_vshr_n_s32((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsra_n_s32
#define vsra_n_s32(a, b, n) simde_vsra_n_s32((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vsra_n_s64(a, b, n) vsra_n_s64((a), (b), (n))
#else
#define simde_vsra_n_s64(a, b, n) simde_vadd_s64((a), simde_vshr_n_s64((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsra_n_s64
#define vsra_n_s64(a, b, n) simde_vsra_n_s64((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vsra_n_u8(a, b, n) vsra_n_u8((a), (b), (n))
#else
#define simde_vsra_n_u8(a, b, n) simde_vadd_u8((a), simde_vshr_n_u8((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsra_n_u8
#define vsra_n_u8(a, b, n) simde_vsra_n_u8((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vsra_n_u16(a, b, n) vsra_n_u16((a), (b), (n))
#else
#define simde_vsra_n_u16(a, b, n) simde_vadd_u16((a), simde_vshr_n_u16((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsra_n_u16
#define vsra_n_u16(a, b, n) simde_vsra_n_u16((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vsra_n_u32(a, b, n) vsra_n_u32((a), (b), (n))
#else
#define simde_vsra_n_u32(a, b, n) simde_vadd_u32((a), simde_vshr_n_u32((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsra_n_u32
#define vsra_n_u32(a, b, n) simde_vsra_n_u32((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vsra_n_u64(a, b, n) vsra_n_u64((a), (b), (n))
#else
#define simde_vsra_n_u64(a, b, n) simde_vadd_u64((a), simde_vshr_n_u64((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsra_n_u64
#define vsra_n_u64(a, b, n) simde_vsra_n_u64((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vsraq_n_s8(a, b, n) vsraq_n_s8((a), (b), (n))
#else
#define simde_vsraq_n_s8(a, b, n) simde_vaddq_s8((a), simde_vshrq_n_s8((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsraq_n_s8
#define vsraq_n_s8(a, b, n) simde_vsraq_n_s8((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vsraq_n_s16(a, b, n) vsraq_n_s16((a), (b), (n))
#else
#define simde_vsraq_n_s16(a, b, n) simde_vaddq_s16((a), simde_vshrq_n_s16((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsraq_n_s16
#define vsraq_n_s16(a, b, n) simde_vsraq_n_s16((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vsraq_n_s32(a, b, n) vsraq_n_s32((a), (b), (n))
#else
#define simde_vsraq_n_s32(a, b, n) simde_vaddq_s32((a), simde_vshrq_n_s32((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsraq_n_s32
#define vsraq_n_s32(a, b, n) simde_vsraq_n_s32((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vsraq_n_s64(a, b, n) vsraq_n_s64((a), (b), (n))
#else
#define simde_vsraq_n_s64(a, b, n) simde_vaddq_s64((a), simde_vshrq_n_s64((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsraq_n_s64
#define vsraq_n_s64(a, b, n) simde_vsraq_n_s64((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vsraq_n_u8(a, b, n) vsraq_n_u8((a), (b), (n))
#else
#define simde_vsraq_n_u8(a, b, n) simde_vaddq_u8((a), simde_vshrq_n_u8((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsraq_n_u8
#define vsraq_n_u8(a, b, n) simde_vsraq_n_u8((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vsraq_n_u16(a, b, n) vsraq_n_u16((a), (b), (n))
#else
#define simde_vsraq_n_u16(a, b, n) simde_vaddq_u16((a), simde_vshrq_n_u16((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsraq_n_u16
#define vsraq_n_u16(a, b, n) simde_vsraq_n_u16((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vsraq_n_u32(a, b, n) vsraq_n_u32((a), (b), (n))
#else
#define simde_vsraq_n_u32(a, b, n) simde_vaddq_u32((a), simde_vshrq_n_u32((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsraq_n_u32
#define vsraq_n_u32(a, b, n) simde_vsraq_n_u32((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vsraq_n_u64(a, b, n) vsraq_n_u64((a), (b), (n))
#else
#define simde_vsraq_n_u64(a, b, n) simde_vaddq_u64((a), simde_vshrq_n_u64((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsraq_n_u64
#define vsraq_n_u64(a, b, n) simde_vsraq_n_u64((a), (b), (n))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_SRA_N_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/bsl.h | .h | 26,650 | 762 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_BSL_H)
#define SIMDE_ARM_NEON_BSL_H
#include "types.h"
#include "reinterpret.h"
#include "and.h"
#include "eor.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde_float16x4_t
simde_vbsl_f16(simde_uint16x4_t a, simde_float16x4_t b, simde_float16x4_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_ARM_NEON_FP16)
return vbsl_f16(a, b, c);
#else
simde_uint16x4_private
r_,
a_ = simde_uint16x4_to_private(a),
b_ = simde_uint16x4_to_private(simde_vreinterpret_u16_f16(b)),
c_ = simde_uint16x4_to_private(simde_vreinterpret_u16_f16(c));
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]);
}
#endif
return simde_vreinterpret_f16_u16(simde_uint16x4_from_private(r_));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vbsl_f16
#define vbsl_f16(a, b, c) simde_vbsl_f16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vbsl_f32(simde_uint32x2_t a, simde_float32x2_t b, simde_float32x2_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vbsl_f32(a, b, c);
#else
simde_uint32x2_private
r_,
a_ = simde_uint32x2_to_private(a),
b_ = simde_uint32x2_to_private(simde_vreinterpret_u32_f32(b)),
c_ = simde_uint32x2_to_private(simde_vreinterpret_u32_f32(c));
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]);
}
#endif
return simde_vreinterpret_f32_u32(simde_uint32x2_from_private(r_));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vbsl_f32
#define vbsl_f32(a, b, c) simde_vbsl_f32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vbsl_f64(simde_uint64x1_t a, simde_float64x1_t b, simde_float64x1_t c) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vbsl_f64(a, b, c);
#else
simde_uint64x1_private
r_,
a_ = simde_uint64x1_to_private(a),
b_ = simde_uint64x1_to_private(simde_vreinterpret_u64_f64(b)),
c_ = simde_uint64x1_to_private(simde_vreinterpret_u64_f64(c));
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]);
}
#endif
return simde_vreinterpret_f64_u64(simde_uint64x1_from_private(r_));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vbsl_f64
#define vbsl_f64(a, b, c) simde_vbsl_f64((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vbsl_s8(simde_uint8x8_t a, simde_int8x8_t b, simde_int8x8_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vbsl_s8(a, b, c);
#else
simde_uint8x8_private
r_,
a_ = simde_uint8x8_to_private(a),
b_ = simde_uint8x8_to_private(simde_vreinterpret_u8_s8(b)),
c_ = simde_uint8x8_to_private(simde_vreinterpret_u8_s8(c));
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]);
}
#endif
return simde_vreinterpret_s8_u8(simde_uint8x8_from_private(r_));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vbsl_s8
#define vbsl_s8(a, b, c) simde_vbsl_s8((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vbsl_s16(simde_uint16x4_t a, simde_int16x4_t b, simde_int16x4_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vbsl_s16(a, b, c);
#else
simde_uint16x4_private
r_,
a_ = simde_uint16x4_to_private(a),
b_ = simde_uint16x4_to_private(simde_vreinterpret_u16_s16(b)),
c_ = simde_uint16x4_to_private(simde_vreinterpret_u16_s16(c));
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]);
}
#endif
return simde_vreinterpret_s16_u16(simde_uint16x4_from_private(r_));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vbsl_s16
#define vbsl_s16(a, b, c) simde_vbsl_s16((a), (b), (c))
#endif
/* simde_vbsl_s32: bitwise select between b (mask bit 1) and c (mask bit 0)
 * under mask a (NEON vbsl_s32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vbsl_s32(simde_uint32x2_t a, simde_int32x2_t b, simde_int32x2_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vbsl_s32(a, b, c);
  #else
    /* Select in the unsigned domain, reinterpret back to signed at the end. */
    simde_uint32x2_private
      r_,
      a_ = simde_uint32x2_to_private(a),
      b_ = simde_uint32x2_to_private(simde_vreinterpret_u32_s32(b)),
      c_ = simde_uint32x2_to_private(simde_vreinterpret_u32_s32(c));
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* Equivalent to (b & a) | (c & ~a). */
      r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]);
      }
    #endif
    return simde_vreinterpret_s32_u32(simde_uint32x2_from_private(r_));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vbsl_s32
  #define vbsl_s32(a, b, c) simde_vbsl_s32((a), (b), (c))
#endif
/* simde_vbsl_s64: bitwise select between b (mask bit 1) and c (mask bit 0)
 * under mask a (NEON vbsl_s64). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vbsl_s64(simde_uint64x1_t a, simde_int64x1_t b, simde_int64x1_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vbsl_s64(a, b, c);
  #else
    /* Select in the unsigned domain, reinterpret back to signed at the end. */
    simde_uint64x1_private
      r_,
      a_ = simde_uint64x1_to_private(a),
      b_ = simde_uint64x1_to_private(simde_vreinterpret_u64_s64(b)),
      c_ = simde_uint64x1_to_private(simde_vreinterpret_u64_s64(c));
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* Equivalent to (b & a) | (c & ~a). */
      r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]);
      }
    #endif
    return simde_vreinterpret_s64_u64(simde_uint64x1_from_private(r_));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vbsl_s64
  #define vbsl_s64(a, b, c) simde_vbsl_s64((a), (b), (c))
#endif
/* simde_vbsl_u8: bitwise select between b (mask bit 1) and c (mask bit 0)
 * under mask a (NEON vbsl_u8). All operands already unsigned, so no
 * reinterpret step is needed. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vbsl_u8(simde_uint8x8_t a, simde_uint8x8_t b, simde_uint8x8_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vbsl_u8(a, b, c);
  #else
    simde_uint8x8_private
      r_,
      a_ = simde_uint8x8_to_private(a),
      b_ = simde_uint8x8_to_private(b),
      c_ = simde_uint8x8_to_private(c);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* Equivalent to (b & a) | (c & ~a). */
      r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]);
      }
    #endif
    return simde_uint8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vbsl_u8
  #define vbsl_u8(a, b, c) simde_vbsl_u8((a), (b), (c))
#endif
/* simde_vbsl_u16: bitwise select between b (mask bit 1) and c (mask bit 0)
 * under mask a (NEON vbsl_u16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vbsl_u16(simde_uint16x4_t a, simde_uint16x4_t b, simde_uint16x4_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vbsl_u16(a, b, c);
  #else
    simde_uint16x4_private
      r_,
      a_ = simde_uint16x4_to_private(a),
      b_ = simde_uint16x4_to_private(b),
      c_ = simde_uint16x4_to_private(c);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* Equivalent to (b & a) | (c & ~a). */
      r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]);
      }
    #endif
    return simde_uint16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vbsl_u16
  #define vbsl_u16(a, b, c) simde_vbsl_u16((a), (b), (c))
#endif
/* simde_vbsl_u32: bitwise select between b (mask bit 1) and c (mask bit 0)
 * under mask a (NEON vbsl_u32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vbsl_u32(simde_uint32x2_t a, simde_uint32x2_t b, simde_uint32x2_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vbsl_u32(a, b, c);
  #else
    simde_uint32x2_private
      r_,
      a_ = simde_uint32x2_to_private(a),
      b_ = simde_uint32x2_to_private(b),
      c_ = simde_uint32x2_to_private(c);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* Equivalent to (b & a) | (c & ~a). */
      r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]);
      }
    #endif
    return simde_uint32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vbsl_u32
  #define vbsl_u32(a, b, c) simde_vbsl_u32((a), (b), (c))
#endif
/* simde_vbsl_u64: bitwise select between b (mask bit 1) and c (mask bit 0)
 * under mask a (NEON vbsl_u64). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vbsl_u64(simde_uint64x1_t a, simde_uint64x1_t b, simde_uint64x1_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vbsl_u64(a, b, c);
  #else
    simde_uint64x1_private
      r_,
      a_ = simde_uint64x1_to_private(a),
      b_ = simde_uint64x1_to_private(b),
      c_ = simde_uint64x1_to_private(c);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* Equivalent to (b & a) | (c & ~a). */
      r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]);
      }
    #endif
    return simde_uint64x1_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vbsl_u64
  #define vbsl_u64(a, b, c) simde_vbsl_u64((a), (b), (c))
#endif
/* simde_vbslq_f16: bitwise select between b (mask bit 1) and c (mask bit 0)
 * under mask a (NEON vbslq_f16). The native path additionally requires FP16
 * support; otherwise the selection is done on the raw 16-bit patterns. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float16x8_t
simde_vbslq_f16(simde_uint16x8_t a, simde_float16x8_t b, simde_float16x8_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_ARM_NEON_FP16)
    return vbslq_f16(a, b, c);
  #else
    /* Treat the half-float lanes as u16 bit patterns for the selection. */
    simde_uint16x8_private
      r_,
      a_ = simde_uint16x8_to_private(a),
      b_ = simde_uint16x8_to_private(simde_vreinterpretq_u16_f16(b)),
      c_ = simde_uint16x8_to_private(simde_vreinterpretq_u16_f16(c));
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* Equivalent to (b & a) | (c & ~a). */
      r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]);
      }
    #endif
    return simde_vreinterpretq_f16_u16(simde_uint16x8_from_private(r_));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vbslq_f16
  #define vbslq_f16(a, b, c) simde_vbslq_f16((a), (b), (c))
#endif
/* simde_vbslq_f32: bitwise select between b (mask bit 1) and c (mask bit 0)
 * under mask a (NEON vbslq_f32), operating on the raw f32 bit patterns. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vbslq_f32(simde_uint32x4_t a, simde_float32x4_t b, simde_float32x4_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vbslq_f32(a, b, c);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
    /* vec_sel argument order is (false-value, true-value, mask). */
    return vec_sel(c, b, a);
  #else
    simde_uint32x4_private
      r_,
      a_ = simde_uint32x4_to_private(a),
      b_ = simde_uint32x4_to_private(simde_vreinterpretq_u32_f32(b)),
      c_ = simde_uint32x4_to_private(simde_vreinterpretq_u32_f32(c));
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_v128_bitselect(b_.v128, c_.v128, a_.v128);
    #elif defined(SIMDE_X86_AVX512VL_NATIVE)
      /* 0xCA is the ternary-logic truth table for (a ? b : c). */
      r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, c_.m128i, 0xca);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* Equivalent to (b & a) | (c & ~a). */
      r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]);
      }
    #endif
    return simde_vreinterpretq_f32_u32(simde_uint32x4_from_private(r_));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vbslq_f32
  #define vbslq_f32(a, b, c) simde_vbslq_f32((a), (b), (c))
#endif
/* simde_vbslq_f64: bitwise select between b (mask bit 1) and c (mask bit 0)
 * under mask a. vbslq_f64 is AArch64-only, hence the A64V8 guards. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vbslq_f64(simde_uint64x2_t a, simde_float64x2_t b, simde_float64x2_t c) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vbslq_f64(a, b, c);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
    /* 64-bit vec_sel needs POWER8 / z13; argument order is (false, true, mask). */
    return vec_sel(c, b, a);
  #else
    simde_uint64x2_private
      r_,
      a_ = simde_uint64x2_to_private(a),
      b_ = simde_uint64x2_to_private(simde_vreinterpretq_u64_f64(b)),
      c_ = simde_uint64x2_to_private(simde_vreinterpretq_u64_f64(c));
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_v128_bitselect(b_.v128, c_.v128, a_.v128);
    #elif defined(SIMDE_X86_AVX512VL_NATIVE)
      /* 0xCA is the ternary-logic truth table for (a ? b : c). */
      r_.m128i = _mm_ternarylogic_epi64(a_.m128i, b_.m128i, c_.m128i, 0xca);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* Equivalent to (b & a) | (c & ~a). */
      r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]);
      }
    #endif
    return simde_vreinterpretq_f64_u64(simde_uint64x2_from_private(r_));
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vbslq_f64
  #define vbslq_f64(a, b, c) simde_vbslq_f64((a), (b), (c))
#endif
/* simde_vbslq_s8: bitwise select between b (mask bit 1) and c (mask bit 0)
 * under mask a (NEON vbslq_s8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vbslq_s8(simde_uint8x16_t a, simde_int8x16_t b, simde_int8x16_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vbslq_s8(a, b, c);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
    /* vec_sel argument order is (false-value, true-value, mask). */
    return vec_sel(c, b, a);
  #else
    /* Select in the unsigned domain, reinterpret back to signed at the end. */
    simde_uint8x16_private
      r_,
      a_ = simde_uint8x16_to_private(a),
      b_ = simde_uint8x16_to_private(simde_vreinterpretq_u8_s8(b)),
      c_ = simde_uint8x16_to_private(simde_vreinterpretq_u8_s8(c));
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_v128_bitselect(b_.v128, c_.v128, a_.v128);
    #elif defined(SIMDE_X86_AVX512VL_NATIVE)
      /* 0xCA is the ternary-logic truth table for (a ? b : c). */
      r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, c_.m128i, 0xca);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* Equivalent to (b & a) | (c & ~a). */
      r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]);
      }
    #endif
    return simde_vreinterpretq_s8_u8(simde_uint8x16_from_private(r_));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vbslq_s8
  #define vbslq_s8(a, b, c) simde_vbslq_s8((a), (b), (c))
#endif
/* simde_vbslq_s16: bitwise select between b (mask bit 1) and c (mask bit 0)
 * under mask a (NEON vbslq_s16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vbslq_s16(simde_uint16x8_t a, simde_int16x8_t b, simde_int16x8_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vbslq_s16(a, b, c);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
    /* vec_sel argument order is (false-value, true-value, mask). */
    return vec_sel(c, b, a);
  #else
    /* Select in the unsigned domain, reinterpret back to signed at the end. */
    simde_uint16x8_private
      r_,
      a_ = simde_uint16x8_to_private(a),
      b_ = simde_uint16x8_to_private(simde_vreinterpretq_u16_s16(b)),
      c_ = simde_uint16x8_to_private(simde_vreinterpretq_u16_s16(c));
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_v128_bitselect(b_.v128, c_.v128, a_.v128);
    #elif defined(SIMDE_X86_AVX512VL_NATIVE)
      /* 0xCA is the ternary-logic truth table for (a ? b : c). */
      r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, c_.m128i, 0xca);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* Equivalent to (b & a) | (c & ~a). */
      r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]);
      }
    #endif
    return simde_vreinterpretq_s16_u16(simde_uint16x8_from_private(r_));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vbslq_s16
  #define vbslq_s16(a, b, c) simde_vbslq_s16((a), (b), (c))
#endif
/* simde_vbslq_s32: bitwise select between b (mask bit 1) and c (mask bit 0)
 * under mask a (NEON vbslq_s32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vbslq_s32(simde_uint32x4_t a, simde_int32x4_t b, simde_int32x4_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vbslq_s32(a, b, c);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
    /* vec_sel argument order is (false-value, true-value, mask). */
    return vec_sel(c, b, a);
  #else
    /* Select in the unsigned domain, reinterpret back to signed at the end. */
    simde_uint32x4_private
      r_,
      a_ = simde_uint32x4_to_private(a),
      b_ = simde_uint32x4_to_private(simde_vreinterpretq_u32_s32(b)),
      c_ = simde_uint32x4_to_private(simde_vreinterpretq_u32_s32(c));
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_v128_bitselect(b_.v128, c_.v128, a_.v128);
    #elif defined(SIMDE_X86_AVX512VL_NATIVE)
      /* 0xCA is the ternary-logic truth table for (a ? b : c). */
      r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, c_.m128i, 0xca);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* Equivalent to (b & a) | (c & ~a). */
      r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]);
      }
    #endif
    return simde_vreinterpretq_s32_u32(simde_uint32x4_from_private(r_));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vbslq_s32
  #define vbslq_s32(a, b, c) simde_vbslq_s32((a), (b), (c))
#endif
/* simde_vbslq_s64: bitwise select between b (mask bit 1) and c (mask bit 0)
 * under mask a (NEON vbslq_s64).
 *
 * Fix: the first POWER branch previously matched
 * `P6 || SIMDE_ZARCH_ZVECTOR_13`, which shadowed the later
 * `P8 || SIMDE_ZARCH_ZVECTOR_13` branch and made the direct 64-bit vec_sel
 * unreachable on z/Arch z13. The 32-bit fallback is only needed on POWER6,
 * which lacks 64-bit vector elements, so the first branch is now P6-only. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vbslq_s64(simde_uint64x2_t a, simde_int64x2_t b, simde_int64x2_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vbslq_s64(a, b, c);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    /* POWER6 AltiVec has no 64-bit vec_sel; select in 32-bit halves
     * (bitwise selection is element-size agnostic). */
    return
      simde_vreinterpretq_s64_s32(
        simde_vbslq_s32(
          simde_vreinterpretq_u32_u64(a),
          simde_vreinterpretq_s32_s64(b),
          simde_vreinterpretq_s32_s64(c)
        )
      );
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
    /* vec_sel argument order is (false-value, true-value, mask). */
    return vec_sel(
      HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), c),
      HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), b),
      HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), a));
  #else
    /* Select in the unsigned domain, reinterpret back to signed at the end. */
    simde_uint64x2_private
      r_,
      a_ = simde_uint64x2_to_private(a),
      b_ = simde_uint64x2_to_private(simde_vreinterpretq_u64_s64(b)),
      c_ = simde_uint64x2_to_private(simde_vreinterpretq_u64_s64(c));
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_v128_bitselect(b_.v128, c_.v128, a_.v128);
    #elif defined(SIMDE_X86_AVX512VL_NATIVE)
      /* 0xCA is the ternary-logic truth table for (a ? b : c). */
      r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, c_.m128i, 0xca);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* Equivalent to (b & a) | (c & ~a). */
      r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]);
      }
    #endif
    return simde_vreinterpretq_s64_u64(simde_uint64x2_from_private(r_));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vbslq_s64
  #define vbslq_s64(a, b, c) simde_vbslq_s64((a), (b), (c))
#endif
/* simde_vbslq_u8: bitwise select between b (mask bit 1) and c (mask bit 0)
 * under mask a (NEON vbslq_u8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vbslq_u8(simde_uint8x16_t a, simde_uint8x16_t b, simde_uint8x16_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vbslq_u8(a, b, c);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
    /* vec_sel argument order is (false-value, true-value, mask). */
    return vec_sel(c, b, a);
  #else
    simde_uint8x16_private
      r_,
      a_ = simde_uint8x16_to_private(a),
      b_ = simde_uint8x16_to_private(b),
      c_ = simde_uint8x16_to_private(c);
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_v128_bitselect(b_.v128, c_.v128, a_.v128);
    #elif defined(SIMDE_X86_AVX512VL_NATIVE)
      /* 0xCA is the ternary-logic truth table for (a ? b : c). */
      r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, c_.m128i, 0xca);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* Equivalent to (b & a) | (c & ~a). */
      r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]);
      }
    #endif
    return simde_uint8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vbslq_u8
  #define vbslq_u8(a, b, c) simde_vbslq_u8((a), (b), (c))
#endif
/* simde_vbslq_u16: bitwise select between b (mask bit 1) and c (mask bit 0)
 * under mask a (NEON vbslq_u16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vbslq_u16(simde_uint16x8_t a, simde_uint16x8_t b, simde_uint16x8_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vbslq_u16(a, b, c);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
    /* vec_sel argument order is (false-value, true-value, mask). */
    return vec_sel(c, b, a);
  #else
    simde_uint16x8_private
      r_,
      a_ = simde_uint16x8_to_private(a),
      b_ = simde_uint16x8_to_private(b),
      c_ = simde_uint16x8_to_private(c);
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_v128_bitselect(b_.v128, c_.v128, a_.v128);
    #elif defined(SIMDE_X86_AVX512VL_NATIVE)
      /* 0xCA is the ternary-logic truth table for (a ? b : c). */
      r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, c_.m128i, 0xca);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* Equivalent to (b & a) | (c & ~a). */
      r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]);
      }
    #endif
    return simde_uint16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vbslq_u16
  #define vbslq_u16(a, b, c) simde_vbslq_u16((a), (b), (c))
#endif
/* simde_vbslq_u32: bitwise select between b (mask bit 1) and c (mask bit 0)
 * under mask a (NEON vbslq_u32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vbslq_u32(simde_uint32x4_t a, simde_uint32x4_t b, simde_uint32x4_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vbslq_u32(a, b, c);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
    /* vec_sel argument order is (false-value, true-value, mask). */
    return vec_sel(c, b, a);
  #else
    simde_uint32x4_private
      r_,
      a_ = simde_uint32x4_to_private(a),
      b_ = simde_uint32x4_to_private(b),
      c_ = simde_uint32x4_to_private(c);
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_v128_bitselect(b_.v128, c_.v128, a_.v128);
    #elif defined(SIMDE_X86_AVX512VL_NATIVE)
      /* 0xCA is the ternary-logic truth table for (a ? b : c). */
      r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, c_.m128i, 0xca);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* Equivalent to (b & a) | (c & ~a). */
      r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]);
      }
    #endif
    return simde_uint32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vbslq_u32
  #define vbslq_u32(a, b, c) simde_vbslq_u32((a), (b), (c))
#endif
/* simde_vbslq_u64: bitwise select between b (mask bit 1) and c (mask bit 0)
 * under mask a (NEON vbslq_u64). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vbslq_u64(simde_uint64x2_t a, simde_uint64x2_t b, simde_uint64x2_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vbslq_u64(a, b, c);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
    /* Bitwise selection is element-size agnostic, so delegate to the
     * 32-bit variant where vec_sel is always available.
     * NOTE(review): a dedicated P8/z13 64-bit vec_sel branch (as in
     * simde_vbslq_s64) could be used here — confirm against upstream. */
    return
      simde_vreinterpretq_u64_u32(
        simde_vbslq_u32(
          simde_vreinterpretq_u32_u64(a),
          simde_vreinterpretq_u32_u64(b),
          simde_vreinterpretq_u32_u64(c)
        )
      );
  #else
    simde_uint64x2_private
      r_,
      a_ = simde_uint64x2_to_private(a),
      b_ = simde_uint64x2_to_private(b),
      c_ = simde_uint64x2_to_private(c);
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_v128_bitselect(b_.v128, c_.v128, a_.v128);
    #elif defined(SIMDE_X86_AVX512VL_NATIVE)
      /* 0xCA is the ternary-logic truth table for (a ? b : c). */
      r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, c_.m128i, 0xca);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* Equivalent to (b & a) | (c & ~a). */
      r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]);
      }
    #endif
    return simde_uint64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vbslq_u64
  #define vbslq_u64(a, b, c) simde_vbslq_u64((a), (b), (c))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_BSL_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/rndn.h | .h | 4,602 | 159 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020-2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_RNDN_H)
#define SIMDE_ARM_NEON_RNDN_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* simde_vrndns_f32: round a scalar float to the nearest integral value,
 * ties to even (NEON vrndns_f32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32_t
simde_vrndns_f32(simde_float32_t a) {
  /* The native intrinsic is only used on compilers known to expose the
   * scalar form: clang >= 7, and GCC only on AArch64 with GCC >= 8. */
  #if \
      defined(SIMDE_ARM_NEON_A32V8_NATIVE) && \
      (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0)) && \
      (!defined(HEDLEY_GCC_VERSION) || (defined(SIMDE_ARM_NEON_A64V8_NATIVE) && HEDLEY_GCC_VERSION_CHECK(8,0,0)))
    return vrndns_f32(a);
  #else
    return simde_math_roundevenf(a);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES)
  #undef vrndns_f32
  #define vrndns_f32(a) simde_vrndns_f32(a)
#endif
/* simde_vrndn_f32: round each lane to the nearest integral value, ties to
 * even (NEON vrndn_f32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vrndn_f32(simde_float32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V8_NATIVE)
    return vrndn_f32(a);
  #else
    simde_float32x2_private
      r_,
      a_ = simde_float32x2_to_private(a);

    const size_t lane_count = sizeof(r_.values) / sizeof(r_.values[0]);
    SIMDE_VECTORIZE
    for (size_t lane = 0 ; lane < lane_count ; lane++) {
      /* Delegate to the scalar helper so compiler quirks are handled once. */
      r_.values[lane] = simde_vrndns_f32(a_.values[lane]);
    }

    return simde_float32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES)
  #undef vrndn_f32
  #define vrndn_f32(a) simde_vrndn_f32(a)
#endif
/* simde_vrndn_f64: round each lane to the nearest integral value, ties to
 * even (AArch64 NEON vrndn_f64). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vrndn_f64(simde_float64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vrndn_f64(a);
  #else
    simde_float64x1_private
      r_,
      a_ = simde_float64x1_to_private(a);
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_math_roundeven(a_.values[i]);
    }
    return simde_float64x1_from_private(r_);
  #endif
}
/* Fix: vrndn_f64 is an AArch64-only intrinsic (see the A64V8 native guard
 * above), so the alias must be gated on the A64V8 alias flag — previously
 * gated on A32V8, which never fired for AArch32-native-only builds that do
 * need the alias. Matches the A64V8 gating of the other f64 emulations. */
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vrndn_f64
  #define vrndn_f64(a) simde_vrndn_f64(a)
#endif
/* simde_vrndnq_f32: round each lane to the nearest integral value, ties to
 * even (NEON vrndnq_f32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vrndnq_f32(simde_float32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V8_NATIVE)
    return vrndnq_f32(a);
  #else
    simde_float32x4_private
      r_,
      a_ = simde_float32x4_to_private(a);
    #if defined(SIMDE_X86_SSE4_1_NATIVE)
      /* _MM_FROUND_TO_NEAREST_INT rounds ties to even, matching vrndn. */
      r_.m128 = _mm_round_ps(a_.m128, _MM_FROUND_TO_NEAREST_INT);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = simde_vrndns_f32(a_.values[i]);
      }
    #endif
    return simde_float32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES)
  #undef vrndnq_f32
  #define vrndnq_f32(a) simde_vrndnq_f32(a)
#endif
/* simde_vrndnq_f64: round each lane to the nearest integral value, ties to
 * even (AArch64 NEON vrndnq_f64). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vrndnq_f64(simde_float64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vrndnq_f64(a);
  #else
    simde_float64x2_private
      r_,
      a_ = simde_float64x2_to_private(a);
    #if defined(SIMDE_X86_SSE4_1_NATIVE)
      /* _MM_FROUND_TO_NEAREST_INT rounds ties to even, matching vrndn. */
      r_.m128d = _mm_round_pd(a_.m128d, _MM_FROUND_TO_NEAREST_INT);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = simde_math_roundeven(a_.values[i]);
      }
    #endif
    return simde_float64x2_from_private(r_);
  #endif
}
/* Fix: vrndnq_f64 is AArch64-only (A64V8 native guard above), so the alias
 * belongs behind the A64V8 alias flag, not A32V8 — consistent with the
 * other f64 emulations (e.g. simde_vbslq_f64, simde_vmovn_high_*). */
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vrndnq_f64
  #define vrndnq_f64(a) simde_vrndnq_f64(a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_RNDN_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/mlsl.h | .h | 3,888 | 125 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_MLSL_H)
#define SIMDE_ARM_NEON_MLSL_H
#include "mull.h"
#include "sub.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* simde_vmlsl_s8: widening multiply-subtract — a - (b * c), with b and c
 * widened from 8 to 16 bits before the multiply (NEON vmlsl_s8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vmlsl_s8(simde_int16x8_t a, simde_int8x8_t b, simde_int8x8_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmlsl_s8(a, b, c);
  #else
    simde_int16x8_t product = simde_vmull_s8(b, c);
    return simde_vsubq_s16(a, product);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmlsl_s8
  #define vmlsl_s8(a, b, c) simde_vmlsl_s8((a), (b), (c))
#endif
/* simde_vmlsl_s16: widening multiply-subtract — a - (b * c), with b and c
 * widened from 16 to 32 bits before the multiply (NEON vmlsl_s16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vmlsl_s16(simde_int32x4_t a, simde_int16x4_t b, simde_int16x4_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmlsl_s16(a, b, c);
  #else
    simde_int32x4_t product = simde_vmull_s16(b, c);
    return simde_vsubq_s32(a, product);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmlsl_s16
  #define vmlsl_s16(a, b, c) simde_vmlsl_s16((a), (b), (c))
#endif
/* simde_vmlsl_s32: widening multiply-subtract — a - (b * c), with b and c
 * widened from 32 to 64 bits before the multiply (NEON vmlsl_s32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vmlsl_s32(simde_int64x2_t a, simde_int32x2_t b, simde_int32x2_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmlsl_s32(a, b, c);
  #else
    simde_int64x2_t product = simde_vmull_s32(b, c);
    return simde_vsubq_s64(a, product);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmlsl_s32
  #define vmlsl_s32(a, b, c) simde_vmlsl_s32((a), (b), (c))
#endif
/* simde_vmlsl_u8: widening multiply-subtract — a - (b * c), with b and c
 * widened from 8 to 16 bits before the multiply (NEON vmlsl_u8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vmlsl_u8(simde_uint16x8_t a, simde_uint8x8_t b, simde_uint8x8_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmlsl_u8(a, b, c);
  #else
    simde_uint16x8_t product = simde_vmull_u8(b, c);
    return simde_vsubq_u16(a, product);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmlsl_u8
  #define vmlsl_u8(a, b, c) simde_vmlsl_u8((a), (b), (c))
#endif
/* simde_vmlsl_u16: widening multiply-subtract — a - (b * c), with b and c
 * widened from 16 to 32 bits before the multiply (NEON vmlsl_u16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vmlsl_u16(simde_uint32x4_t a, simde_uint16x4_t b, simde_uint16x4_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmlsl_u16(a, b, c);
  #else
    simde_uint32x4_t product = simde_vmull_u16(b, c);
    return simde_vsubq_u32(a, product);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmlsl_u16
  #define vmlsl_u16(a, b, c) simde_vmlsl_u16((a), (b), (c))
#endif
/* simde_vmlsl_u32: widening multiply-subtract — a - (b * c), with b and c
 * widened from 32 to 64 bits before the multiply (NEON vmlsl_u32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vmlsl_u32(simde_uint64x2_t a, simde_uint32x2_t b, simde_uint32x2_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmlsl_u32(a, b, c);
  #else
    simde_uint64x2_t product = simde_vmull_u32(b, c);
    return simde_vsubq_u64(a, product);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmlsl_u32
  #define vmlsl_u32(a, b, c) simde_vmlsl_u32((a), (b), (c))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_MLSL_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/movn_high.h | .h | 3,905 | 126 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_MOVN_HIGH_H)
#define SIMDE_ARM_NEON_MOVN_HIGH_H
#include "types.h"
#include "movn.h"
#include "combine.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* simde_vmovn_high_s16: narrow each 16-bit lane of a to 8 bits and place
 * the result in the upper half of the output, with r as the lower half
 * (AArch64 NEON vmovn_high_s16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vmovn_high_s16(simde_int8x8_t r, simde_int16x8_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vmovn_high_s16(r, a);
  #else
    simde_int8x8_t narrowed = simde_vmovn_s16(a);
    return simde_vcombine_s8(r, narrowed);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vmovn_high_s16
  #define vmovn_high_s16(r, a) simde_vmovn_high_s16((r), (a))
#endif
/* simde_vmovn_high_s32: narrow each 32-bit lane of a to 16 bits and place
 * the result in the upper half of the output, with r as the lower half
 * (AArch64 NEON vmovn_high_s32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vmovn_high_s32(simde_int16x4_t r, simde_int32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vmovn_high_s32(r, a);
  #else
    simde_int16x4_t narrowed = simde_vmovn_s32(a);
    return simde_vcombine_s16(r, narrowed);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vmovn_high_s32
  #define vmovn_high_s32(r, a) simde_vmovn_high_s32((r), (a))
#endif
/* simde_vmovn_high_s64: narrow each 64-bit lane of a to 32 bits and place
 * the result in the upper half of the output, with r as the lower half
 * (AArch64 NEON vmovn_high_s64). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vmovn_high_s64(simde_int32x2_t r, simde_int64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vmovn_high_s64(r, a);
  #else
    simde_int32x2_t narrowed = simde_vmovn_s64(a);
    return simde_vcombine_s32(r, narrowed);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vmovn_high_s64
  #define vmovn_high_s64(r, a) simde_vmovn_high_s64((r), (a))
#endif
/* simde_vmovn_high_u16: narrow each 16-bit lane of a to 8 bits and place
 * the result in the upper half of the output, with r as the lower half
 * (AArch64 NEON vmovn_high_u16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vmovn_high_u16(simde_uint8x8_t r, simde_uint16x8_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vmovn_high_u16(r, a);
  #else
    simde_uint8x8_t narrowed = simde_vmovn_u16(a);
    return simde_vcombine_u8(r, narrowed);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vmovn_high_u16
  #define vmovn_high_u16(r, a) simde_vmovn_high_u16((r), (a))
#endif
/* simde_vmovn_high_u32: narrow each 32-bit lane of a to 16 bits and place
 * the result in the upper half of the output, with r as the lower half
 * (AArch64 NEON vmovn_high_u32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vmovn_high_u32(simde_uint16x4_t r, simde_uint32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vmovn_high_u32(r, a);
  #else
    simde_uint16x4_t narrowed = simde_vmovn_u32(a);
    return simde_vcombine_u16(r, narrowed);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vmovn_high_u32
  #define vmovn_high_u32(r, a) simde_vmovn_high_u32((r), (a))
#endif
/* simde_vmovn_high_u64: narrow each 64-bit lane of a to 32 bits and place
 * the result in the upper half of the output, with r as the lower half
 * (AArch64 NEON vmovn_high_u64). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vmovn_high_u64(simde_uint32x2_t r, simde_uint64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vmovn_high_u64(r, a);
  #else
    simde_uint32x2_t narrowed = simde_vmovn_u64(a);
    return simde_vcombine_u32(r, narrowed);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vmovn_high_u64
  #define vmovn_high_u64(r, a) simde_vmovn_high_u64((r), (a))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_MOVN_HIGH_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/subhn.h | .h | 7,275 | 212 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_SUBHN_H)
#define SIMDE_ARM_NEON_SUBHN_H
#include "sub.h"
#include "shr_n.h"
#include "movn.h"
#include "reinterpret.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* simde_vsubhn_s16: subtract b from a and narrow each 16-bit difference to
 * its most-significant 8 bits (NEON vsubhn_s16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vsubhn_s16(simde_int16x8_t a, simde_int16x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vsubhn_s16(a, b);
  #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
    /* View the 16-bit differences as bytes and gather the high byte of each
     * lane: odd indices on little-endian, even indices on big-endian. */
    simde_int8x8_private r_;
    simde_int8x16_private tmp_ =
      simde_int8x16_to_private(
        simde_vreinterpretq_s8_s16(
          simde_vsubq_s16(a, b)
        )
      );
    #if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE
      r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3, 5, 7, 9, 11, 13, 15);
    #else
      r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2, 4, 6, 8, 10, 12, 14);
    #endif
    return simde_int8x8_from_private(r_);
  #else
    /* Portable fallback: shift the high half down, then truncate. */
    return simde_vmovn_s16(simde_vshrq_n_s16(simde_vsubq_s16(a, b), 8));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vsubhn_s16
  #define vsubhn_s16(a, b) simde_vsubhn_s16((a), (b))
#endif
/* simde_vsubhn_s32: subtract b from a and narrow each 32-bit difference to
 * its most-significant 16 bits (NEON vsubhn_s32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vsubhn_s32(simde_int32x4_t a, simde_int32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vsubhn_s32(a, b);
  #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
    /* View the 32-bit differences as 16-bit halves and gather the high half
     * of each lane: odd indices on little-endian, even on big-endian. */
    simde_int16x4_private r_;
    simde_int16x8_private tmp_ =
      simde_int16x8_to_private(
        simde_vreinterpretq_s16_s32(
          simde_vsubq_s32(a, b)
        )
      );
    #if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE
      r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3, 5, 7);
    #else
      r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2, 4, 6);
    #endif
    return simde_int16x4_from_private(r_);
  #else
    /* Portable fallback: shift the high half down, then truncate. */
    return simde_vmovn_s32(simde_vshrq_n_s32(simde_vsubq_s32(a, b), 16));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vsubhn_s32
  #define vsubhn_s32(a, b) simde_vsubhn_s32((a), (b))
#endif
/* simde_vsubhn_s64: subtract-high-narrow (ARM vsubhn_s64).
 * Per-lane result is (int32_t)((a - b) >> 32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vsubhn_s64(simde_int64x2_t a, simde_int64x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vsubhn_s64(a, b);
  #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
    /* Gather the high 32-bit half of each 64-bit lane of the difference;
     * endianness decides which half is the high one. */
    simde_int32x2_private r_;
    simde_int32x4_private tmp_ =
      simde_int32x4_to_private(
        simde_vreinterpretq_s32_s64(
          simde_vsubq_s64(a, b)
        )
      );
    #if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE
      r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3);
    #else
      r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2);
    #endif
    return simde_int32x2_from_private(r_);
  #else
    /* Portable fallback: arithmetic shift right by 32, then narrow. */
    return simde_vmovn_s64(simde_vshrq_n_s64(simde_vsubq_s64(a, b), 32));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vsubhn_s64
  #define vsubhn_s64(a, b) simde_vsubhn_s64((a), (b))
#endif
/* simde_vsubhn_u16: unsigned subtract-high-narrow (ARM vsubhn_u16).
 * Per-lane result is (uint8_t)((a - b) >> 8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vsubhn_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vsubhn_u16(a, b);
  #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
    /* Gather the high byte of each 16-bit lane of the difference. */
    simde_uint8x8_private r_;
    simde_uint8x16_private tmp_ =
      simde_uint8x16_to_private(
        simde_vreinterpretq_u8_u16(
          simde_vsubq_u16(a, b)
        )
      );
    #if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE
      r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3, 5, 7, 9, 11, 13, 15);
    #else
      r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2, 4, 6, 8, 10, 12, 14);
    #endif
    return simde_uint8x8_from_private(r_);
  #else
    /* Portable fallback: logical shift right by 8, then narrow. */
    return simde_vmovn_u16(simde_vshrq_n_u16(simde_vsubq_u16(a, b), 8));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vsubhn_u16
  #define vsubhn_u16(a, b) simde_vsubhn_u16((a), (b))
#endif
/* simde_vsubhn_u32: unsigned subtract-high-narrow (ARM vsubhn_u32).
 * Per-lane result is (uint16_t)((a - b) >> 16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vsubhn_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vsubhn_u32(a, b);
  #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
    /* Gather the high 16-bit half of each 32-bit lane of the difference. */
    simde_uint16x4_private r_;
    simde_uint16x8_private tmp_ =
      simde_uint16x8_to_private(
        simde_vreinterpretq_u16_u32(
          simde_vsubq_u32(a, b)
        )
      );
    #if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE
      r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3, 5, 7);
    #else
      r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2, 4, 6);
    #endif
    return simde_uint16x4_from_private(r_);
  #else
    /* Portable fallback: logical shift right by 16, then narrow. */
    return simde_vmovn_u32(simde_vshrq_n_u32(simde_vsubq_u32(a, b), 16));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vsubhn_u32
  #define vsubhn_u32(a, b) simde_vsubhn_u32((a), (b))
#endif
/* simde_vsubhn_u64: unsigned subtract-high-narrow (ARM vsubhn_u64).
 * Per-lane result is (uint32_t)((a - b) >> 32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vsubhn_u64(simde_uint64x2_t a, simde_uint64x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vsubhn_u64(a, b);
  #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
    /* Gather the high 32-bit half of each 64-bit lane of the difference. */
    simde_uint32x2_private r_;
    simde_uint32x4_private tmp_ =
      simde_uint32x4_to_private(
        simde_vreinterpretq_u32_u64(
          simde_vsubq_u64(a, b)
        )
      );
    #if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE
      r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3);
    #else
      r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2);
    #endif
    return simde_uint32x2_from_private(r_);
  #else
    /* Portable fallback: logical shift right by 32, then narrow. */
    return simde_vmovn_u64(simde_vshrq_n_u64(simde_vsubq_u64(a, b), 32));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vsubhn_u64
  #define vsubhn_u64(a, b) simde_vsubhn_u64((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_SUBHN_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/rev64.h | .h | 11,921 | 355 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Christopher Moore <moore@free.fr>
*/
/* N.B. CM: vrev64_f16 and vrev64q_f16 are omitted as
* SIMDe has no 16-bit floating point support. */
#if !defined(SIMDE_ARM_NEON_REV64_H)
#define SIMDE_ARM_NEON_REV64_H
#include "reinterpret.h"
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* simde_vrev64_s8: reverse the order of the 8 bytes inside the (single)
 * 64-bit doubleword of a (ARM vrev64_s8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vrev64_s8(simde_int8x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrev64_s8(a);
  #else
    simde_int8x8_private
      r_,
      a_ = simde_int8x8_to_private(a);
    #if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
      r_.m64 = _mm_shuffle_pi8(a_.m64, _mm_set_pi8(0, 1, 2, 3, 4, 5, 6, 7));
    #elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100762)
      r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, a_.values, a_.values, 7, 6, 5, 4, 3, 2, 1, 0);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        /* i ^ 7 mirrors the index within the group of 8 bytes. */
        r_.values[i] = a_.values[i ^ 7];
      }
    #endif
    return simde_int8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrev64_s8
  #define vrev64_s8(a) simde_vrev64_s8(a)
#endif
/* simde_vrev64_s16: reverse the order of the four 16-bit elements inside
 * the 64-bit doubleword of a (ARM vrev64_s16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vrev64_s16(simde_int16x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrev64_s16(a);
  #else
    simde_int16x4_private
      r_,
      a_ = simde_int16x4_to_private(a);
    #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
      /* Immediate selects lanes 3,2,1,0 -> full reversal. */
      r_.m64 = _mm_shuffle_pi16(a_.m64, (0 << 6) | (1 << 4) | (2 << 2) | (3 << 0));
    #elif defined(SIMDE_SHUFFLE_VECTOR_)
      r_.values = SIMDE_SHUFFLE_VECTOR_(16, 8, a_.values, a_.values, 3, 2, 1, 0);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        /* i ^ 3 mirrors the index within the group of 4 elements. */
        r_.values[i] = a_.values[i ^ 3];
      }
    #endif
    return simde_int16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrev64_s16
  #define vrev64_s16(a) simde_vrev64_s16(a)
#endif
/* simde_vrev64_s32: swap the two 32-bit elements inside the 64-bit
 * doubleword of a (ARM vrev64_s32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vrev64_s32(simde_int32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrev64_s32(a);
  #else
    simde_int32x2_private
      r_,
      a_ = simde_int32x2_to_private(a);
    #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
      /* Shuffle 16-bit lanes pairwise to move each 32-bit half. */
      r_.m64 = _mm_shuffle_pi16(a_.m64, (1 << 6) | (0 << 4) | (3 << 2) | (2 << 0));
    #elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100762)
      r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, a_.values, 1, 0);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        /* i ^ 1 swaps neighbouring elements. */
        r_.values[i] = a_.values[i ^ 1];
      }
    #endif
    return simde_int32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrev64_s32
  #define vrev64_s32(a) simde_vrev64_s32(a)
#endif
/* simde_vrev64_u8: unsigned variant of vrev64_s8; reversal is
 * sign-agnostic, so delegate through a reinterpret round-trip. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vrev64_u8(simde_uint8x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrev64_u8(a);
  #else
    return simde_vreinterpret_u8_s8(simde_vrev64_s8(simde_vreinterpret_s8_u8(a)));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrev64_u8
  #define vrev64_u8(a) simde_vrev64_u8(a)
#endif
/* simde_vrev64_u16: unsigned variant of vrev64_s16 via reinterpret. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vrev64_u16(simde_uint16x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrev64_u16(a);
  #else
    return simde_vreinterpret_u16_s16(simde_vrev64_s16(simde_vreinterpret_s16_u16(a)));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrev64_u16
  #define vrev64_u16(a) simde_vrev64_u16(a)
#endif
/* simde_vrev64_u32: unsigned variant of vrev64_s32 via reinterpret. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vrev64_u32(simde_uint32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrev64_u32(a);
  #else
    return simde_vreinterpret_u32_s32(simde_vrev64_s32(simde_vreinterpret_s32_u32(a)));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrev64_u32
  #define vrev64_u32(a) simde_vrev64_u32(a)
#endif
/* simde_vrev64_f32: float variant of vrev64_s32; bit patterns are moved
 * unchanged, so a reinterpret round-trip is exact. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vrev64_f32(simde_float32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrev64_f32(a);
  #else
    return simde_vreinterpret_f32_s32(simde_vrev64_s32(simde_vreinterpret_s32_f32(a)));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrev64_f32
  #define vrev64_f32(a) simde_vrev64_f32(a)
#endif
/* simde_vrev64q_s8: reverse the byte order within EACH 64-bit doubleword
 * of the 128-bit vector (ARM vrev64q_s8); the two halves themselves keep
 * their positions. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vrev64q_s8(simde_int8x16_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrev64q_s8(a);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
    /* Byte-reverse each 64-bit element directly. */
    return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char),
        vec_revb(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), a)));
  #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
    /* Reverse all 16 bytes, then reverse the order of the two 64-bit
     * elements: net effect is per-doubleword byte reversal. */
    return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char),
        vec_reve(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), vec_reve(a))));
  #else
    simde_int8x16_private
      r_,
      a_ = simde_int8x16_to_private(a);
    #if defined(SIMDE_X86_SSSE3_NATIVE)
      r_.m128i = _mm_shuffle_epi8(a_.m128i, _mm_set_epi8(8, 9, 10, 11, 12, 13, 14, 15,
                                                         0, 1, 2, 3, 4, 5, 6, 7));
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_i8x16_shuffle(a_.v128, a_.v128, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
    #elif defined(SIMDE_SHUFFLE_VECTOR_)
      r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, a_.values, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        /* i ^ 7 mirrors the index within each group of 8 bytes. */
        r_.values[i] = a_.values[i ^ 7];
      }
    #endif
    return simde_int8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrev64q_s8
  #define vrev64q_s8(a) simde_vrev64q_s8(a)
#endif
/* simde_vrev64q_s16: reverse the four 16-bit elements within EACH 64-bit
 * doubleword of the 128-bit vector (ARM vrev64q_s16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vrev64q_s16(simde_int16x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrev64q_s16(a);
  #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
    /* Double vec_reve: full reversal then doubleword reversal. */
    return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed short),
        vec_reve(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), vec_reve(a))));
  #else
    simde_int16x8_private
      r_,
      a_ = simde_int16x8_to_private(a);
    #if defined(SIMDE_X86_SSSE3_NATIVE)
      r_.m128i = _mm_shuffle_epi8(a_.m128i, _mm_set_epi8(9, 8, 11, 10, 13, 12, 15, 14,
                                                         1, 0, 3, 2, 5, 4, 7, 6));
    #elif defined(SIMDE_X86_SSE2_NATIVE)
      /* Reverse low and high 4-lane halves independently. */
      r_.m128i = _mm_shufflehi_epi16(_mm_shufflelo_epi16(a_.m128i,
                                                         (0 << 6) | (1 << 4) | (2 << 2) | (3 << 0)),
                                     (0 << 6) | (1 << 4) | (2 << 2) | (3 << 0));
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_i8x16_shuffle(a_.v128, a_.v128, 6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9);
    #elif defined(SIMDE_SHUFFLE_VECTOR_)
      r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.values, a_.values, 3, 2, 1, 0, 7, 6, 5, 4);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        /* i ^ 3 mirrors the index within each group of 4 elements. */
        r_.values[i] = a_.values[i ^ 3];
      }
    #endif
    return simde_int16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrev64q_s16
  #define vrev64q_s16(a) simde_vrev64q_s16(a)
#endif
/* simde_vrev64q_s32: swap the two 32-bit elements within EACH 64-bit
 * doubleword of the 128-bit vector (ARM vrev64q_s32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vrev64q_s32(simde_int32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrev64q_s32(a);
  #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
    /* Double vec_reve: full reversal then doubleword reversal. */
    return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int),
        vec_reve(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), vec_reve(a))));
  #else
    simde_int32x4_private
      r_,
      a_ = simde_int32x4_to_private(a);
    #if defined(SIMDE_X86_SSE2_NATIVE)
      /* Lane order 2,3,0,1 swaps within each 64-bit pair. */
      r_.m128i = _mm_shuffle_epi32(a_.m128i, (2 << 6) | (3 << 4) | (0 << 2) | (1 << 0));
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_i8x16_shuffle(a_.v128, a_.v128, 4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11);
    #elif defined(SIMDE_SHUFFLE_VECTOR_)
      r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, a_.values, 1, 0, 3, 2);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        /* i ^ 1 swaps neighbouring elements. */
        r_.values[i] = a_.values[i ^ 1];
      }
    #endif
    return simde_int32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrev64q_s32
  #define vrev64q_s32(a) simde_vrev64q_s32(a)
#endif
/* simde_vrev64q_u8: unsigned variant of vrev64q_s8 via reinterpret. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vrev64q_u8(simde_uint8x16_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrev64q_u8(a);
  #else
    return simde_vreinterpretq_u8_s8(simde_vrev64q_s8(simde_vreinterpretq_s8_u8(a)));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrev64q_u8
  #define vrev64q_u8(a) simde_vrev64q_u8(a)
#endif
/* simde_vrev64q_u16: unsigned variant of vrev64q_s16 via reinterpret. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vrev64q_u16(simde_uint16x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrev64q_u16(a);
  #else
    return simde_vreinterpretq_u16_s16(simde_vrev64q_s16(simde_vreinterpretq_s16_u16(a)));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrev64q_u16
  #define vrev64q_u16(a) simde_vrev64q_u16(a)
#endif
/* simde_vrev64q_u32: unsigned variant of vrev64q_s32 via reinterpret. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vrev64q_u32(simde_uint32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrev64q_u32(a);
  #else
    return simde_vreinterpretq_u32_s32(simde_vrev64q_s32(simde_vreinterpretq_s32_u32(a)));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrev64q_u32
  #define vrev64q_u32(a) simde_vrev64q_u32(a)
#endif
/* simde_vrev64q_f32: float variant of vrev64q_s32; elements are moved as
 * raw bit patterns, so the reinterpret round-trip is exact. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vrev64q_f32(simde_float32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrev64q_f32(a);
  #else
    return simde_vreinterpretq_f32_s32(simde_vrev64q_s32(simde_vreinterpretq_s32_f32(a)));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrev64q_f32
  #define vrev64q_f32(a) simde_vrev64q_f32(a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_REV64_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/uqadd.h | .h | 10,603 | 337 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_UQADD_H)
#define SIMDE_ARM_NEON_UQADD_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
// Workaround on ARM64 windows due to windows SDK bug
// https://developercommunity.visualstudio.com/t/In-arm64_neonh-vsqaddb_u8-vsqaddh_u16/10271747?sort=newest
#if (defined _MSC_VER) && (defined SIMDE_ARM_NEON_A64V8_NATIVE)
#undef vuqaddh_s16
#define vuqaddh_s16(src1, src2) neon_suqadds16(__int16ToN16_v(src1), __uint16ToN16_v(src2)).n16_i16[0]
#undef vuqadds_s32
#define vuqadds_s32(src1, src2) _CopyInt32FromFloat(neon_suqadds32(_CopyFloatFromInt32(src1), _CopyFloatFromUInt32(src2)))
#undef vuqaddd_s64
#define vuqaddd_s64(src1, src2) neon_suqadds64(__int64ToN64_v(src1), __uint64ToN64_v(src2)).n64_i64[0]
#endif
/* simde_vuqaddb_s8: signed saturating add of an unsigned value
 * (ARM vuqaddb_s8 / SUQADD): a + b clamped to [INT8_MIN, INT8_MAX]. */
SIMDE_FUNCTION_ATTRIBUTES
int8_t
simde_vuqaddb_s8(int8_t a, uint8_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    #if defined(SIMDE_BUG_CLANG_GIT_4EC445B8)
      return vuqaddb_s8(a, HEDLEY_STATIC_CAST(int8_t, b));
    #else
      return vuqaddb_s8(a, b);
    #endif
  #else
    /* Widen to 16 bits so the sum cannot overflow, then clamp.  (The
     * low clamp can never fire since b >= 0, but it keeps the pattern
     * uniform across the scalar helpers.) */
    int16_t r_ = HEDLEY_STATIC_CAST(int16_t, a) + HEDLEY_STATIC_CAST(int16_t, b);
    return (r_ < INT8_MIN) ? INT8_MIN : ((r_ > INT8_MAX) ? INT8_MAX : HEDLEY_STATIC_CAST(int8_t, r_));
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vuqaddb_s8
  #define vuqaddb_s8(a, b) simde_vuqaddb_s8((a), (b))
#endif
/* simde_vuqaddh_s16: signed saturating add of an unsigned value
 * (ARM vuqaddh_s16): a + b clamped to [INT16_MIN, INT16_MAX]. */
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_vuqaddh_s16(int16_t a, uint16_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    #if defined(SIMDE_BUG_CLANG_GIT_4EC445B8)
      return vuqaddh_s16(a, HEDLEY_STATIC_CAST(int16_t, b));
    #else
      return vuqaddh_s16(a, b);
    #endif
  #else
    /* Widen to 32 bits so the sum cannot overflow, then clamp. */
    int32_t r_ = HEDLEY_STATIC_CAST(int32_t, a) + HEDLEY_STATIC_CAST(int32_t, b);
    return (r_ < INT16_MIN) ? INT16_MIN : ((r_ > INT16_MAX) ? INT16_MAX : HEDLEY_STATIC_CAST(int16_t, r_));
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vuqaddh_s16
  #define vuqaddh_s16(a, b) simde_vuqaddh_s16((a), (b))
#endif
/* simde_vuqadds_s32: signed saturating add of an unsigned value
 * (ARM vuqadds_s32): a + b clamped to [INT32_MIN, INT32_MAX]. */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_vuqadds_s32(int32_t a, uint32_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    #if defined(SIMDE_BUG_CLANG_GIT_4EC445B8)
      return vuqadds_s32(a, HEDLEY_STATIC_CAST(int32_t, b));
    #else
      return vuqadds_s32(a, b);
    #endif
  #else
    /* Widen to 64 bits so the sum cannot overflow, then clamp. */
    int64_t r_ = HEDLEY_STATIC_CAST(int64_t, a) + HEDLEY_STATIC_CAST(int64_t, b);
    return (r_ < INT32_MIN) ? INT32_MIN : ((r_ > INT32_MAX) ? INT32_MAX : HEDLEY_STATIC_CAST(int32_t, r_));
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vuqadds_s32
  #define vuqadds_s32(a, b) simde_vuqadds_s32((a), (b))
#endif
/* simde_vuqaddd_s64: signed saturating add of an unsigned value
 * (ARM vuqaddd_s64).  No wider integer type is available here, so the
 * fallback works case-by-case in uint64_t arithmetic. */
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_vuqaddd_s64(int64_t a, uint64_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    #if defined(SIMDE_BUG_CLANG_GIT_4EC445B8)
      return vuqaddd_s64(a, HEDLEY_STATIC_CAST(int64_t, b));
    #else
      return vuqaddd_s64(a, b);
    #endif
  #else
    /* TODO: I suspect there is room for improvement here. This is
     * just the first thing that worked, and I don't feel like messing
     * with it now. */
    int64_t r;

    if (a < 0) {
      /* NOTE(review): -a overflows for a == INT64_MIN before the cast;
       * the cast result is what two's-complement targets produce, but
       * strictly this relies on implementation-defined behavior. */
      uint64_t na = HEDLEY_STATIC_CAST(uint64_t, -a);
      if (na > b) {
        /* Sum is negative: magnitude is na - b. */
        uint64_t t = na - b;
        r = (t > (HEDLEY_STATIC_CAST(uint64_t, INT64_MAX) + 1)) ? INT64_MIN : -HEDLEY_STATIC_CAST(int64_t, t);
      } else {
        /* Sum is non-negative: magnitude is b - na; saturate high. */
        uint64_t t = b - na;
        r = (t > (HEDLEY_STATIC_CAST(uint64_t, INT64_MAX)    )) ? INT64_MAX : HEDLEY_STATIC_CAST(int64_t, t);
      }
    } else {
      /* Both operands non-negative: only overflow toward INT64_MAX. */
      uint64_t ua = HEDLEY_STATIC_CAST(uint64_t, a);
      r = ((INT64_MAX - ua) < b) ? INT64_MAX : HEDLEY_STATIC_CAST(int64_t, ua + b);
    }

    return r;
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vuqaddd_s64
  #define vuqaddd_s64(a, b) simde_vuqaddd_s64((a), (b))
#endif
/* Lane-wise signed saturating add of an unsigned vector (ARM vuqadd_s8):
 * each result lane is vuqaddb_s8(a[i], b[i]). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vuqadd_s8(simde_int8x8_t a, simde_uint8x8_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vuqadd_s8(a, b);
  #else
    /* Delegate to the scalar helper one lane at a time. */
    simde_int8x8_private res_ = simde_int8x8_to_private(a);
    simde_uint8x8_private addend_ = simde_uint8x8_to_private(b);
    const size_t lanes = sizeof(res_.values) / sizeof(res_.values[0]);
    SIMDE_VECTORIZE
    for (size_t lane = 0 ; lane < lanes ; lane++) {
      res_.values[lane] = simde_vuqaddb_s8(res_.values[lane], addend_.values[lane]);
    }
    return simde_int8x8_from_private(res_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vuqadd_s8
  #define vuqadd_s8(a, b) simde_vuqadd_s8((a), (b))
#endif
/* simde_vuqadd_s16: lane-wise signed saturating add of an unsigned vector;
 * each lane is vuqaddh_s16(a[i], b[i]). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vuqadd_s16(simde_int16x4_t a, simde_uint16x4_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vuqadd_s16(a, b);
  #else
    simde_int16x4_private
      r_,
      a_ = simde_int16x4_to_private(a);
    simde_uint16x4_private b_ = simde_uint16x4_to_private(b);
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vuqaddh_s16(a_.values[i], b_.values[i]);
    }
    return simde_int16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vuqadd_s16
  #define vuqadd_s16(a, b) simde_vuqadd_s16((a), (b))
#endif
/* simde_vuqadd_s32: lane-wise signed saturating add of an unsigned vector;
 * each lane is vuqadds_s32(a[i], b[i]). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vuqadd_s32(simde_int32x2_t a, simde_uint32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vuqadd_s32(a, b);
  #else
    simde_int32x2_private
      r_,
      a_ = simde_int32x2_to_private(a);
    simde_uint32x2_private b_ = simde_uint32x2_to_private(b);
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vuqadds_s32(a_.values[i], b_.values[i]);
    }
    return simde_int32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vuqadd_s32
  #define vuqadd_s32(a, b) simde_vuqadd_s32((a), (b))
#endif
/* simde_vuqadd_s64: lane-wise signed saturating add of an unsigned vector;
 * the single lane is vuqaddd_s64(a[0], b[0]). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vuqadd_s64(simde_int64x1_t a, simde_uint64x1_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vuqadd_s64(a, b);
  #else
    simde_int64x1_private
      r_,
      a_ = simde_int64x1_to_private(a);
    simde_uint64x1_private b_ = simde_uint64x1_to_private(b);
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vuqaddd_s64(a_.values[i], b_.values[i]);
    }
    return simde_int64x1_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vuqadd_s64
  #define vuqadd_s64(a, b) simde_vuqadd_s64((a), (b))
#endif
/* 128-bit lane-wise signed saturating add of an unsigned vector
 * (ARM vuqaddq_s8): each lane is vuqaddb_s8(a[i], b[i]). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vuqaddq_s8(simde_int8x16_t a, simde_uint8x16_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vuqaddq_s8(a, b);
  #else
    /* Accumulate in place over the private view of `a`. */
    simde_int8x16_private res_ = simde_int8x16_to_private(a);
    simde_uint8x16_private addend_ = simde_uint8x16_to_private(b);
    const size_t lanes = sizeof(res_.values) / sizeof(res_.values[0]);
    SIMDE_VECTORIZE
    for (size_t lane = 0 ; lane < lanes ; lane++) {
      res_.values[lane] = simde_vuqaddb_s8(res_.values[lane], addend_.values[lane]);
    }
    return simde_int8x16_from_private(res_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vuqaddq_s8
  #define vuqaddq_s8(a, b) simde_vuqaddq_s8((a), (b))
#endif
/* simde_vuqaddq_s16: 128-bit lane-wise signed saturating add of an
 * unsigned vector; each lane is vuqaddh_s16(a[i], b[i]). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vuqaddq_s16(simde_int16x8_t a, simde_uint16x8_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vuqaddq_s16(a, b);
  #else
    simde_int16x8_private
      r_,
      a_ = simde_int16x8_to_private(a);
    simde_uint16x8_private b_ = simde_uint16x8_to_private(b);
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vuqaddh_s16(a_.values[i], b_.values[i]);
    }
    return simde_int16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vuqaddq_s16
  #define vuqaddq_s16(a, b) simde_vuqaddq_s16((a), (b))
#endif
/* simde_vuqaddq_s32: 128-bit lane-wise signed saturating add of an
 * unsigned vector; each lane is vuqadds_s32(a[i], b[i]). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vuqaddq_s32(simde_int32x4_t a, simde_uint32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vuqaddq_s32(a, b);
  #else
    simde_int32x4_private
      r_,
      a_ = simde_int32x4_to_private(a);
    simde_uint32x4_private b_ = simde_uint32x4_to_private(b);
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vuqadds_s32(a_.values[i], b_.values[i]);
    }
    return simde_int32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vuqaddq_s32
  #define vuqaddq_s32(a, b) simde_vuqaddq_s32((a), (b))
#endif
/* simde_vuqaddq_s64: 128-bit lane-wise signed saturating add of an
 * unsigned vector; each lane is vuqaddd_s64(a[i], b[i]). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vuqaddq_s64(simde_int64x2_t a, simde_uint64x2_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vuqaddq_s64(a, b);
  #else
    simde_int64x2_private
      r_,
      a_ = simde_int64x2_to_private(a);
    simde_uint64x2_private b_ = simde_uint64x2_to_private(b);
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vuqaddd_s64(a_.values[i], b_.values[i]);
    }
    return simde_int64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vuqaddq_s64
  #define vuqaddq_s64(a, b) simde_vuqaddq_s64((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_UQADD_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/qshl.h | .h | 19,189 | 733 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Christopher Moore <moore@free.fr>
*/
#if !defined(SIMDE_ARM_NEON_QSHL_H)
#define SIMDE_ARM_NEON_QSHL_H
#include "types.h"
#include "cls.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* simde_vqshlb_s8: signed saturating shift by a signed amount (ARM SQSHL).
 * Positive b shifts left, clamping to [INT8_MIN, INT8_MAX]; b <= 0 shifts
 * right arithmetically (amounts below -7 clamp to -7). */
SIMDE_FUNCTION_ATTRIBUTES
int8_t
simde_vqshlb_s8(int8_t a, int8_t b) {
  int8_t r;

  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    r = vqshlb_s8(a, b);
  #else
    if (b < -7)
      b = -7;

    if (b <= 0) {
      /* Right shift of a negative value is implementation-defined but
       * arithmetic on the compilers SIMDe targets. */
      r = a >> -b;
    } else if (b < 7) {
      r = HEDLEY_STATIC_CAST(int8_t, a << b);
      if ((r >> b) != a) {
        /* Overflow: saturate toward the sign of a. */
        r = (a < 0) ? INT8_MIN : INT8_MAX;
      }
    } else if (a == 0) {
      r = 0;
    } else {
      /* b >= 7 with a != 0 always saturates.  (For a < 0 this also yields
       * the exact result of -1 << 7, which equals INT8_MIN.) */
      r = (a < 0) ? INT8_MIN : INT8_MAX;
    }
  #endif

  return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vqshlb_s8
  #define vqshlb_s8(a, b) simde_vqshlb_s8((a), (b))
#endif
/* simde_vqshlh_s16: signed saturating shift by a signed amount.
 * Only the low byte of b is honoured (b8), matching the hardware, which
 * reads the shift amount from the bottom byte of the register. */
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_vqshlh_s16(int16_t a, int16_t b) {
  int16_t r;

  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    r = vqshlh_s16(a, b);
  #else
    int8_t b8 = HEDLEY_STATIC_CAST(int8_t, b);

    if (b8 < -15)
      b8 = -15;

    if (b8 <= 0) {
      r = a >> -b8;
    } else if (b8 < 15) {
      r = HEDLEY_STATIC_CAST(int16_t, a << b8);
      if ((r >> b8) != a) {
        /* Overflow: saturate toward the sign of a. */
        r = (a < 0) ? INT16_MIN : INT16_MAX;
      }
    } else if (a == 0) {
      r = 0;
    } else {
      /* b8 >= 15 with a != 0 always saturates (exact for -1 << 15). */
      r = (a < 0) ? INT16_MIN : INT16_MAX;
    }
  #endif

  return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vqshlh_s16
  #define vqshlh_s16(a, b) simde_vqshlh_s16((a), (b))
#endif
/* simde_vqshls_s32: signed saturating shift by a signed amount; the shift
 * amount is taken from the low byte of b, as on hardware. */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_vqshls_s32(int32_t a, int32_t b) {
  int32_t r;

  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    r = vqshls_s32(a, b);
  #else
    int8_t b8 = HEDLEY_STATIC_CAST(int8_t, b);

    if (b8 < -31)
      b8 = -31;

    if (b8 <= 0) {
      r = a >> -b8;
    } else if (b8 < 31) {
      r = HEDLEY_STATIC_CAST(int32_t, a << b8);
      if ((r >> b8) != a) {
        /* Overflow: saturate toward the sign of a. */
        r = (a < 0) ? INT32_MIN : INT32_MAX;
      }
    } else if (a == 0) {
      r = 0;
    } else {
      /* b8 >= 31 with a != 0 always saturates (exact for -1 << 31). */
      r = (a < 0) ? INT32_MIN : INT32_MAX;
    }
  #endif

  return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vqshls_s32
  #define vqshls_s32(a, b) simde_vqshls_s32((a), (b))
#endif
/* simde_vqshld_s64: signed saturating shift by a signed amount; the shift
 * amount is taken from the low byte of b, as on hardware. */
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_vqshld_s64(int64_t a, int64_t b) {
  int64_t r;

  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    r = vqshld_s64(a, b);
  #else
    int8_t b8 = HEDLEY_STATIC_CAST(int8_t, b);

    if (b8 < -63)
      b8 = -63;

    if (b8 <= 0) {
      r = a >> -b8;
    } else if (b8 < 63) {
      /* NOTE(review): a << b8 can overflow int64_t before the check
       * detects it; in practice compilers SIMDe targets wrap here, and
       * the (r >> b8) != a comparison then triggers the saturation. */
      r = HEDLEY_STATIC_CAST(int64_t, a << b8);
      if ((r >> b8) != a) {
        r = (a < 0) ? INT64_MIN : INT64_MAX;
      }
    } else if (a == 0) {
      r = 0;
    } else {
      /* b8 >= 63 with a != 0 always saturates (exact for -1 << 63). */
      r = (a < 0) ? INT64_MIN : INT64_MAX;
    }
  #endif

  return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vqshld_s64
  #define vqshld_s64(a, b) simde_vqshld_s64((a), (b))
#endif
/* simde_vqshlb_u8: unsigned saturating shift by a signed amount (ARM UQSHL).
 * Positive b shifts left, saturating to UINT8_MAX on overflow; b <= 0
 * shifts right (amounts below -7 clamp to -7). */
SIMDE_FUNCTION_ATTRIBUTES
uint8_t
simde_vqshlb_u8(uint8_t a, int8_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    #if defined(HEDLEY_GCC_VERSION) && !HEDLEY_GCC_VERSION_CHECK(11,0,0)
      return vqshlb_u8(a, HEDLEY_STATIC_CAST(uint8_t, b));
    #elif HEDLEY_HAS_WARNING("-Wsign-conversion")
      /* https://github.com/llvm/llvm-project/commit/f0a78bdfdc6d56b25e0081884580b3960a3c2429 */
      HEDLEY_DIAGNOSTIC_PUSH
      #pragma clang diagnostic ignored "-Wsign-conversion"
      return vqshlb_u8(a, b);
      HEDLEY_DIAGNOSTIC_POP
    #else
      return vqshlb_u8(a, b);
    #endif
  #else
    uint8_t r;
    if (b < -7)
      b = -7;
    if (b <= 0) {
      r = a >> -b;
    } else if (b < 8) {
      /* b == 7 must take this path: the shift happens after integer
       * promotion so it cannot overflow, and the (r >> b) != a check
       * detects any value that does not round-trip through 8 bits.
       * (The previous `b < 7` bound wrongly saturated vqshlb_u8(1, 7),
       * whose exact result 0x80 fits in a uint8_t.) */
      r = HEDLEY_STATIC_CAST(uint8_t, a << b);
      if ((r >> b) != a) {
        r = UINT8_MAX;
      }
    } else if (a == 0) {
      r = 0;
    } else {
      /* Shifting a non-zero value left by >= 8 always overflows. */
      r = UINT8_MAX;
    }
    return r;
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vqshlb_u8
  #define vqshlb_u8(a, b) simde_vqshlb_u8((a), (b))
#endif
/* simde_vqshlh_u16: unsigned saturating shift by a signed amount.
 * Positive b shifts left, saturating to UINT16_MAX on overflow; b <= 0
 * shifts right (amounts below -15 clamp to -15). */
SIMDE_FUNCTION_ATTRIBUTES
uint16_t
simde_vqshlh_u16(uint16_t a, int16_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    #if defined(HEDLEY_GCC_VERSION) && !HEDLEY_GCC_VERSION_CHECK(11,0,0)
      return vqshlh_u16(a, HEDLEY_STATIC_CAST(uint16_t, b));
    #elif HEDLEY_HAS_WARNING("-Wsign-conversion")
      HEDLEY_DIAGNOSTIC_PUSH
      #pragma clang diagnostic ignored "-Wsign-conversion"
      return vqshlh_u16(a, b);
      HEDLEY_DIAGNOSTIC_POP
    #else
      return vqshlh_u16(a, b);
    #endif
  #else
    uint16_t r;
    if (b < -15)
      b = -15;
    if (b <= 0) {
      r = a >> -b;
    } else if (b < 16) {
      /* b == 15 must take this path: a << 15 is evaluated after promotion
       * to int (65535 << 15 fits in a 32-bit int), and the round-trip
       * check catches overflow.  (The previous `b < 15` bound wrongly
       * saturated vqshlh_u16(1, 15), whose exact result 0x8000 fits.) */
      r = HEDLEY_STATIC_CAST(uint16_t, a << b);
      if ((r >> b) != a) {
        r = UINT16_MAX;
      }
    } else if (a == 0) {
      r = 0;
    } else {
      /* Shifting a non-zero value left by >= 16 always overflows. */
      r = UINT16_MAX;
    }
    return r;
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vqshlh_u16
  #define vqshlh_u16(a, b) simde_vqshlh_u16((a), (b))
#endif
/* simde_vqshls_u32: unsigned saturating shift by a signed amount.
 * Positive b shifts left, saturating to UINT32_MAX on overflow; b <= 0
 * shifts right (amounts below -31 clamp to -31). */
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_vqshls_u32(uint32_t a, int32_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    #if defined(HEDLEY_GCC_VERSION) && !HEDLEY_GCC_VERSION_CHECK(11,0,0)
      /* Cast width fixed from uint16_t (copy-paste from the u16 variant)
       * to uint32_t; harmless either way since the instruction only
       * consumes the bottom byte of the shift operand, but this matches
       * the operand's actual width. */
      return vqshls_u32(a, HEDLEY_STATIC_CAST(uint32_t, b));
    #elif HEDLEY_HAS_WARNING("-Wsign-conversion")
      HEDLEY_DIAGNOSTIC_PUSH
      #pragma clang diagnostic ignored "-Wsign-conversion"
      return vqshls_u32(a, b);
      HEDLEY_DIAGNOSTIC_POP
    #else
      return vqshls_u32(a, b);
    #endif
  #else
    uint32_t r;
    if (b < -31)
      b = -31;
    if (b <= 0) {
      r = HEDLEY_STATIC_CAST(uint32_t, a >> -b);
    } else if (b < 32) {
      /* b == 31 must take this path: an unsigned 32-bit shift by 31 is
       * well-defined, and the round-trip check catches overflow.  (The
       * previous `b < 31` bound wrongly saturated vqshls_u32(1, 31),
       * whose exact result 0x80000000 fits.) */
      r = a << b;
      if ((r >> b) != a) {
        r = UINT32_MAX;
      }
    } else if (a == 0) {
      r = 0;
    } else {
      /* Shifting a non-zero value left by >= 32 always overflows. */
      r = UINT32_MAX;
    }
    return r;
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vqshls_u32
  #define vqshls_u32(a, b) simde_vqshls_u32((a), (b))
#endif
/* simde_vqshld_u64: unsigned saturating shift by a signed amount.
 * Positive b shifts left, saturating to UINT64_MAX on overflow; b <= 0
 * shifts right (amounts below -63 clamp to -63). */
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vqshld_u64(uint64_t a, int64_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    #if defined(HEDLEY_GCC_VERSION) && !HEDLEY_GCC_VERSION_CHECK(11,0,0)
      /* Cast width fixed from uint16_t (copy-paste from the u16 variant)
       * to uint64_t; the instruction only reads the bottom byte of the
       * shift operand, so this is a consistency fix. */
      return vqshld_u64(a, HEDLEY_STATIC_CAST(uint64_t, b));
    #elif HEDLEY_HAS_WARNING("-Wsign-conversion")
      HEDLEY_DIAGNOSTIC_PUSH
      #pragma clang diagnostic ignored "-Wsign-conversion"
      return vqshld_u64(a, b);
      HEDLEY_DIAGNOSTIC_POP
    #else
      return vqshld_u64(a, b);
    #endif
  #else
    uint64_t r;
    if (b < -63)
      b = -63;
    if (b <= 0) {
      r = a >> -b;
    } else if (b < 64) {
      /* b == 63 must take this path: an unsigned 64-bit shift by 63 is
       * well-defined, and the round-trip check catches overflow.  (The
       * previous `b < 63` bound wrongly saturated vqshld_u64(1, 63),
       * whose exact result 1 << 63 fits.) */
      r = HEDLEY_STATIC_CAST(uint64_t, a << b);
      if ((r >> b) != a) {
        r = UINT64_MAX;
      }
    } else if (a == 0) {
      r = 0;
    } else {
      /* Shifting a non-zero value left by >= 64 always overflows. */
      r = UINT64_MAX;
    }
    return r;
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  /* Typo fixed: was `#undef vqshldb_u64` (stray 'b'), which failed to
   * undefine the actual alias name before redefining it. */
  #undef vqshld_u64
  #define vqshld_u64(a, b) simde_vqshld_u64((a), (b))
#endif
/* vqshl_s8: lane-wise signed saturating shift of the 8x8-bit vector `a`
 * by the signed per-lane counts in `b`; the portable path defers to the
 * scalar helper for each lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vqshl_s8 (const simde_int8x8_t a, const simde_int8x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vqshl_s8(a, b);
  #else
    simde_int8x8_private
      r_,
      a_ = simde_int8x8_to_private(a),
      b_ = simde_int8x8_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vqshlb_s8(a_.values[i], b_.values[i]);
    }

    return simde_int8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqshl_s8
  #define vqshl_s8(a, b) simde_vqshl_s8((a), (b))
#endif

/* vqshl_s16: lane-wise signed saturating shift, 4x16-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vqshl_s16 (const simde_int16x4_t a, const simde_int16x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vqshl_s16(a, b);
  #else
    simde_int16x4_private
      r_,
      a_ = simde_int16x4_to_private(a),
      b_ = simde_int16x4_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vqshlh_s16(a_.values[i], b_.values[i]);
    }

    return simde_int16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqshl_s16
  #define vqshl_s16(a, b) simde_vqshl_s16((a), (b))
#endif

/* vqshl_s32: lane-wise signed saturating shift, 2x32-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vqshl_s32 (const simde_int32x2_t a, const simde_int32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vqshl_s32(a, b);
  #else
    simde_int32x2_private
      r_,
      a_ = simde_int32x2_to_private(a),
      b_ = simde_int32x2_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vqshls_s32(a_.values[i], b_.values[i]);
    }

    return simde_int32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqshl_s32
  #define vqshl_s32(a, b) simde_vqshl_s32((a), (b))
#endif

/* vqshl_s64: lane-wise signed saturating shift, single 64-bit lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vqshl_s64 (const simde_int64x1_t a, const simde_int64x1_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vqshl_s64(a, b);
  #else
    simde_int64x1_private
      r_,
      a_ = simde_int64x1_to_private(a),
      b_ = simde_int64x1_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vqshld_s64(a_.values[i], b_.values[i]);
    }

    return simde_int64x1_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqshl_s64
  #define vqshl_s64(a, b) simde_vqshl_s64((a), (b))
#endif

/* vqshl_u8: lane-wise unsigned saturating shift; note the shift counts
 * in `b` are signed (negative = right shift), so `b` is a signed vector. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vqshl_u8 (const simde_uint8x8_t a, const simde_int8x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vqshl_u8(a, b);
  #else
    simde_uint8x8_private
      r_,
      a_ = simde_uint8x8_to_private(a);
    simde_int8x8_private
      b_ = simde_int8x8_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vqshlb_u8(a_.values[i], b_.values[i]);
    }

    return simde_uint8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqshl_u8
  #define vqshl_u8(a, b) simde_vqshl_u8((a), (b))
#endif

/* vqshl_u16: lane-wise unsigned saturating shift, 4x16-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vqshl_u16 (const simde_uint16x4_t a, const simde_int16x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vqshl_u16(a, b);
  #else
    simde_uint16x4_private
      r_,
      a_ = simde_uint16x4_to_private(a);
    simde_int16x4_private
      b_ = simde_int16x4_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vqshlh_u16(a_.values[i], b_.values[i]);
    }

    return simde_uint16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqshl_u16
  #define vqshl_u16(a, b) simde_vqshl_u16((a), (b))
#endif

/* vqshl_u32: lane-wise unsigned saturating shift, 2x32-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vqshl_u32 (const simde_uint32x2_t a, const simde_int32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vqshl_u32(a, b);
  #else
    simde_uint32x2_private
      r_,
      a_ = simde_uint32x2_to_private(a);
    simde_int32x2_private
      b_ = simde_int32x2_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vqshls_u32(a_.values[i], b_.values[i]);
    }

    return simde_uint32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqshl_u32
  #define vqshl_u32(a, b) simde_vqshl_u32((a), (b))
#endif

/* vqshl_u64: lane-wise unsigned saturating shift, single 64-bit lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vqshl_u64 (const simde_uint64x1_t a, const simde_int64x1_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vqshl_u64(a, b);
  #else
    simde_uint64x1_private
      r_,
      a_ = simde_uint64x1_to_private(a);
    simde_int64x1_private
      b_ = simde_int64x1_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vqshld_u64(a_.values[i], b_.values[i]);
    }

    return simde_uint64x1_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqshl_u64
  #define vqshl_u64(a, b) simde_vqshl_u64((a), (b))
#endif
/* vqshlq_s8: 128-bit (q-register) form -- lane-wise signed saturating
 * shift of 16x8-bit lanes; portable path defers to the scalar helper. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vqshlq_s8 (const simde_int8x16_t a, const simde_int8x16_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vqshlq_s8(a, b);
  #else
    simde_int8x16_private
      r_,
      a_ = simde_int8x16_to_private(a),
      b_ = simde_int8x16_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vqshlb_s8(a_.values[i], b_.values[i]);
    }

    return simde_int8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqshlq_s8
  #define vqshlq_s8(a, b) simde_vqshlq_s8((a), (b))
#endif

/* vqshlq_s16: lane-wise signed saturating shift, 8x16-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vqshlq_s16 (const simde_int16x8_t a, const simde_int16x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vqshlq_s16(a, b);
  #else
    simde_int16x8_private
      r_,
      a_ = simde_int16x8_to_private(a),
      b_ = simde_int16x8_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vqshlh_s16(a_.values[i], b_.values[i]);
    }

    return simde_int16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqshlq_s16
  #define vqshlq_s16(a, b) simde_vqshlq_s16((a), (b))
#endif

/* vqshlq_s32: lane-wise signed saturating shift, 4x32-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vqshlq_s32 (const simde_int32x4_t a, const simde_int32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vqshlq_s32(a, b);
  #else
    simde_int32x4_private
      r_,
      a_ = simde_int32x4_to_private(a),
      b_ = simde_int32x4_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vqshls_s32(a_.values[i], b_.values[i]);
    }

    return simde_int32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqshlq_s32
  #define vqshlq_s32(a, b) simde_vqshlq_s32((a), (b))
#endif

/* vqshlq_s64: lane-wise signed saturating shift, 2x64-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vqshlq_s64 (const simde_int64x2_t a, const simde_int64x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vqshlq_s64(a, b);
  #else
    simde_int64x2_private
      r_,
      a_ = simde_int64x2_to_private(a),
      b_ = simde_int64x2_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vqshld_s64(a_.values[i], b_.values[i]);
    }

    return simde_int64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqshlq_s64
  #define vqshlq_s64(a, b) simde_vqshlq_s64((a), (b))
#endif

/* vqshlq_u8: lane-wise unsigned saturating shift; the per-lane shift
 * counts in `b` are signed (negative = right shift). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vqshlq_u8 (const simde_uint8x16_t a, const simde_int8x16_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vqshlq_u8(a, b);
  #else
    simde_uint8x16_private
      r_,
      a_ = simde_uint8x16_to_private(a);
    simde_int8x16_private
      b_ = simde_int8x16_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vqshlb_u8(a_.values[i], b_.values[i]);
    }

    return simde_uint8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqshlq_u8
  #define vqshlq_u8(a, b) simde_vqshlq_u8((a), (b))
#endif

/* vqshlq_u16: lane-wise unsigned saturating shift, 8x16-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vqshlq_u16 (const simde_uint16x8_t a, const simde_int16x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vqshlq_u16(a, b);
  #else
    simde_uint16x8_private
      r_,
      a_ = simde_uint16x8_to_private(a);
    simde_int16x8_private
      b_ = simde_int16x8_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vqshlh_u16(a_.values[i], b_.values[i]);
    }

    return simde_uint16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqshlq_u16
  #define vqshlq_u16(a, b) simde_vqshlq_u16((a), (b))
#endif

/* vqshlq_u32: lane-wise unsigned saturating shift, 4x32-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vqshlq_u32 (const simde_uint32x4_t a, const simde_int32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vqshlq_u32(a, b);
  #else
    simde_uint32x4_private
      r_,
      a_ = simde_uint32x4_to_private(a);
    simde_int32x4_private
      b_ = simde_int32x4_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vqshls_u32(a_.values[i], b_.values[i]);
    }

    return simde_uint32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqshlq_u32
  #define vqshlq_u32(a, b) simde_vqshlq_u32((a), (b))
#endif

/* vqshlq_u64: lane-wise unsigned saturating shift, 2x64-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vqshlq_u64 (const simde_uint64x2_t a, const simde_int64x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vqshlq_u64(a, b);
  #else
    simde_uint64x2_private
      r_,
      a_ = simde_uint64x2_to_private(a);
    simde_int64x2_private
      b_ = simde_int64x2_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vqshld_u64(a_.values[i], b_.values[i]);
    }

    return simde_uint64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqshlq_u64
  #define vqshlq_u64(a, b) simde_vqshlq_u64((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_QSHL_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/mlsl_high_n.h | .h | 4,299 | 129 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Décio Luiz Gazzoni Filho <decio@decpp.net>
*/
#if !defined(SIMDE_ARM_NEON_MLSL_HIGH_N_H)
#define SIMDE_ARM_NEON_MLSL_HIGH_N_H
#include "movl_high.h"
#include "dup_n.h"
#include "mls.h"
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vmlsl_high_n_s16(simde_int32x4_t a, simde_int16x8_t b, int16_t c) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmlsl_high_n_s16(a, b, c);
#else
return simde_vmlsq_s32(a, simde_vmovl_high_s16(b), simde_vdupq_n_s32(c));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmlsl_high_n_s16
#define vmlsl_high_n_s16(a, b, c) simde_vmlsl_high_n_s16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vmlsl_high_n_s32(simde_int64x2_t a, simde_int32x4_t b, int32_t c) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmlsl_high_n_s32(a, b, c);
#else
simde_int64x2_private
r_,
a_ = simde_int64x2_to_private(a),
b_ = simde_int64x2_to_private(simde_vmovl_high_s32(b)),
c_ = simde_int64x2_to_private(simde_vdupq_n_s64(c));
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values - (b_.values * c_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] - (b_.values[i] * c_.values[i]);
}
#endif
return simde_int64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmlsl_high_n_s32
#define vmlsl_high_n_s32(a, b, c) simde_vmlsl_high_n_s32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vmlsl_high_n_u16(simde_uint32x4_t a, simde_uint16x8_t b, uint16_t c) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmlsl_high_n_u16(a, b, c);
#else
return simde_vmlsq_u32(a, simde_vmovl_high_u16(b), simde_vdupq_n_u32(c));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmlsl_high_n_u16
#define vmlsl_high_n_u16(a, b, c) simde_vmlsl_high_n_u16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vmlsl_high_n_u32(simde_uint64x2_t a, simde_uint32x4_t b, uint32_t c) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmlsl_high_n_u32(a, b, c);
#else
simde_uint64x2_private
r_,
a_ = simde_uint64x2_to_private(a),
b_ = simde_uint64x2_to_private(simde_vmovl_high_u32(b)),
c_ = simde_uint64x2_to_private(simde_vdupq_n_u64(c));
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values - (b_.values * c_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] - (b_.values[i] * c_.values[i]);
}
#endif
return simde_uint64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmlsl_high_n_u32
#define vmlsl_high_n_u32(a, b, c) simde_vmlsl_high_n_u32((a), (b), (c))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_MLSL_HIGH_N_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/addw_high.h | .h | 6,592 | 192 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_ADDW_HIGH_H)
#define SIMDE_ARM_NEON_ADDW_HIGH_H
#include "types.h"
#include "movl_high.h"
#include "add.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vaddw_high_s8: widening add -- r = a + (upper 8 lanes of b, widened
 * to int16).  The scalar fallback indexes b at i + lanes/2 to pick the
 * high half. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vaddw_high_s8(simde_int16x8_t a, simde_int8x16_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vaddw_high_s8(a, b);
  #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
    return simde_vaddq_s16(a, simde_vmovl_high_s8(b));
  #else
    simde_int16x8_private r_;
    simde_int16x8_private a_ = simde_int16x8_to_private(a);
    simde_int8x16_private b_ = simde_int8x16_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = a_.values[i] + b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)];
    }

    return simde_int16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vaddw_high_s8
  #define vaddw_high_s8(a, b) simde_vaddw_high_s8((a), (b))
#endif

/* vaddw_high_s16: r = a + (upper 4 lanes of b, widened to int32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vaddw_high_s16(simde_int32x4_t a, simde_int16x8_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vaddw_high_s16(a, b);
  #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
    return simde_vaddq_s32(a, simde_vmovl_high_s16(b));
  #else
    simde_int32x4_private r_;
    simde_int32x4_private a_ = simde_int32x4_to_private(a);
    simde_int16x8_private b_ = simde_int16x8_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = a_.values[i] + b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)];
    }

    return simde_int32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vaddw_high_s16
  #define vaddw_high_s16(a, b) simde_vaddw_high_s16((a), (b))
#endif

/* vaddw_high_s32: r = a + (upper 2 lanes of b, widened to int64). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vaddw_high_s32(simde_int64x2_t a, simde_int32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vaddw_high_s32(a, b);
  #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
    return simde_vaddq_s64(a, simde_vmovl_high_s32(b));
  #else
    simde_int64x2_private r_;
    simde_int64x2_private a_ = simde_int64x2_to_private(a);
    simde_int32x4_private b_ = simde_int32x4_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = a_.values[i] + b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)];
    }

    return simde_int64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vaddw_high_s32
  #define vaddw_high_s32(a, b) simde_vaddw_high_s32((a), (b))
#endif

/* vaddw_high_u8: unsigned variant -- r = a + widened upper 8 lanes of b. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vaddw_high_u8(simde_uint16x8_t a, simde_uint8x16_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vaddw_high_u8(a, b);
  #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
    return simde_vaddq_u16(a, simde_vmovl_high_u8(b));
  #else
    simde_uint16x8_private r_;
    simde_uint16x8_private a_ = simde_uint16x8_to_private(a);
    simde_uint8x16_private b_ = simde_uint8x16_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = a_.values[i] + b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)];
    }

    return simde_uint16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vaddw_high_u8
  #define vaddw_high_u8(a, b) simde_vaddw_high_u8((a), (b))
#endif

/* vaddw_high_u16: r = a + widened upper 4 lanes of b. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vaddw_high_u16(simde_uint32x4_t a, simde_uint16x8_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vaddw_high_u16(a, b);
  #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
    return simde_vaddq_u32(a, simde_vmovl_high_u16(b));
  #else
    simde_uint32x4_private r_;
    simde_uint32x4_private a_ = simde_uint32x4_to_private(a);
    simde_uint16x8_private b_ = simde_uint16x8_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = a_.values[i] + b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)];
    }

    return simde_uint32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vaddw_high_u16
  #define vaddw_high_u16(a, b) simde_vaddw_high_u16((a), (b))
#endif

/* vaddw_high_u32: r = a + widened upper 2 lanes of b. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vaddw_high_u32(simde_uint64x2_t a, simde_uint32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vaddw_high_u32(a, b);
  #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
    return simde_vaddq_u64(a, simde_vmovl_high_u32(b));
  #else
    simde_uint64x2_private r_;
    simde_uint64x2_private a_ = simde_uint64x2_to_private(a);
    simde_uint32x4_private b_ = simde_uint32x4_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = a_.values[i] + b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)];
    }

    return simde_uint64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vaddw_high_u32
  #define vaddw_high_u32(a, b) simde_vaddw_high_u32((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_ADDW_HIGH_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/ld1q_x3.h | .h | 10,825 | 288 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2021 Zhi An Ng <zhin@google.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_LD1Q_X3_H)
#define SIMDE_ARM_NEON_LD1Q_X3_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
#if HEDLEY_GCC_VERSION_CHECK(7,0,0)
SIMDE_DIAGNOSTIC_DISABLE_MAYBE_UNINITIAZILED_
#endif
SIMDE_BEGIN_DECLS_
#if !defined(SIMDE_BUG_INTEL_857088)
/* vld1q_f32_x3: load 12 consecutive floats into three float32x4 vectors.
 * The native intrinsic is gated on GCC >= 8 / clang >= 7 with AArch64,
 * where the _x3 forms were first provided. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4x3_t
simde_vld1q_f32_x3(simde_float32 const ptr[HEDLEY_ARRAY_PARAM(12)]) {
  #if \
      defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
      (!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
      (!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
    return vld1q_f32_x3(ptr);
  #else
    simde_float32x4_private a_[3];
    /* 12 elements = 3 vectors x 4 lanes; i/4 selects the vector, i%4 the lane. */
    for (size_t i = 0; i < 12; i++) {
      a_[i / 4].values[i % 4] = ptr[i];
    }
    simde_float32x4x3_t s_ = { { simde_float32x4_from_private(a_[0]),
                                 simde_float32x4_from_private(a_[1]),
                                 simde_float32x4_from_private(a_[2]) } };
    return s_;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1q_f32_x3
  #define vld1q_f32_x3(a) simde_vld1q_f32_x3((a))
#endif

/* vld1q_f64_x3: load 6 consecutive doubles into three float64x2 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2x3_t
simde_vld1q_f64_x3(simde_float64 const ptr[HEDLEY_ARRAY_PARAM(6)]) {
  #if \
      defined(SIMDE_ARM_NEON_A64V8_NATIVE) && \
      (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,0,0)) && \
      (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0))
    return vld1q_f64_x3(ptr);
  #else
    simde_float64x2_private a_[3];
    for (size_t i = 0; i < 6; i++) {
      a_[i / 2].values[i % 2] = ptr[i];
    }
    simde_float64x2x3_t s_ = { { simde_float64x2_from_private(a_[0]),
                                 simde_float64x2_from_private(a_[1]),
                                 simde_float64x2_from_private(a_[2]) } };
    return s_;
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vld1q_f64_x3
  #define vld1q_f64_x3(a) simde_vld1q_f64_x3((a))
#endif

/* vld1q_s8_x3: load 48 consecutive int8 values into three int8x16 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16x3_t
simde_vld1q_s8_x3(int8_t const ptr[HEDLEY_ARRAY_PARAM(48)]) {
  #if \
      defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
      (!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
      (!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
    return vld1q_s8_x3(ptr);
  #else
    simde_int8x16_private a_[3];
    for (size_t i = 0; i < 48; i++) {
      a_[i / 16].values[i % 16] = ptr[i];
    }
    simde_int8x16x3_t s_ = { { simde_int8x16_from_private(a_[0]),
                               simde_int8x16_from_private(a_[1]),
                               simde_int8x16_from_private(a_[2]) } };
    return s_;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1q_s8_x3
  #define vld1q_s8_x3(a) simde_vld1q_s8_x3((a))
#endif
/* vld1q_s16_x3: load 24 consecutive int16 values into three int16x8
 * vectors.  The declared minimum array size was 12 (the 64-bit-vector
 * ld1_x3 count) while the fallback loop reads 24 elements; corrected to
 * match the q-register element count (3 x 8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8x3_t
simde_vld1q_s16_x3(int16_t const ptr[HEDLEY_ARRAY_PARAM(24)]) {
  #if \
      defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
      (!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
      (!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
    return vld1q_s16_x3(ptr);
  #else
    simde_int16x8_private a_[3];
    for (size_t i = 0; i < 24; i++) {
      a_[i / 8].values[i % 8] = ptr[i];
    }
    simde_int16x8x3_t s_ = { { simde_int16x8_from_private(a_[0]),
                               simde_int16x8_from_private(a_[1]),
                               simde_int16x8_from_private(a_[2]) } };
    return s_;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1q_s16_x3
  #define vld1q_s16_x3(a) simde_vld1q_s16_x3((a))
#endif

/* vld1q_s32_x3: load 12 consecutive int32 values into three int32x4
 * vectors (declared size corrected from 6 to 12 to match the loop). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4x3_t
simde_vld1q_s32_x3(int32_t const ptr[HEDLEY_ARRAY_PARAM(12)]) {
  #if \
      defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
      (!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
      (!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
    return vld1q_s32_x3(ptr);
  #else
    simde_int32x4_private a_[3];
    for (size_t i = 0; i < 12; i++) {
      a_[i / 4].values[i % 4] = ptr[i];
    }
    simde_int32x4x3_t s_ = { { simde_int32x4_from_private(a_[0]),
                               simde_int32x4_from_private(a_[1]),
                               simde_int32x4_from_private(a_[2]) } };
    return s_;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1q_s32_x3
  #define vld1q_s32_x3(a) simde_vld1q_s32_x3((a))
#endif

/* vld1q_s64_x3: load 6 consecutive int64 values into three int64x2
 * vectors (declared size corrected from 3 to 6 to match the loop). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2x3_t
simde_vld1q_s64_x3(int64_t const ptr[HEDLEY_ARRAY_PARAM(6)]) {
  #if \
      defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
      (!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
      (!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
    return vld1q_s64_x3(ptr);
  #else
    simde_int64x2_private a_[3];
    for (size_t i = 0; i < 6; i++) {
      a_[i / 2].values[i % 2] = ptr[i];
    }
    simde_int64x2x3_t s_ = { { simde_int64x2_from_private(a_[0]),
                               simde_int64x2_from_private(a_[1]),
                               simde_int64x2_from_private(a_[2]) } };
    return s_;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1q_s64_x3
  #define vld1q_s64_x3(a) simde_vld1q_s64_x3((a))
#endif
/* vld1q_u8_x3: load 48 consecutive uint8 values into three uint8x16
 * vectors; fallback distributes ptr[i] to vector i/16, lane i%16. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16x3_t
simde_vld1q_u8_x3(uint8_t const ptr[HEDLEY_ARRAY_PARAM(48)]) {
  #if \
      defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
      (!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
      (!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
    return vld1q_u8_x3(ptr);
  #else
    simde_uint8x16_private a_[3];
    for (size_t i = 0; i < 48; i++) {
      a_[i / 16].values[i % 16] = ptr[i];
    }
    simde_uint8x16x3_t s_ = { { simde_uint8x16_from_private(a_[0]),
                                simde_uint8x16_from_private(a_[1]),
                                simde_uint8x16_from_private(a_[2]) } };
    return s_;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1q_u8_x3
  #define vld1q_u8_x3(a) simde_vld1q_u8_x3((a))
#endif

/* vld1q_u16_x3: load 24 consecutive uint16 values into three uint16x8
 * vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8x3_t
simde_vld1q_u16_x3(uint16_t const ptr[HEDLEY_ARRAY_PARAM(24)]) {
  #if \
      defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
      (!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
      (!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
    return vld1q_u16_x3(ptr);
  #else
    simde_uint16x8_private a_[3];
    for (size_t i = 0; i < 24; i++) {
      a_[i / 8].values[i % 8] = ptr[i];
    }
    simde_uint16x8x3_t s_ = { { simde_uint16x8_from_private(a_[0]),
                                simde_uint16x8_from_private(a_[1]),
                                simde_uint16x8_from_private(a_[2]) } };
    return s_;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1q_u16_x3
  #define vld1q_u16_x3(a) simde_vld1q_u16_x3((a))
#endif
/* vld1q_u32_x3: load 12 consecutive uint32 values into three uint32x4
 * vectors.  Declared minimum array size corrected from 6 (the
 * 64-bit-vector ld1_x3 count) to 12, matching what the fallback loop
 * actually reads (3 x 4 lanes). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4x3_t
simde_vld1q_u32_x3(uint32_t const ptr[HEDLEY_ARRAY_PARAM(12)]) {
  #if \
      defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
      (!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
      (!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
    return vld1q_u32_x3(ptr);
  #else
    simde_uint32x4_private a_[3];
    for (size_t i = 0; i < 12; i++) {
      a_[i / 4].values[i % 4] = ptr[i];
    }
    simde_uint32x4x3_t s_ = { { simde_uint32x4_from_private(a_[0]),
                                simde_uint32x4_from_private(a_[1]),
                                simde_uint32x4_from_private(a_[2]) } };
    return s_;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1q_u32_x3
  #define vld1q_u32_x3(a) simde_vld1q_u32_x3((a))
#endif

/* vld1q_u64_x3: load 6 consecutive uint64 values into three uint64x2
 * vectors (declared size corrected from 3 to 6 to match the loop). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2x3_t
simde_vld1q_u64_x3(uint64_t const ptr[HEDLEY_ARRAY_PARAM(6)]) {
  #if \
      defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
      (!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
      (!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
    return vld1q_u64_x3(ptr);
  #else
    simde_uint64x2_private a_[3];
    for (size_t i = 0; i < 6; i++) {
      a_[i / 2].values[i % 2] = ptr[i];
    }
    simde_uint64x2x3_t s_ = { { simde_uint64x2_from_private(a_[0]),
                                simde_uint64x2_from_private(a_[1]),
                                simde_uint64x2_from_private(a_[2]) } };
    return s_;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1q_u64_x3
  #define vld1q_u64_x3(a) simde_vld1q_u64_x3((a))
#endif
#endif /* !defined(SIMDE_BUG_INTEL_857088) */
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_LD1Q_X3_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/rnd.h | .h | 4,379 | 148 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_RND_H)
#define SIMDE_ARM_NEON_RND_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* simde_vrnd_f32: truncate (round toward zero) each lane of a float32x2.
 * Uses the native vrnd_f32 instruction on ARMv8 NEON; otherwise applies
 * simde_math_truncf lane-by-lane on the private representation. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vrnd_f32(simde_float32x2_t a) {
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE)
return vrnd_f32(a);
#else
simde_float32x2_private
r_,
a_ = simde_float32x2_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_math_truncf(a_.values[i]);
}
return simde_float32x2_from_private(r_);
#endif
}
/* Alias the bare NEON name onto the portable implementation when requested. */
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrnd_f32
#define vrnd_f32(a) simde_vrnd_f32(a)
#endif
/* simde_vrnd_f64: truncate (round toward zero) the single lane of a
 * float64x1. Native instruction requires AArch64 (A64V8); otherwise falls
 * back to simde_math_trunc on the private representation. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vrnd_f64(simde_float64x1_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vrnd_f64(a);
#else
simde_float64x1_private
r_,
a_ = simde_float64x1_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_math_trunc(a_.values[i]);
}
return simde_float64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vrnd_f64
#define vrnd_f64(a) simde_vrnd_f64(a)
#endif
/* simde_vrndq_f32: truncate (round toward zero) each lane of a float32x4.
 * Dispatch order: native ARMv8 NEON, then POWER AltiVec vec_trunc, then
 * x86 (SSE4.1 _mm_round_ps with _MM_FROUND_TO_ZERO, or SVML _mm_trunc_ps),
 * and finally a scalar per-lane loop. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vrndq_f32(simde_float32x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE)
return vrndq_f32(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_trunc(a);
#else
simde_float32x4_private
r_,
a_ = simde_float32x4_to_private(a);
#if defined(SIMDE_X86_SSE4_1_NATIVE)
r_.m128 = _mm_round_ps(a_.m128, _MM_FROUND_TO_ZERO);
#elif defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
r_.m128 = _mm_trunc_ps(a_.m128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_math_truncf(a_.values[i]);
}
#endif
return simde_float32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrndq_f32
#define vrndq_f32(a) simde_vrndq_f32(a)
#endif
/* simde_vrndq_f64: truncate (round toward zero) each lane of a float64x2.
 * Same dispatch strategy as simde_vrndq_f32 but for doubles: AArch64
 * native, AltiVec P7 vec_trunc, SSE4.1/SVML, then a scalar loop. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vrndq_f64(simde_float64x2_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vrndq_f64(a);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
return vec_trunc(a);
#else
simde_float64x2_private
r_,
a_ = simde_float64x2_to_private(a);
#if defined(SIMDE_X86_SSE4_1_NATIVE)
r_.m128d = _mm_round_pd(a_.m128d, _MM_FROUND_TO_ZERO);
#elif defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
r_.m128d = _mm_trunc_pd(a_.m128d);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_math_trunc(a_.values[i]);
}
#endif
return simde_float64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vrndq_f64
#define vrndq_f64(a) simde_vrndq_f64(a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_RND_H) */
/* ==== begin vendored header: src/openms/extern/simde/simde/arm/neon/addv.h ==== */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_ADDV_H)
#define SIMDE_ARM_NEON_ADDV_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vaddv family: horizontal (across-lane) add of every element of a 64-bit
 * NEON vector, returning a scalar of the element type. The native vaddv_*
 * intrinsics exist only on AArch64 (A64V8); elsewhere each function reduces
 * the private .values array with a SIMDE_VECTORIZE_REDUCTION loop. The
 * accumulator is the element type itself, so integer sums wrap on overflow. */
/* Sum of both lanes of a float32x2. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32_t
simde_vaddv_f32(simde_float32x2_t a) {
simde_float32_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vaddv_f32(a);
#else
simde_float32x2_private a_ = simde_float32x2_to_private(a);
r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddv_f32
#define vaddv_f32(v) simde_vaddv_f32(v)
#endif
/* Sum of all 8 lanes of an int8x8 (wraps mod 2^8). */
SIMDE_FUNCTION_ATTRIBUTES
int8_t
simde_vaddv_s8(simde_int8x8_t a) {
int8_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vaddv_s8(a);
#else
simde_int8x8_private a_ = simde_int8x8_to_private(a);
r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddv_s8
#define vaddv_s8(v) simde_vaddv_s8(v)
#endif
/* Sum of all 4 lanes of an int16x4. */
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_vaddv_s16(simde_int16x4_t a) {
int16_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vaddv_s16(a);
#else
simde_int16x4_private a_ = simde_int16x4_to_private(a);
r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddv_s16
#define vaddv_s16(v) simde_vaddv_s16(v)
#endif
/* Sum of both lanes of an int32x2. */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_vaddv_s32(simde_int32x2_t a) {
int32_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vaddv_s32(a);
#else
simde_int32x2_private a_ = simde_int32x2_to_private(a);
r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddv_s32
#define vaddv_s32(v) simde_vaddv_s32(v)
#endif
/* Sum of all 8 lanes of a uint8x8 (wraps mod 2^8). */
SIMDE_FUNCTION_ATTRIBUTES
uint8_t
simde_vaddv_u8(simde_uint8x8_t a) {
uint8_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vaddv_u8(a);
#else
simde_uint8x8_private a_ = simde_uint8x8_to_private(a);
r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddv_u8
#define vaddv_u8(v) simde_vaddv_u8(v)
#endif
/* Sum of all 4 lanes of a uint16x4. */
SIMDE_FUNCTION_ATTRIBUTES
uint16_t
simde_vaddv_u16(simde_uint16x4_t a) {
uint16_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vaddv_u16(a);
#else
simde_uint16x4_private a_ = simde_uint16x4_to_private(a);
r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddv_u16
#define vaddv_u16(v) simde_vaddv_u16(v)
#endif
/* Sum of both lanes of a uint32x2. */
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_vaddv_u32(simde_uint32x2_t a) {
uint32_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vaddv_u32(a);
#else
simde_uint32x2_private a_ = simde_uint32x2_to_private(a);
r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddv_u32
#define vaddv_u32(v) simde_vaddv_u32(v)
#endif
/* vaddvq family: horizontal add across all lanes of a 128-bit NEON vector.
 * Same pattern as the 64-bit vaddv family: native AArch64 intrinsic when
 * available, otherwise a reduction loop over the private .values array.
 * vaddvq_u8 additionally has an SSE2 fast path. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32_t
simde_vaddvq_f32(simde_float32x4_t a) {
simde_float32_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vaddvq_f32(a);
#else
simde_float32x4_private a_ = simde_float32x4_to_private(a);
r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddvq_f32
#define vaddvq_f32(v) simde_vaddvq_f32(v)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64_t
simde_vaddvq_f64(simde_float64x2_t a) {
simde_float64_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vaddvq_f64(a);
#else
simde_float64x2_private a_ = simde_float64x2_to_private(a);
r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddvq_f64
#define vaddvq_f64(v) simde_vaddvq_f64(v)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int8_t
simde_vaddvq_s8(simde_int8x16_t a) {
int8_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vaddvq_s8(a);
#else
simde_int8x16_private a_ = simde_int8x16_to_private(a);
r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddvq_s8
#define vaddvq_s8(v) simde_vaddvq_s8(v)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_vaddvq_s16(simde_int16x8_t a) {
int16_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vaddvq_s16(a);
#else
simde_int16x8_private a_ = simde_int16x8_to_private(a);
r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddvq_s16
#define vaddvq_s16(v) simde_vaddvq_s16(v)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_vaddvq_s32(simde_int32x4_t a) {
int32_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vaddvq_s32(a);
#else
simde_int32x4_private a_ = simde_int32x4_to_private(a);
r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddvq_s32
#define vaddvq_s32(v) simde_vaddvq_s32(v)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_vaddvq_s64(simde_int64x2_t a) {
int64_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vaddvq_s64(a);
#else
simde_int64x2_private a_ = simde_int64x2_to_private(a);
r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddvq_s64
#define vaddvq_s64(v) simde_vaddvq_s64(v)
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint8_t
simde_vaddvq_u8(simde_uint8x16_t a) {
uint8_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vaddvq_u8(a);
#elif defined(SIMDE_X86_SSE2_NATIVE)
/* PSADBW against zero yields the sum of each 8-byte half in the low word
 * of the corresponding 64-bit lane; the shuffle (0xEE duplicates the high
 * half) plus byte-wise add folds the two halves, and the final cast keeps
 * only the low byte, which equals the full sum mod 256. */
__m128i a_ = simde_uint8x16_to_m128i(a);
a_ = _mm_sad_epu8(a_, _mm_setzero_si128());
a_ = _mm_add_epi8(a_, _mm_shuffle_epi32(a_, 0xEE));
return HEDLEY_STATIC_CAST(uint8_t, _mm_cvtsi128_si32(a_));
#else
simde_uint8x16_private a_ = simde_uint8x16_to_private(a);
r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddvq_u8
#define vaddvq_u8(v) simde_vaddvq_u8(v)
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint16_t
simde_vaddvq_u16(simde_uint16x8_t a) {
uint16_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vaddvq_u16(a);
#else
simde_uint16x8_private a_ = simde_uint16x8_to_private(a);
r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddvq_u16
#define vaddvq_u16(v) simde_vaddvq_u16(v)
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_vaddvq_u32(simde_uint32x4_t a) {
uint32_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vaddvq_u32(a);
#else
simde_uint32x4_private a_ = simde_uint32x4_to_private(a);
r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddvq_u32
#define vaddvq_u32(v) simde_vaddvq_u32(v)
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vaddvq_u64(simde_uint64x2_t a) {
uint64_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vaddvq_u64(a);
#else
simde_uint64x2_private a_ = simde_uint64x2_to_private(a);
r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddvq_u64
#define vaddvq_u64(v) simde_vaddvq_u64(v)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_ADDV_H) */
/* ==== begin vendored header: src/openms/extern/simde/simde/arm/neon/get_lane.h ==== */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_GET_LANE_H)
#define SIMDE_ARM_NEON_GET_LANE_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vget_lane family: extract one element from a 64-bit NEON vector.
 * Native ARM intrinsics require the lane index to be a compile-time
 * constant, so the SIMDE_CONSTIFY_*_ macros dispatch the runtime `lane`
 * to a constant-lane native call (the HEDLEY_UNREACHABLE default arms an
 * out-of-range index). Single-lane types (f64/s64/u64) ignore `lane` and
 * read lane 0. The portable fallback simply indexes the private .values. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32_t
simde_vget_lane_f32(simde_float32x2_t v, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
simde_float32_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_2_(vget_lane_f32, r, (HEDLEY_UNREACHABLE(), SIMDE_FLOAT32_C(0.0)), lane, v);
#else
simde_float32x2_private v_ = simde_float32x2_to_private(v);
r = v_.values[lane];
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vget_lane_f32
#define vget_lane_f32(v, lane) simde_vget_lane_f32((v), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64_t
simde_vget_lane_f64(simde_float64x1_t v, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
simde_float64_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
(void) lane;
return vget_lane_f64(v, 0);
#else
simde_float64x1_private v_ = simde_float64x1_to_private(v);
r = v_.values[lane];
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vget_lane_f64
#define vget_lane_f64(v, lane) simde_vget_lane_f64((v), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int8_t
simde_vget_lane_s8(simde_int8x8_t v, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
int8_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_8_(vget_lane_s8, r, (HEDLEY_UNREACHABLE(), INT8_C(0)), lane, v);
#else
simde_int8x8_private v_ = simde_int8x8_to_private(v);
r = v_.values[lane];
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vget_lane_s8
#define vget_lane_s8(v, lane) simde_vget_lane_s8((v), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_vget_lane_s16(simde_int16x4_t v, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
int16_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_4_(vget_lane_s16, r, (HEDLEY_UNREACHABLE(), INT16_C(0)), lane, v);
#else
simde_int16x4_private v_ = simde_int16x4_to_private(v);
r = v_.values[lane];
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vget_lane_s16
#define vget_lane_s16(v, lane) simde_vget_lane_s16((v), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_vget_lane_s32(simde_int32x2_t v, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
int32_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_2_(vget_lane_s32, r, (HEDLEY_UNREACHABLE(), INT32_C(0)), lane, v);
#else
simde_int32x2_private v_ = simde_int32x2_to_private(v);
r = v_.values[lane];
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vget_lane_s32
#define vget_lane_s32(v, lane) simde_vget_lane_s32((v), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_vget_lane_s64(simde_int64x1_t v, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
int64_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
(void) lane;
return vget_lane_s64(v, 0);
#else
simde_int64x1_private v_ = simde_int64x1_to_private(v);
r = v_.values[lane];
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vget_lane_s64
#define vget_lane_s64(v, lane) simde_vget_lane_s64((v), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint8_t
simde_vget_lane_u8(simde_uint8x8_t v, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
uint8_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_8_(vget_lane_u8, r, (HEDLEY_UNREACHABLE(), UINT8_C(0)), lane, v);
#else
simde_uint8x8_private v_ = simde_uint8x8_to_private(v);
r = v_.values[lane];
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vget_lane_u8
#define vget_lane_u8(v, lane) simde_vget_lane_u8((v), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint16_t
simde_vget_lane_u16(simde_uint16x4_t v, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
uint16_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_4_(vget_lane_u16, r, (HEDLEY_UNREACHABLE(), UINT16_C(0)), lane, v);
#else
simde_uint16x4_private v_ = simde_uint16x4_to_private(v);
r = v_.values[lane];
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vget_lane_u16
#define vget_lane_u16(v, lane) simde_vget_lane_u16((v), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_vget_lane_u32(simde_uint32x2_t v, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
uint32_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_2_(vget_lane_u32, r, (HEDLEY_UNREACHABLE(), UINT32_C(0)), lane, v);
#else
simde_uint32x2_private v_ = simde_uint32x2_to_private(v);
r = v_.values[lane];
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vget_lane_u32
#define vget_lane_u32(v, lane) simde_vget_lane_u32((v), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vget_lane_u64(simde_uint64x1_t v, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
uint64_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
(void) lane;
return vget_lane_u64(v, 0);
#else
simde_uint64x1_private v_ = simde_uint64x1_to_private(v);
r = v_.values[lane];
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vget_lane_u64
#define vget_lane_u64(v, lane) simde_vget_lane_u64((v), (lane))
#endif
/* vgetq_lane family: extract one element from a 128-bit NEON vector.
 * Same constant-lane dispatch pattern as the 64-bit vget_lane family, with
 * an extra WebAssembly SIMD128 path using wasm_*_extract_lane. The WASM
 * integer extract intrinsics return a widened int, so narrower results go
 * through a temporary and an explicit cast back to the element type. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32_t
simde_vgetq_lane_f32(simde_float32x4_t v, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
simde_float32_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_4_(vgetq_lane_f32, r, (HEDLEY_UNREACHABLE(), SIMDE_FLOAT32_C(0.0)), lane, v);
#else
simde_float32x4_private v_ = simde_float32x4_to_private(v);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_CONSTIFY_4_(wasm_f32x4_extract_lane, r, (HEDLEY_UNREACHABLE(), SIMDE_FLOAT32_C(0.0)), lane, v_.v128);
#else
r = v_.values[lane];
#endif
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vgetq_lane_f32
#define vgetq_lane_f32(v, lane) simde_vgetq_lane_f32((v), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64_t
simde_vgetq_lane_f64(simde_float64x2_t v, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
simde_float64_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_CONSTIFY_2_(vgetq_lane_f64, r, (HEDLEY_UNREACHABLE(), SIMDE_FLOAT64_C(0.0)), lane, v);
#else
simde_float64x2_private v_ = simde_float64x2_to_private(v);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_CONSTIFY_2_(wasm_f64x2_extract_lane, r, (HEDLEY_UNREACHABLE(), SIMDE_FLOAT64_C(0.0)), lane, v_.v128);
#else
r = v_.values[lane];
#endif
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vgetq_lane_f64
#define vgetq_lane_f64(v, lane) simde_vgetq_lane_f64((v), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int8_t
simde_vgetq_lane_s8(simde_int8x16_t v, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) {
int8_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_16_(vgetq_lane_s8, r, (HEDLEY_UNREACHABLE(), INT8_C(0)), lane, v);
#else
simde_int8x16_private v_ = simde_int8x16_to_private(v);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
int r_;
SIMDE_CONSTIFY_16_(wasm_i8x16_extract_lane, r_, (HEDLEY_UNREACHABLE(), INT8_C(0)), lane, v_.v128);
r = HEDLEY_STATIC_CAST(int8_t, r_);
#else
r = v_.values[lane];
#endif
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vgetq_lane_s8
#define vgetq_lane_s8(v, lane) simde_vgetq_lane_s8((v), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_vgetq_lane_s16(simde_int16x8_t v, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
int16_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_8_(vgetq_lane_s16, r, (HEDLEY_UNREACHABLE(), INT16_C(0)), lane, v);
#else
simde_int16x8_private v_ = simde_int16x8_to_private(v);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
int r_;
SIMDE_CONSTIFY_8_(wasm_i16x8_extract_lane, r_, (HEDLEY_UNREACHABLE(), INT16_C(0)), lane, v_.v128);
r = HEDLEY_STATIC_CAST(int16_t, r_);
#else
r = v_.values[lane];
#endif
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vgetq_lane_s16
#define vgetq_lane_s16(v, lane) simde_vgetq_lane_s16((v), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_vgetq_lane_s32(simde_int32x4_t v, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
int32_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_4_(vgetq_lane_s32, r, (HEDLEY_UNREACHABLE(), INT32_C(0)), lane, v);
#else
simde_int32x4_private v_ = simde_int32x4_to_private(v);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
int r_;
SIMDE_CONSTIFY_4_(wasm_i32x4_extract_lane, r_, (HEDLEY_UNREACHABLE(), INT32_C(0)), lane, v_.v128);
r = HEDLEY_STATIC_CAST(int32_t, r_);
#else
r = v_.values[lane];
#endif
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vgetq_lane_s32
#define vgetq_lane_s32(v, lane) simde_vgetq_lane_s32((v), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_vgetq_lane_s64(simde_int64x2_t v, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
int64_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_2_(vgetq_lane_s64, r, (HEDLEY_UNREACHABLE(), INT64_C(0)), lane, v);
#else
simde_int64x2_private v_ = simde_int64x2_to_private(v);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
int64_t r_;
SIMDE_CONSTIFY_2_(wasm_i64x2_extract_lane, r_, (HEDLEY_UNREACHABLE(), INT64_C(0)), lane, v_.v128);
r = HEDLEY_STATIC_CAST(int64_t, r_);
#else
r = v_.values[lane];
#endif
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vgetq_lane_s64
#define vgetq_lane_s64(v, lane) simde_vgetq_lane_s64((v), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint8_t
simde_vgetq_lane_u8(simde_uint8x16_t v, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) {
uint8_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_16_(vgetq_lane_u8, r, (HEDLEY_UNREACHABLE(), UINT8_C(0)), lane, v);
#else
simde_uint8x16_private v_ = simde_uint8x16_to_private(v);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
int r_;
SIMDE_CONSTIFY_16_(wasm_i8x16_extract_lane, r_, (HEDLEY_UNREACHABLE(), UINT8_C(0)), lane, v_.v128);
r = HEDLEY_STATIC_CAST(uint8_t, r_);
#else
r = v_.values[lane];
#endif
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vgetq_lane_u8
#define vgetq_lane_u8(v, lane) simde_vgetq_lane_u8((v), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint16_t
simde_vgetq_lane_u16(simde_uint16x8_t v, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
uint16_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_8_(vgetq_lane_u16, r, (HEDLEY_UNREACHABLE(), UINT16_C(0)), lane, v);
#else
simde_uint16x8_private v_ = simde_uint16x8_to_private(v);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
int r_;
SIMDE_CONSTIFY_8_(wasm_i16x8_extract_lane, r_, (HEDLEY_UNREACHABLE(), UINT16_C(0)), lane, v_.v128);
r = HEDLEY_STATIC_CAST(uint16_t, r_);
#else
r = v_.values[lane];
#endif
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vgetq_lane_u16
#define vgetq_lane_u16(v, lane) simde_vgetq_lane_u16((v), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_vgetq_lane_u32(simde_uint32x4_t v, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
uint32_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_4_(vgetq_lane_u32, r, (HEDLEY_UNREACHABLE(), UINT32_C(0)), lane, v);
#else
simde_uint32x4_private v_ = simde_uint32x4_to_private(v);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
int32_t r_;
SIMDE_CONSTIFY_4_(wasm_i32x4_extract_lane, r_, (HEDLEY_UNREACHABLE(), UINT32_C(0)), lane, v_.v128);
r = HEDLEY_STATIC_CAST(uint32_t, r_);
#else
r = v_.values[lane];
#endif
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vgetq_lane_u32
#define vgetq_lane_u32(v, lane) simde_vgetq_lane_u32((v), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vgetq_lane_u64(simde_uint64x2_t v, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
uint64_t r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_2_(vgetq_lane_u64, r, (HEDLEY_UNREACHABLE(), UINT64_C(0)), lane, v);
#else
simde_uint64x2_private v_ = simde_uint64x2_to_private(v);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
int64_t r_;
SIMDE_CONSTIFY_2_(wasm_i64x2_extract_lane, r_, (HEDLEY_UNREACHABLE(), UINT64_C(0)), lane, v_.v128);
r = HEDLEY_STATIC_CAST(uint64_t, r_);
#else
r = v_.values[lane];
#endif
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vgetq_lane_u64
#define vgetq_lane_u64(v, lane) simde_vgetq_lane_u64((v), (lane))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_GET_LANE_H) */
/* ==== begin vendored header: src/openms/extern/simde/simde/arm/neon/clz.h ==== */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_CLZ_H)
#define SIMDE_ARM_NEON_CLZ_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* simde_x_vclzb_u8: count leading zero bits of an 8-bit value.
 * Returns 8 when a == 0 (the builtin clz is undefined for 0, so zero is
 * handled explicitly). Uses the compiler's clz builtin when one matching
 * the 8-bit width is available; otherwise a branch-free bit-smearing
 * computation of the highest set bit's position. */
SIMDE_FUNCTION_ATTRIBUTES
uint8_t
simde_x_vclzb_u8(uint8_t a) {
#if \
defined(SIMDE_BUILTIN_SUFFIX_8_) && \
( \
SIMDE_BUILTIN_HAS_8_(clz) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_GCC_VERSION_CHECK(3,4,0) || \
HEDLEY_IBM_VERSION_CHECK(13,1,0) \
)
/* Fix: this branch previously returned 8 * sizeof(r), but no variable
 * `r` is declared in this branch (it only exists in the fallback below),
 * which is a compile error when the builtin path is selected. Use the
 * argument `a` — sizeof is identical (uint8_t). */
if (HEDLEY_UNLIKELY(a == 0))
return 8 * sizeof(a);
return HEDLEY_STATIC_CAST(uint8_t, SIMDE_BUILTIN_8_(clz)(HEDLEY_STATIC_CAST(unsigned SIMDE_BUILTIN_TYPE_8_, a)));
#else
uint8_t r;
uint8_t shift;
if (HEDLEY_UNLIKELY(a == 0))
return 8 * sizeof(r);
/* Locate the highest set bit by successively halving the search range,
 * accumulating its index in r; clz = (width - 1) - index. */
r = HEDLEY_STATIC_CAST(uint8_t, (a > UINT8_C(0x0F)) << 2); a >>= r;
shift = HEDLEY_STATIC_CAST(uint8_t, (a > UINT8_C(0x03)) << 1); a >>= shift; r |= shift;
r |= (a >> 1);
return ((8 * sizeof(r)) - 1) - r;
#endif
}
/* simde_x_vclzh_u16: count leading zero bits of a 16-bit value.
 * Returns 16 when a == 0. Same structure as simde_x_vclzb_u8: compiler
 * builtin when available, otherwise bit-smearing highest-bit search. */
SIMDE_FUNCTION_ATTRIBUTES
uint16_t
simde_x_vclzh_u16(uint16_t a) {
#if \
defined(SIMDE_BUILTIN_SUFFIX_16_) && \
( \
SIMDE_BUILTIN_HAS_16_(clz) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_GCC_VERSION_CHECK(3,4,0) || \
HEDLEY_IBM_VERSION_CHECK(13,1,0) \
)
/* Fix: previously returned 8 * sizeof(r), but `r` is only declared in
 * the fallback branch below — a compile error on the builtin path.
 * sizeof(a) is the correct, equivalent size (uint16_t). */
if (HEDLEY_UNLIKELY(a == 0))
return 8 * sizeof(a);
return HEDLEY_STATIC_CAST(uint16_t, SIMDE_BUILTIN_16_(clz)(HEDLEY_STATIC_CAST(unsigned SIMDE_BUILTIN_TYPE_16_, a)));
#else
uint16_t r;
uint16_t shift;
if (HEDLEY_UNLIKELY(a == 0))
return 8 * sizeof(r);
/* Binary search for the highest set bit; clz = (width - 1) - index. */
r = HEDLEY_STATIC_CAST(uint16_t, (a > UINT16_C(0x00FF)) << 3); a >>= r;
shift = HEDLEY_STATIC_CAST(uint16_t, (a > UINT16_C(0x000F)) << 2); a >>= shift; r |= shift;
shift = HEDLEY_STATIC_CAST(uint16_t, (a > UINT16_C(0x0003)) << 1); a >>= shift; r |= shift;
r |= (a >> 1);
return ((8 * sizeof(r)) - 1) - r;
#endif
}
/* simde_x_vclzs_u32: count leading zero bits of a 32-bit value.
 * Returns 32 when a == 0. Uses the compiler clz builtin when available
 * (note: correctly uses sizeof(a) here, unlike the 8/16-bit variants
 * above which referenced an undeclared `r`); otherwise a bit-smearing
 * binary search for the highest set bit. */
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_x_vclzs_u32(uint32_t a) {
#if \
defined(SIMDE_BUILTIN_SUFFIX_32_) && \
( \
SIMDE_BUILTIN_HAS_32_(clz) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_GCC_VERSION_CHECK(3,4,0) || \
HEDLEY_IBM_VERSION_CHECK(13,1,0) \
)
if (HEDLEY_UNLIKELY(a == 0))
return 8 * sizeof(a);
return HEDLEY_STATIC_CAST(uint32_t, SIMDE_BUILTIN_32_(clz)(HEDLEY_STATIC_CAST(unsigned SIMDE_BUILTIN_TYPE_32_, a)));
#else
uint32_t r;
uint32_t shift;
if (HEDLEY_UNLIKELY(a == 0))
return 8 * sizeof(a);
/* Binary search for the highest set bit; clz = 31 - its index. */
r = HEDLEY_STATIC_CAST(uint32_t, (a > UINT32_C(0xFFFF)) << 4); a >>= r;
shift = HEDLEY_STATIC_CAST(uint32_t, (a > UINT32_C(0x00FF)) << 3); a >>= shift; r |= shift;
shift = HEDLEY_STATIC_CAST(uint32_t, (a > UINT32_C(0x000F)) << 2); a >>= shift; r |= shift;
shift = HEDLEY_STATIC_CAST(uint32_t, (a > UINT32_C(0x0003)) << 1); a >>= shift; r |= shift;
r |= (a >> 1);
return ((8 * sizeof(r)) - 1) - r;
#endif
}
/* Signed scalar clz wrappers: reinterpret the signed value's bit pattern
 * as unsigned, delegate to the unsigned implementation, and cast back. */
SIMDE_FUNCTION_ATTRIBUTES
int8_t
simde_x_vclzb_s8(int8_t a) {
return HEDLEY_STATIC_CAST(int8_t, simde_x_vclzb_u8(HEDLEY_STATIC_CAST(uint8_t, a)));
}
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_x_vclzh_s16(int16_t a) {
return HEDLEY_STATIC_CAST(int16_t, simde_x_vclzh_u16(HEDLEY_STATIC_CAST(uint16_t, a)));
}
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_x_vclzs_s32(int32_t a) {
return HEDLEY_STATIC_CAST(int32_t, simde_x_vclzs_u32(HEDLEY_STATIC_CAST(uint32_t, a)));
}
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vclz_s8(simde_int8x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vclz_s8(a);
#else
simde_int8x8_private
a_ = simde_int8x8_to_private(a),
r_;
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_x_vclzb_s8(a_.values[i]);
}
return simde_int8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vclz_s8
#define vclz_s8(a) simde_vclz_s8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vclz_s16(simde_int16x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vclz_s16(a);
#else
simde_int16x4_private
a_ = simde_int16x4_to_private(a),
r_;
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_x_vclzh_s16(a_.values[i]);
}
return simde_int16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vclz_s16
#define vclz_s16(a) simde_vclz_s16(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vclz_s32(simde_int32x2_t a) {
  /* Lane-wise count of leading zero bits (NEON vclz_s32). */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vclz_s32(a);
  #else
    simde_int32x2_private v_ = simde_int32x2_to_private(a);
    simde_int32x2_private out_;
    for (size_t lane = 0 ; lane < (sizeof(out_.values) / sizeof(out_.values[0])) ; lane++) {
      out_.values[lane] = simde_x_vclzs_s32(v_.values[lane]);
    }
    return simde_int32x2_from_private(out_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vclz_s32
  #define vclz_s32(a) simde_vclz_s32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vclz_u8(simde_uint8x8_t a) {
  /* Lane-wise count of leading zero bits (NEON vclz_u8). */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vclz_u8(a);
  #else
    simde_uint8x8_private v_ = simde_uint8x8_to_private(a);
    simde_uint8x8_private out_;
    for (size_t lane = 0 ; lane < (sizeof(out_.values) / sizeof(out_.values[0])) ; lane++) {
      out_.values[lane] = simde_x_vclzb_u8(v_.values[lane]);
    }
    return simde_uint8x8_from_private(out_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vclz_u8
  #define vclz_u8(a) simde_vclz_u8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vclz_u16(simde_uint16x4_t a) {
  /* Lane-wise count of leading zero bits (NEON vclz_u16). */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vclz_u16(a);
  #else
    simde_uint16x4_private v_ = simde_uint16x4_to_private(a);
    simde_uint16x4_private out_;
    for (size_t lane = 0 ; lane < (sizeof(out_.values) / sizeof(out_.values[0])) ; lane++) {
      out_.values[lane] = simde_x_vclzh_u16(v_.values[lane]);
    }
    return simde_uint16x4_from_private(out_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vclz_u16
  #define vclz_u16(a) simde_vclz_u16(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vclz_u32(simde_uint32x2_t a) {
  /* Lane-wise count of leading zero bits (NEON vclz_u32). */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vclz_u32(a);
  #else
    simde_uint32x2_private v_ = simde_uint32x2_to_private(a);
    simde_uint32x2_private out_;
    for (size_t lane = 0 ; lane < (sizeof(out_.values) / sizeof(out_.values[0])) ; lane++) {
      out_.values[lane] = simde_x_vclzs_u32(v_.values[lane]);
    }
    return simde_uint32x2_from_private(out_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vclz_u32
  #define vclz_u32(a) simde_vclz_u32(a)
#endif
/* Lane-wise count of leading zero bits for 16 signed bytes (NEON vclzq_s8).
 * The x86 GFNI path uses two GF(2^8) affine transforms; the matrix constants
 * encode a bit-reversal and a CLZ lookup and must not be altered. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vclzq_s8(simde_int8x16_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vclzq_s8(a);
#else
simde_int8x16_private
a_ = simde_int8x16_to_private(a),
r_;
#if defined(SIMDE_X86_GFNI_NATIVE)
/* https://gist.github.com/animetosho/6cb732ccb5ecd86675ca0a442b3c0622 */
/* Bit-reverse each byte, isolate the lowest set bit of the reversed value,
 * then map that isolated bit to the leading-zero count. */
a_.m128i = _mm_gf2p8affine_epi64_epi8(a_.m128i, _mm_set_epi32(HEDLEY_STATIC_CAST(int32_t, 0x80402010), HEDLEY_STATIC_CAST(int32_t, 0x08040201), HEDLEY_STATIC_CAST(int32_t, 0x80402010), HEDLEY_STATIC_CAST(int32_t, 0x08040201)), 0);
a_.m128i = _mm_andnot_si128(_mm_add_epi8(a_.m128i, _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, 0xff))), a_.m128i);
r_.m128i = _mm_gf2p8affine_epi64_epi8(a_.m128i, _mm_set_epi32(HEDLEY_STATIC_CAST(int32_t, 0xaaccf0ff), 0, HEDLEY_STATIC_CAST(int32_t, 0xaaccf0ff), 0), 8);
#else
/* Portable fallback: scalar CLZ per lane. */
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_x_vclzb_s8(a_.values[i]);
}
#endif
return simde_int8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vclzq_s8
#define vclzq_s8(a) simde_vclzq_s8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vclzq_s16(simde_int16x8_t a) {
  /* Lane-wise count of leading zero bits (NEON vclzq_s16). */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vclzq_s16(a);
  #else
    simde_int16x8_private v_ = simde_int16x8_to_private(a);
    simde_int16x8_private out_;
    for (size_t lane = 0 ; lane < (sizeof(out_.values) / sizeof(out_.values[0])) ; lane++) {
      out_.values[lane] = simde_x_vclzh_s16(v_.values[lane]);
    }
    return simde_int16x8_from_private(out_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vclzq_s16
  #define vclzq_s16(a) simde_vclzq_s16(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vclzq_s32(simde_int32x4_t a) {
  /* Lane-wise count of leading zero bits (NEON vclzq_s32). */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vclzq_s32(a);
  #else
    simde_int32x4_private v_ = simde_int32x4_to_private(a);
    simde_int32x4_private out_;
    for (size_t lane = 0 ; lane < (sizeof(out_.values) / sizeof(out_.values[0])) ; lane++) {
      out_.values[lane] = simde_x_vclzs_s32(v_.values[lane]);
    }
    return simde_int32x4_from_private(out_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vclzq_s32
  #define vclzq_s32(a) simde_vclzq_s32(a)
#endif
/* Lane-wise count of leading zero bits for 16 unsigned bytes (NEON vclzq_u8).
 * Same GFNI affine-transform trick as simde_vclzq_s8; the matrix constants
 * encode a bit-reversal and a CLZ lookup and must not be altered. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vclzq_u8(simde_uint8x16_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vclzq_u8(a);
#else
simde_uint8x16_private
a_ = simde_uint8x16_to_private(a),
r_;
#if defined(SIMDE_X86_GFNI_NATIVE)
/* Bit-reverse each byte, isolate the lowest set bit of the reversed value,
 * then map that isolated bit to the leading-zero count. */
a_.m128i = _mm_gf2p8affine_epi64_epi8(a_.m128i, _mm_set_epi32(HEDLEY_STATIC_CAST(int32_t, 0x80402010), HEDLEY_STATIC_CAST(int32_t, 0x08040201), HEDLEY_STATIC_CAST(int32_t, 0x80402010), HEDLEY_STATIC_CAST(int32_t, 0x08040201)), 0);
a_.m128i = _mm_andnot_si128(_mm_add_epi8(a_.m128i, _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, 0xff))), a_.m128i);
r_.m128i = _mm_gf2p8affine_epi64_epi8(a_.m128i, _mm_set_epi32(HEDLEY_STATIC_CAST(int32_t, 0xaaccf0ff), 0, HEDLEY_STATIC_CAST(int32_t, 0xaaccf0ff), 0), 8);
#else
/* Portable fallback: scalar CLZ per lane. */
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_x_vclzb_u8(a_.values[i]);
}
#endif
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vclzq_u8
#define vclzq_u8(a) simde_vclzq_u8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vclzq_u16(simde_uint16x8_t a) {
  /* Lane-wise count of leading zero bits (NEON vclzq_u16). */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vclzq_u16(a);
  #else
    simde_uint16x8_private v_ = simde_uint16x8_to_private(a);
    simde_uint16x8_private out_;
    for (size_t lane = 0 ; lane < (sizeof(out_.values) / sizeof(out_.values[0])) ; lane++) {
      out_.values[lane] = simde_x_vclzh_u16(v_.values[lane]);
    }
    return simde_uint16x8_from_private(out_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vclzq_u16
  #define vclzq_u16(a) simde_vclzq_u16(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vclzq_u32(simde_uint32x4_t a) {
  /* Lane-wise count of leading zero bits (NEON vclzq_u32). */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vclzq_u32(a);
  #else
    simde_uint32x4_private v_ = simde_uint32x4_to_private(a);
    simde_uint32x4_private out_;
    for (size_t lane = 0 ; lane < (sizeof(out_.values) / sizeof(out_.values[0])) ; lane++) {
      out_.values[lane] = simde_x_vclzs_u32(v_.values[lane]);
    }
    return simde_uint32x4_from_private(out_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vclzq_u32
  #define vclzq_u32(a) simde_vclzq_u32(a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_CLZ_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_ZIP_H) && !defined(SIMDE_BUG_INTEL_857088)
#define SIMDE_ARM_NEON_ZIP_H
#include "types.h"
#include "zip1.h"
#include "zip2.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2x2_t
simde_vzip_f32(simde_float32x2_t a, simde_float32x2_t b) {
  /* Interleave a and b: element 0 holds the low-half zip, element 1 the high-half zip. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vzip_f32(a, b);
  #else
    simde_float32x2_t lo = simde_vzip1_f32(a, b);
    simde_float32x2_t hi = simde_vzip2_f32(a, b);
    simde_float32x2x2_t result = { { lo, hi } };
    return result;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vzip_f32
  #define vzip_f32(a, b) simde_vzip_f32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8x2_t
simde_vzip_s8(simde_int8x8_t a, simde_int8x8_t b) {
  /* Interleave a and b: element 0 holds the low-half zip, element 1 the high-half zip. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vzip_s8(a, b);
  #else
    simde_int8x8_t lo = simde_vzip1_s8(a, b);
    simde_int8x8_t hi = simde_vzip2_s8(a, b);
    simde_int8x8x2_t result = { { lo, hi } };
    return result;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vzip_s8
  #define vzip_s8(a, b) simde_vzip_s8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4x2_t
simde_vzip_s16(simde_int16x4_t a, simde_int16x4_t b) {
  /* Interleave a and b: element 0 holds the low-half zip, element 1 the high-half zip. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vzip_s16(a, b);
  #else
    simde_int16x4_t lo = simde_vzip1_s16(a, b);
    simde_int16x4_t hi = simde_vzip2_s16(a, b);
    simde_int16x4x2_t result = { { lo, hi } };
    return result;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vzip_s16
  #define vzip_s16(a, b) simde_vzip_s16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2x2_t
simde_vzip_s32(simde_int32x2_t a, simde_int32x2_t b) {
  /* Interleave a and b: element 0 holds the low-half zip, element 1 the high-half zip. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vzip_s32(a, b);
  #else
    simde_int32x2_t lo = simde_vzip1_s32(a, b);
    simde_int32x2_t hi = simde_vzip2_s32(a, b);
    simde_int32x2x2_t result = { { lo, hi } };
    return result;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vzip_s32
  #define vzip_s32(a, b) simde_vzip_s32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8x2_t
simde_vzip_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
  /* Interleave a and b: element 0 holds the low-half zip, element 1 the high-half zip. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vzip_u8(a, b);
  #else
    simde_uint8x8_t lo = simde_vzip1_u8(a, b);
    simde_uint8x8_t hi = simde_vzip2_u8(a, b);
    simde_uint8x8x2_t result = { { lo, hi } };
    return result;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vzip_u8
  #define vzip_u8(a, b) simde_vzip_u8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4x2_t
simde_vzip_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
  /* Interleave a and b: element 0 holds the low-half zip, element 1 the high-half zip. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vzip_u16(a, b);
  #else
    simde_uint16x4_t lo = simde_vzip1_u16(a, b);
    simde_uint16x4_t hi = simde_vzip2_u16(a, b);
    simde_uint16x4x2_t result = { { lo, hi } };
    return result;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vzip_u16
  #define vzip_u16(a, b) simde_vzip_u16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2x2_t
simde_vzip_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
  /* Interleave a and b: element 0 holds the low-half zip, element 1 the high-half zip. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vzip_u32(a, b);
  #else
    simde_uint32x2_t lo = simde_vzip1_u32(a, b);
    simde_uint32x2_t hi = simde_vzip2_u32(a, b);
    simde_uint32x2x2_t result = { { lo, hi } };
    return result;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vzip_u32
  #define vzip_u32(a, b) simde_vzip_u32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4x2_t
simde_vzipq_f32(simde_float32x4_t a, simde_float32x4_t b) {
  /* Interleave a and b (quad): element 0 = low-half zip, element 1 = high-half zip. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vzipq_f32(a, b);
  #else
    simde_float32x4_t lo = simde_vzip1q_f32(a, b);
    simde_float32x4_t hi = simde_vzip2q_f32(a, b);
    simde_float32x4x2_t result = { { lo, hi } };
    return result;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vzipq_f32
  #define vzipq_f32(a, b) simde_vzipq_f32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16x2_t
simde_vzipq_s8(simde_int8x16_t a, simde_int8x16_t b) {
  /* Interleave a and b (quad): element 0 = low-half zip, element 1 = high-half zip. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vzipq_s8(a, b);
  #else
    simde_int8x16_t lo = simde_vzip1q_s8(a, b);
    simde_int8x16_t hi = simde_vzip2q_s8(a, b);
    simde_int8x16x2_t result = { { lo, hi } };
    return result;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vzipq_s8
  #define vzipq_s8(a, b) simde_vzipq_s8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8x2_t
simde_vzipq_s16(simde_int16x8_t a, simde_int16x8_t b) {
  /* Interleave a and b (quad): element 0 = low-half zip, element 1 = high-half zip. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vzipq_s16(a, b);
  #else
    simde_int16x8_t lo = simde_vzip1q_s16(a, b);
    simde_int16x8_t hi = simde_vzip2q_s16(a, b);
    simde_int16x8x2_t result = { { lo, hi } };
    return result;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vzipq_s16
  #define vzipq_s16(a, b) simde_vzipq_s16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4x2_t
simde_vzipq_s32(simde_int32x4_t a, simde_int32x4_t b) {
  /* Interleave a and b (quad): element 0 = low-half zip, element 1 = high-half zip. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vzipq_s32(a, b);
  #else
    simde_int32x4_t lo = simde_vzip1q_s32(a, b);
    simde_int32x4_t hi = simde_vzip2q_s32(a, b);
    simde_int32x4x2_t result = { { lo, hi } };
    return result;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vzipq_s32
  #define vzipq_s32(a, b) simde_vzipq_s32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16x2_t
simde_vzipq_u8(simde_uint8x16_t a, simde_uint8x16_t b) {
  /* Interleave a and b (quad): element 0 = low-half zip, element 1 = high-half zip. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vzipq_u8(a, b);
  #else
    simde_uint8x16_t lo = simde_vzip1q_u8(a, b);
    simde_uint8x16_t hi = simde_vzip2q_u8(a, b);
    simde_uint8x16x2_t result = { { lo, hi } };
    return result;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vzipq_u8
  #define vzipq_u8(a, b) simde_vzipq_u8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8x2_t
simde_vzipq_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
  /* Interleave a and b (quad): element 0 = low-half zip, element 1 = high-half zip. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vzipq_u16(a, b);
  #else
    simde_uint16x8_t lo = simde_vzip1q_u16(a, b);
    simde_uint16x8_t hi = simde_vzip2q_u16(a, b);
    simde_uint16x8x2_t result = { { lo, hi } };
    return result;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vzipq_u16
  #define vzipq_u16(a, b) simde_vzipq_u16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4x2_t
simde_vzipq_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
  /* Interleave a and b (quad): element 0 = low-half zip, element 1 = high-half zip. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vzipq_u32(a, b);
  #else
    simde_uint32x4_t lo = simde_vzip1q_u32(a, b);
    simde_uint32x4_t hi = simde_vzip2q_u32(a, b);
    simde_uint32x4x2_t result = { { lo, hi } };
    return result;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vzipq_u32
  #define vzipq_u32(a, b) simde_vzipq_u32((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_ZIP_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020-2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_RNDI_H)
#define SIMDE_ARM_NEON_RNDI_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vrndi_f32(simde_float32x2_t a) {
  /* Round each lane to an integral value using the current FP rounding mode
   * (C nearbyint semantics). */
  #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
    return vrndi_f32(a);
  #else
    simde_float32x2_private in_ = simde_float32x2_to_private(a);
    simde_float32x2_private out_;
    SIMDE_VECTORIZE
    for (size_t lane = 0 ; lane < (sizeof(out_.values) / sizeof(out_.values[0])) ; lane++) {
      out_.values[lane] = simde_math_nearbyintf(in_.values[lane]);
    }
    return simde_float32x2_from_private(out_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrndi_f32
  #define vrndi_f32(a) simde_vrndi_f32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vrndi_f64(simde_float64x1_t a) {
  /* Round each lane to an integral value using the current FP rounding mode
   * (C nearbyint semantics). */
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
    return vrndi_f64(a);
  #else
    simde_float64x1_private in_ = simde_float64x1_to_private(a);
    simde_float64x1_private out_;
    SIMDE_VECTORIZE
    for (size_t lane = 0 ; lane < (sizeof(out_.values) / sizeof(out_.values[0])) ; lane++) {
      out_.values[lane] = simde_math_nearbyint(in_.values[lane]);
    }
    return simde_float64x1_from_private(out_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vrndi_f64
  #define vrndi_f64(a) simde_vrndi_f64(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vrndiq_f32(simde_float32x4_t a) {
  /* Round each lane to an integral value using the current FP rounding mode.
   * SSE4.1 path: _MM_FROUND_CUR_DIRECTION matches nearbyint semantics. */
  #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
    return vrndiq_f32(a);
  #else
    simde_float32x4_private in_ = simde_float32x4_to_private(a);
    simde_float32x4_private out_;
    #if defined(SIMDE_X86_SSE4_1_NATIVE)
      out_.m128 = _mm_round_ps(in_.m128, _MM_FROUND_CUR_DIRECTION);
    #else
      SIMDE_VECTORIZE
      for (size_t lane = 0 ; lane < (sizeof(out_.values) / sizeof(out_.values[0])) ; lane++) {
        out_.values[lane] = simde_math_nearbyintf(in_.values[lane]);
      }
    #endif
    return simde_float32x4_from_private(out_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrndiq_f32
  #define vrndiq_f32(a) simde_vrndiq_f32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vrndiq_f64(simde_float64x2_t a) {
  /* Round each lane to an integral value using the current FP rounding mode.
   * SSE4.1 path: _MM_FROUND_CUR_DIRECTION matches nearbyint semantics. */
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
    return vrndiq_f64(a);
  #else
    simde_float64x2_private in_ = simde_float64x2_to_private(a);
    simde_float64x2_private out_;
    #if defined(SIMDE_X86_SSE4_1_NATIVE)
      out_.m128d = _mm_round_pd(in_.m128d, _MM_FROUND_CUR_DIRECTION);
    #else
      SIMDE_VECTORIZE
      for (size_t lane = 0 ; lane < (sizeof(out_.values) / sizeof(out_.values[0])) ; lane++) {
        out_.values[lane] = simde_math_nearbyint(in_.values[lane]);
      }
    #endif
    return simde_float64x2_from_private(out_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vrndiq_f64
  #define vrndiq_f64(a) simde_vrndiq_f64(a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_RNDI_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_TYPES_H)
#define SIMDE_ARM_NEON_TYPES_H
#include "../../simde-common.h"
#include "../../simde-f16.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Declare a vector member: use the compiler's subscriptable vector extension
 * when available, otherwise a plain array sized to Vector_Size bytes. */
#if defined(SIMDE_VECTOR_SUBSCRIPT)
#define SIMDE_ARM_NEON_DECLARE_VECTOR(Element_Type, Name, Vector_Size) Element_Type Name SIMDE_VECTOR(Vector_Size)
#else
#define SIMDE_ARM_NEON_DECLARE_VECTOR(Element_Type, Name, Vector_Size) Element_Type Name[(Vector_Size) / sizeof(Element_Type)]
#endif
/* 64-bit (D-register) private representations. Each union overlays the
 * portable `values` member with the native x86 MMX register when available. */
typedef union {
SIMDE_ARM_NEON_DECLARE_VECTOR(int8_t, values, 8);
#if defined(SIMDE_X86_MMX_NATIVE)
__m64 m64;
#endif
} simde_int8x8_private;
typedef union {
SIMDE_ARM_NEON_DECLARE_VECTOR(int16_t, values, 8);
#if defined(SIMDE_X86_MMX_NATIVE)
__m64 m64;
#endif
} simde_int16x4_private;
typedef union {
SIMDE_ARM_NEON_DECLARE_VECTOR(int32_t, values, 8);
#if defined(SIMDE_X86_MMX_NATIVE)
__m64 m64;
#endif
} simde_int32x2_private;
typedef union {
SIMDE_ARM_NEON_DECLARE_VECTOR(int64_t, values, 8);
#if defined(SIMDE_X86_MMX_NATIVE)
__m64 m64;
#endif
} simde_int64x1_private;
typedef union {
SIMDE_ARM_NEON_DECLARE_VECTOR(uint8_t, values, 8);
#if defined(SIMDE_X86_MMX_NATIVE)
__m64 m64;
#endif
} simde_uint8x8_private;
typedef union {
SIMDE_ARM_NEON_DECLARE_VECTOR(uint16_t, values, 8);
#if defined(SIMDE_X86_MMX_NATIVE)
__m64 m64;
#endif
} simde_uint16x4_private;
typedef union {
SIMDE_ARM_NEON_DECLARE_VECTOR(uint32_t, values, 8);
#if defined(SIMDE_X86_MMX_NATIVE)
__m64 m64;
#endif
} simde_uint32x2_private;
typedef union {
SIMDE_ARM_NEON_DECLARE_VECTOR(uint64_t, values, 8);
#if defined(SIMDE_X86_MMX_NATIVE)
__m64 m64;
#endif
} simde_uint64x1_private;
/* float16 only gets the vector form when the f16 API has a real ABI;
 * otherwise fall back to a plain 4-element array. */
typedef union {
#if SIMDE_FLOAT16_API != SIMDE_FLOAT16_API_PORTABLE && SIMDE_FLOAT16_API != SIMDE_FLOAT16_API_FP16_NO_ABI
SIMDE_ARM_NEON_DECLARE_VECTOR(simde_float16, values, 8);
#else
simde_float16 values[4];
#endif
#if defined(SIMDE_X86_MMX_NATIVE)
__m64 m64;
#endif
} simde_float16x4_private;
typedef union {
SIMDE_ARM_NEON_DECLARE_VECTOR(simde_float32, values, 8);
#if defined(SIMDE_X86_MMX_NATIVE)
__m64 m64;
#endif
} simde_float32x2_private;
typedef union {
SIMDE_ARM_NEON_DECLARE_VECTOR(simde_float64, values, 8);
#if defined(SIMDE_X86_MMX_NATIVE)
__m64 m64;
#endif
} simde_float64x1_private;
/* 128-bit (Q-register) private representations. Each union overlays the
 * portable `values` member with the native SSE2 (__m128*), NEON, or WASM
 * (v128_t) register when the corresponding backend is available.
 * NOTE(review): the unsigned unions expose *signed* NEON members
 * (e.g. int8x16_t in simde_uint8x16_private) — confirm against upstream
 * simde whether this is intentional before relying on the `neon` member. */
typedef union {
SIMDE_ARM_NEON_DECLARE_VECTOR(int8_t, values, 16);
#if defined(SIMDE_X86_SSE2_NATIVE)
__m128i m128i;
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int8x16_t neon;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_int8x16_private;
typedef union {
SIMDE_ARM_NEON_DECLARE_VECTOR(int16_t, values, 16);
#if defined(SIMDE_X86_SSE2_NATIVE)
__m128i m128i;
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int16x8_t neon;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_int16x8_private;
typedef union {
SIMDE_ARM_NEON_DECLARE_VECTOR(int32_t, values, 16);
#if defined(SIMDE_X86_SSE2_NATIVE)
__m128i m128i;
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int32x4_t neon;
#endif
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
// SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_int32x4_private;
typedef union {
SIMDE_ARM_NEON_DECLARE_VECTOR(int64_t, values, 16);
#if defined(SIMDE_X86_SSE2_NATIVE)
__m128i m128i;
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int64x2_t neon;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_int64x2_private;
typedef union {
SIMDE_ARM_NEON_DECLARE_VECTOR(uint8_t, values, 16);
#if defined(SIMDE_X86_SSE2_NATIVE)
__m128i m128i;
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int8x16_t neon;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_uint8x16_private;
typedef union {
SIMDE_ARM_NEON_DECLARE_VECTOR(uint16_t, values, 16);
#if defined(SIMDE_X86_SSE2_NATIVE)
__m128i m128i;
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int16x8_t neon;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_uint16x8_private;
typedef union {
SIMDE_ARM_NEON_DECLARE_VECTOR(uint32_t, values, 16);
#if defined(SIMDE_X86_SSE2_NATIVE)
__m128i m128i;
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int32x4_t neon;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_uint32x4_private;
typedef union {
SIMDE_ARM_NEON_DECLARE_VECTOR(uint64_t, values, 16);
#if defined(SIMDE_X86_SSE2_NATIVE)
__m128i m128i;
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int64x2_t neon;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_uint64x2_private;
/* float16 only gets the vector form when the f16 API has a real ABI;
 * otherwise fall back to a plain 8-element array. */
typedef union {
#if SIMDE_FLOAT16_API != SIMDE_FLOAT16_API_PORTABLE && SIMDE_FLOAT16_API != SIMDE_FLOAT16_API_FP16_NO_ABI
SIMDE_ARM_NEON_DECLARE_VECTOR(simde_float16, values, 16);
#else
simde_float16 values[8];
#endif
#if defined(SIMDE_X86_SSE2_NATIVE)
__m128 m128;
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int32x4_t neon;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_float16x8_private;
typedef union {
SIMDE_ARM_NEON_DECLARE_VECTOR(simde_float32, values, 16);
#if defined(SIMDE_X86_SSE2_NATIVE)
__m128 m128;
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int32x4_t neon;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_float32x4_private;
typedef union {
SIMDE_ARM_NEON_DECLARE_VECTOR(simde_float64, values, 16);
#if defined(SIMDE_X86_SSE2_NATIVE)
__m128d m128d;
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int64x2_t neon;
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t v128;
#endif
} simde_float64x2_private;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
typedef float32_t simde_float32_t;
typedef int8x8_t simde_int8x8_t;
typedef int16x4_t simde_int16x4_t;
typedef int32x2_t simde_int32x2_t;
typedef int64x1_t simde_int64x1_t;
typedef uint8x8_t simde_uint8x8_t;
typedef uint16x4_t simde_uint16x4_t;
typedef uint32x2_t simde_uint32x2_t;
typedef uint64x1_t simde_uint64x1_t;
typedef float32x2_t simde_float32x2_t;
typedef int8x16_t simde_int8x16_t;
typedef int16x8_t simde_int16x8_t;
typedef int32x4_t simde_int32x4_t;
typedef int64x2_t simde_int64x2_t;
typedef uint8x16_t simde_uint8x16_t;
typedef uint16x8_t simde_uint16x8_t;
typedef uint32x4_t simde_uint32x4_t;
typedef uint64x2_t simde_uint64x2_t;
typedef float32x4_t simde_float32x4_t;
typedef int8x8x2_t simde_int8x8x2_t;
typedef int16x4x2_t simde_int16x4x2_t;
typedef int32x2x2_t simde_int32x2x2_t;
typedef int64x1x2_t simde_int64x1x2_t;
typedef uint8x8x2_t simde_uint8x8x2_t;
typedef uint16x4x2_t simde_uint16x4x2_t;
typedef uint32x2x2_t simde_uint32x2x2_t;
typedef uint64x1x2_t simde_uint64x1x2_t;
typedef float32x2x2_t simde_float32x2x2_t;
typedef int8x16x2_t simde_int8x16x2_t;
typedef int16x8x2_t simde_int16x8x2_t;
typedef int32x4x2_t simde_int32x4x2_t;
typedef int64x2x2_t simde_int64x2x2_t;
typedef uint8x16x2_t simde_uint8x16x2_t;
typedef uint16x8x2_t simde_uint16x8x2_t;
typedef uint32x4x2_t simde_uint32x4x2_t;
typedef uint64x2x2_t simde_uint64x2x2_t;
typedef float32x4x2_t simde_float32x4x2_t;
typedef int8x8x3_t simde_int8x8x3_t;
typedef int16x4x3_t simde_int16x4x3_t;
typedef int32x2x3_t simde_int32x2x3_t;
typedef int64x1x3_t simde_int64x1x3_t;
typedef uint8x8x3_t simde_uint8x8x3_t;
typedef uint16x4x3_t simde_uint16x4x3_t;
typedef uint32x2x3_t simde_uint32x2x3_t;
typedef uint64x1x3_t simde_uint64x1x3_t;
typedef float32x2x3_t simde_float32x2x3_t;
typedef int8x16x3_t simde_int8x16x3_t;
typedef int16x8x3_t simde_int16x8x3_t;
typedef int32x4x3_t simde_int32x4x3_t;
typedef int64x2x3_t simde_int64x2x3_t;
typedef uint8x16x3_t simde_uint8x16x3_t;
typedef uint16x8x3_t simde_uint16x8x3_t;
typedef uint32x4x3_t simde_uint32x4x3_t;
typedef uint64x2x3_t simde_uint64x2x3_t;
typedef float32x4x3_t simde_float32x4x3_t;
typedef int8x8x4_t simde_int8x8x4_t;
typedef int16x4x4_t simde_int16x4x4_t;
typedef int32x2x4_t simde_int32x2x4_t;
typedef int64x1x4_t simde_int64x1x4_t;
typedef uint8x8x4_t simde_uint8x8x4_t;
typedef uint16x4x4_t simde_uint16x4x4_t;
typedef uint32x2x4_t simde_uint32x2x4_t;
typedef uint64x1x4_t simde_uint64x1x4_t;
typedef float32x2x4_t simde_float32x2x4_t;
typedef int8x16x4_t simde_int8x16x4_t;
typedef int16x8x4_t simde_int16x8x4_t;
typedef int32x4x4_t simde_int32x4x4_t;
typedef int64x2x4_t simde_int64x2x4_t;
typedef uint8x16x4_t simde_uint8x16x4_t;
typedef uint16x8x4_t simde_uint16x8x4_t;
typedef uint32x4x4_t simde_uint32x4x4_t;
typedef uint64x2x4_t simde_uint64x2x4_t;
typedef float32x4x4_t simde_float32x4x4_t;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
typedef float64_t simde_float64_t;
typedef float64x1_t simde_float64x1_t;
typedef float64x2_t simde_float64x2_t;
typedef float64x1x2_t simde_float64x1x2_t;
typedef float64x2x2_t simde_float64x2x2_t;
typedef float64x1x3_t simde_float64x1x3_t;
typedef float64x2x3_t simde_float64x2x3_t;
typedef float64x1x4_t simde_float64x1x4_t;
typedef float64x2x4_t simde_float64x2x4_t;
#else
#define SIMDE_ARM_NEON_NEED_PORTABLE_F64X1
#define SIMDE_ARM_NEON_NEED_PORTABLE_F64X2
#define SIMDE_ARM_NEON_NEED_PORTABLE_F64
#define SIMDE_ARM_NEON_NEED_PORTABLE_F64X1XN
#define SIMDE_ARM_NEON_NEED_PORTABLE_F64X2XN
#endif
#if SIMDE_FLOAT16_API == SIMDE_FLOAT16_API_FP16
typedef float16_t simde_float16_t;
typedef float16x4_t simde_float16x4_t;
typedef float16x8_t simde_float16x8_t;
#else
#define SIMDE_ARM_NEON_NEED_PORTABLE_F16
#endif
#elif (defined(SIMDE_X86_MMX_NATIVE) || defined(SIMDE_X86_SSE_NATIVE)) && defined(SIMDE_ARM_NEON_FORCE_NATIVE_TYPES)
#define SIMDE_ARM_NEON_NEED_PORTABLE_F32
#define SIMDE_ARM_NEON_NEED_PORTABLE_F64
#define SIMDE_ARM_NEON_NEED_PORTABLE_VXN
#define SIMDE_ARM_NEON_NEED_PORTABLE_F64X1XN
#define SIMDE_ARM_NEON_NEED_PORTABLE_F64X2XN
#if defined(SIMDE_X86_MMX_NATIVE)
typedef __m64 simde_int8x8_t;
typedef __m64 simde_int16x4_t;
typedef __m64 simde_int32x2_t;
typedef __m64 simde_int64x1_t;
typedef __m64 simde_uint8x8_t;
typedef __m64 simde_uint16x4_t;
typedef __m64 simde_uint32x2_t;
typedef __m64 simde_uint64x1_t;
typedef __m64 simde_float32x2_t;
typedef __m64 simde_float64x1_t;
#else
#define SIMDE_ARM_NEON_NEED_PORTABLE_I8X8
#define SIMDE_ARM_NEON_NEED_PORTABLE_I16X4
#define SIMDE_ARM_NEON_NEED_PORTABLE_I32X2
#define SIMDE_ARM_NEON_NEED_PORTABLE_I64X1
#define SIMDE_ARM_NEON_NEED_PORTABLE_U8X8
#define SIMDE_ARM_NEON_NEED_PORTABLE_U16X4
#define SIMDE_ARM_NEON_NEED_PORTABLE_U32X2
#define SIMDE_ARM_NEON_NEED_PORTABLE_U64X1
#define SIMDE_ARM_NEON_NEED_PORTABLE_F32X2
#define SIMDE_ARM_NEON_NEED_PORTABLE_F64X1
#endif
#if defined(SIMDE_X86_SSE_NATIVE)
typedef __m128 simde_float32x4_t;
#else
#define SIMDE_ARM_NEON_NEED_PORTABLE_F32X4
#endif
#if defined(SIMDE_X86_SSE2_NATIVE)
typedef __m128i simde_int8x16_t;
typedef __m128i simde_int16x8_t;
typedef __m128i simde_int32x4_t;
typedef __m128i simde_int64x2_t;
typedef __m128i simde_uint8x16_t;
typedef __m128i simde_uint16x8_t;
typedef __m128i simde_uint32x4_t;
typedef __m128i simde_uint64x2_t;
typedef __m128d simde_float64x2_t;
#else
#define SIMDE_ARM_NEON_NEED_PORTABLE_I8X16
#define SIMDE_ARM_NEON_NEED_PORTABLE_I16X8
#define SIMDE_ARM_NEON_NEED_PORTABLE_I32X4
#define SIMDE_ARM_NEON_NEED_PORTABLE_I64X2
#define SIMDE_ARM_NEON_NEED_PORTABLE_U8X16
#define SIMDE_ARM_NEON_NEED_PORTABLE_U16X8
#define SIMDE_ARM_NEON_NEED_PORTABLE_U32X4
#define SIMDE_ARM_NEON_NEED_PORTABLE_U64X2
#define SIMDE_ARM_NEON_NEED_PORTABLE_F64X2
#endif
#define SIMDE_ARM_NEON_NEED_PORTABLE_F16
#elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_ARM_NEON_FORCE_NATIVE_TYPES)
#define SIMDE_ARM_NEON_NEED_PORTABLE_F32
#define SIMDE_ARM_NEON_NEED_PORTABLE_F64
#define SIMDE_ARM_NEON_NEED_PORTABLE_64BIT
#define SIMDE_ARM_NEON_NEED_PORTABLE_F16
#define SIMDE_ARM_NEON_NEED_PORTABLE_F64X1XN
#define SIMDE_ARM_NEON_NEED_PORTABLE_F64X2XN
#define SIMDE_ARM_NEON_NEED_PORTABLE_VXN
typedef v128_t simde_int8x16_t;
typedef v128_t simde_int16x8_t;
typedef v128_t simde_int32x4_t;
typedef v128_t simde_int64x2_t;
typedef v128_t simde_uint8x16_t;
typedef v128_t simde_uint16x8_t;
typedef v128_t simde_uint32x4_t;
typedef v128_t simde_uint64x2_t;
typedef v128_t simde_float32x4_t;
typedef v128_t simde_float64x2_t;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
#define SIMDE_ARM_NEON_NEED_PORTABLE_F32
#define SIMDE_ARM_NEON_NEED_PORTABLE_F64
#define SIMDE_ARM_NEON_NEED_PORTABLE_64BIT
#define SIMDE_ARM_NEON_NEED_PORTABLE_F64X1XN
#define SIMDE_ARM_NEON_NEED_PORTABLE_F64X2XN
#define SIMDE_ARM_NEON_NEED_PORTABLE_VXN
typedef SIMDE_POWER_ALTIVEC_VECTOR(signed char) simde_int8x16_t;
typedef SIMDE_POWER_ALTIVEC_VECTOR(signed short) simde_int16x8_t;
typedef SIMDE_POWER_ALTIVEC_VECTOR(signed int) simde_int32x4_t;
typedef SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) simde_uint8x16_t;
typedef SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) simde_uint16x8_t;
typedef SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) simde_uint32x4_t;
typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde_float32x4_t;
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
typedef SIMDE_POWER_ALTIVEC_VECTOR(signed long long) simde_int64x2_t;
typedef SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) simde_uint64x2_t;
typedef SIMDE_POWER_ALTIVEC_VECTOR(double) simde_float64x2_t;
#else
#define SIMDE_ARM_NEON_NEED_PORTABLE_I64X2
#define SIMDE_ARM_NEON_NEED_PORTABLE_U64X2
#define SIMDE_ARM_NEON_NEED_PORTABLE_F64X2
#define SIMDE_ARM_NEON_NEED_PORTABLE_F16
#endif
#define SIMDE_ARM_NEON_NEED_PORTABLE_F16
#elif defined(SIMDE_VECTOR)
/* GCC/Clang generic vector extensions: build each NEON type directly as a
 * native vector of the correct byte width (8 bytes = 64-bit "d" types,
 * 16 bytes = 128-bit "q" types). */
typedef simde_float32 simde_float32_t;
typedef simde_float64 simde_float64_t;
typedef int8_t simde_int8x8_t SIMDE_VECTOR(8);
typedef int16_t simde_int16x4_t SIMDE_VECTOR(8);
typedef int32_t simde_int32x2_t SIMDE_VECTOR(8);
typedef int64_t simde_int64x1_t SIMDE_VECTOR(8);
typedef uint8_t simde_uint8x8_t SIMDE_VECTOR(8);
typedef uint16_t simde_uint16x4_t SIMDE_VECTOR(8);
typedef uint32_t simde_uint32x2_t SIMDE_VECTOR(8);
typedef uint64_t simde_uint64x1_t SIMDE_VECTOR(8);
typedef simde_float32_t simde_float32x2_t SIMDE_VECTOR(8);
typedef simde_float64_t simde_float64x1_t SIMDE_VECTOR(8);
typedef int8_t simde_int8x16_t SIMDE_VECTOR(16);
typedef int16_t simde_int16x8_t SIMDE_VECTOR(16);
typedef int32_t simde_int32x4_t SIMDE_VECTOR(16);
typedef int64_t simde_int64x2_t SIMDE_VECTOR(16);
typedef uint8_t simde_uint8x16_t SIMDE_VECTOR(16);
typedef uint16_t simde_uint16x8_t SIMDE_VECTOR(16);
typedef uint32_t simde_uint32x4_t SIMDE_VECTOR(16);
typedef uint64_t simde_uint64x2_t SIMDE_VECTOR(16);
typedef simde_float32_t simde_float32x4_t SIMDE_VECTOR(16);
typedef simde_float64_t simde_float64x2_t SIMDE_VECTOR(16);
/* float16 vectors only when the target actually supports an FP16 type. */
#if defined(SIMDE_ARM_NEON_FP16)
typedef simde_float16 simde_float16_t;
typedef simde_float16_t simde_float16x4_t SIMDE_VECTOR(8);
typedef simde_float16_t simde_float16x8_t SIMDE_VECTOR(16);
#else
#define SIMDE_ARM_NEON_NEED_PORTABLE_F16
#endif
/* The struct-of-vectors (xN) wrapper types are always the portable structs. */
#define SIMDE_ARM_NEON_NEED_PORTABLE_VXN
#define SIMDE_ARM_NEON_NEED_PORTABLE_F64X1XN
#define SIMDE_ARM_NEON_NEED_PORTABLE_F64X2XN
#else
/* No usable native SIMD support detected: every type falls back to the
 * portable struct-based "private" representations. */
#define SIMDE_ARM_NEON_NEED_PORTABLE_F16
#define SIMDE_ARM_NEON_NEED_PORTABLE_F32
#define SIMDE_ARM_NEON_NEED_PORTABLE_F64
#define SIMDE_ARM_NEON_NEED_PORTABLE_64BIT
#define SIMDE_ARM_NEON_NEED_PORTABLE_128BIT
#define SIMDE_ARM_NEON_NEED_PORTABLE_VXN
#define SIMDE_ARM_NEON_NEED_PORTABLE_F64X1XN
#define SIMDE_ARM_NEON_NEED_PORTABLE_F64X2XN
#endif
/* For every type flagged above (individually or via the 64BIT/128BIT group
 * flags), alias the public type to its portable struct representation. */
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_I8X8) || defined(SIMDE_ARM_NEON_NEED_PORTABLE_64BIT)
typedef simde_int8x8_private simde_int8x8_t;
#endif
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_I16X4) || defined(SIMDE_ARM_NEON_NEED_PORTABLE_64BIT)
typedef simde_int16x4_private simde_int16x4_t;
#endif
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_I32X2) || defined(SIMDE_ARM_NEON_NEED_PORTABLE_64BIT)
typedef simde_int32x2_private simde_int32x2_t;
#endif
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_I64X1) || defined(SIMDE_ARM_NEON_NEED_PORTABLE_64BIT)
typedef simde_int64x1_private simde_int64x1_t;
#endif
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_U8X8) || defined(SIMDE_ARM_NEON_NEED_PORTABLE_64BIT)
typedef simde_uint8x8_private simde_uint8x8_t;
#endif
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_U16X4) || defined(SIMDE_ARM_NEON_NEED_PORTABLE_64BIT)
typedef simde_uint16x4_private simde_uint16x4_t;
#endif
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_U32X2) || defined(SIMDE_ARM_NEON_NEED_PORTABLE_64BIT)
typedef simde_uint32x2_private simde_uint32x2_t;
#endif
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_U64X1) || defined(SIMDE_ARM_NEON_NEED_PORTABLE_64BIT)
typedef simde_uint64x1_private simde_uint64x1_t;
#endif
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_F32X2) || defined(SIMDE_ARM_NEON_NEED_PORTABLE_64BIT)
typedef simde_float32x2_private simde_float32x2_t;
#endif
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_F64X1) || defined(SIMDE_ARM_NEON_NEED_PORTABLE_64BIT)
typedef simde_float64x1_private simde_float64x1_t;
#endif
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_I8X16) || defined(SIMDE_ARM_NEON_NEED_PORTABLE_128BIT)
typedef simde_int8x16_private simde_int8x16_t;
#endif
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_I16X8) || defined(SIMDE_ARM_NEON_NEED_PORTABLE_128BIT)
typedef simde_int16x8_private simde_int16x8_t;
#endif
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_I32X4) || defined(SIMDE_ARM_NEON_NEED_PORTABLE_128BIT)
typedef simde_int32x4_private simde_int32x4_t;
#endif
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_I64X2) || defined(SIMDE_ARM_NEON_NEED_PORTABLE_128BIT)
typedef simde_int64x2_private simde_int64x2_t;
#endif
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_U8X16) || defined(SIMDE_ARM_NEON_NEED_PORTABLE_128BIT)
typedef simde_uint8x16_private simde_uint8x16_t;
#endif
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_U16X8) || defined(SIMDE_ARM_NEON_NEED_PORTABLE_128BIT)
typedef simde_uint16x8_private simde_uint16x8_t;
#endif
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_U32X4) || defined(SIMDE_ARM_NEON_NEED_PORTABLE_128BIT)
typedef simde_uint32x4_private simde_uint32x4_t;
#endif
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_U64X2) || defined(SIMDE_ARM_NEON_NEED_PORTABLE_128BIT)
typedef simde_uint64x2_private simde_uint64x2_t;
#endif
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_F32X4) || defined(SIMDE_ARM_NEON_NEED_PORTABLE_128BIT)
typedef simde_float32x4_private simde_float32x4_t;
#endif
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_F64X2) || defined(SIMDE_ARM_NEON_NEED_PORTABLE_128BIT)
typedef simde_float64x2_private simde_float64x2_t;
#endif
/* Portable scalar float aliases. */
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_F16)
typedef simde_float16 simde_float16_t;
typedef simde_float16x4_private simde_float16x4_t;
typedef simde_float16x8_private simde_float16x8_t;
#endif
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_F32)
typedef simde_float32 simde_float32_t;
#endif
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_F64)
typedef simde_float64 simde_float64_t;
#endif
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_VXN) && !defined(SIMDE_BUG_INTEL_857088)
/* Portable definitions of arm_neon.h's "vector array" types (int8x8x2_t
 * etc.): plain structs holding 2, 3 or 4 vectors in a val[] array.
 * Two-element (x2) variants: */
typedef struct simde_int8x8x2_t {
simde_int8x8_t val[2];
} simde_int8x8x2_t;
typedef struct simde_int16x4x2_t {
simde_int16x4_t val[2];
} simde_int16x4x2_t;
typedef struct simde_int32x2x2_t {
simde_int32x2_t val[2];
} simde_int32x2x2_t;
typedef struct simde_int64x1x2_t {
simde_int64x1_t val[2];
} simde_int64x1x2_t;
typedef struct simde_uint8x8x2_t {
simde_uint8x8_t val[2];
} simde_uint8x8x2_t;
typedef struct simde_uint16x4x2_t {
simde_uint16x4_t val[2];
} simde_uint16x4x2_t;
typedef struct simde_uint32x2x2_t {
simde_uint32x2_t val[2];
} simde_uint32x2x2_t;
typedef struct simde_uint64x1x2_t {
simde_uint64x1_t val[2];
} simde_uint64x1x2_t;
typedef struct simde_float32x2x2_t {
simde_float32x2_t val[2];
} simde_float32x2x2_t;
typedef struct simde_int8x16x2_t {
simde_int8x16_t val[2];
} simde_int8x16x2_t;
typedef struct simde_int16x8x2_t {
simde_int16x8_t val[2];
} simde_int16x8x2_t;
typedef struct simde_int32x4x2_t {
simde_int32x4_t val[2];
} simde_int32x4x2_t;
typedef struct simde_int64x2x2_t {
simde_int64x2_t val[2];
} simde_int64x2x2_t;
typedef struct simde_uint8x16x2_t {
simde_uint8x16_t val[2];
} simde_uint8x16x2_t;
typedef struct simde_uint16x8x2_t {
simde_uint16x8_t val[2];
} simde_uint16x8x2_t;
typedef struct simde_uint32x4x2_t {
simde_uint32x4_t val[2];
} simde_uint32x4x2_t;
typedef struct simde_uint64x2x2_t {
simde_uint64x2_t val[2];
} simde_uint64x2x2_t;
typedef struct simde_float32x4x2_t {
simde_float32x4_t val[2];
} simde_float32x4x2_t;
/* Three-element (x3) variants: */
typedef struct simde_int8x8x3_t {
simde_int8x8_t val[3];
} simde_int8x8x3_t;
typedef struct simde_int16x4x3_t {
simde_int16x4_t val[3];
} simde_int16x4x3_t;
typedef struct simde_int32x2x3_t {
simde_int32x2_t val[3];
} simde_int32x2x3_t;
typedef struct simde_int64x1x3_t {
simde_int64x1_t val[3];
} simde_int64x1x3_t;
typedef struct simde_uint8x8x3_t {
simde_uint8x8_t val[3];
} simde_uint8x8x3_t;
typedef struct simde_uint16x4x3_t {
simde_uint16x4_t val[3];
} simde_uint16x4x3_t;
typedef struct simde_uint32x2x3_t {
simde_uint32x2_t val[3];
} simde_uint32x2x3_t;
typedef struct simde_uint64x1x3_t {
simde_uint64x1_t val[3];
} simde_uint64x1x3_t;
typedef struct simde_float32x2x3_t {
simde_float32x2_t val[3];
} simde_float32x2x3_t;
typedef struct simde_int8x16x3_t {
simde_int8x16_t val[3];
} simde_int8x16x3_t;
typedef struct simde_int16x8x3_t {
simde_int16x8_t val[3];
} simde_int16x8x3_t;
typedef struct simde_int32x4x3_t {
simde_int32x4_t val[3];
} simde_int32x4x3_t;
typedef struct simde_int64x2x3_t {
simde_int64x2_t val[3];
} simde_int64x2x3_t;
typedef struct simde_uint8x16x3_t {
simde_uint8x16_t val[3];
} simde_uint8x16x3_t;
typedef struct simde_uint16x8x3_t {
simde_uint16x8_t val[3];
} simde_uint16x8x3_t;
typedef struct simde_uint32x4x3_t {
simde_uint32x4_t val[3];
} simde_uint32x4x3_t;
typedef struct simde_uint64x2x3_t {
simde_uint64x2_t val[3];
} simde_uint64x2x3_t;
typedef struct simde_float32x4x3_t {
simde_float32x4_t val[3];
} simde_float32x4x3_t;
/* Four-element (x4) variants: */
typedef struct simde_int8x8x4_t {
simde_int8x8_t val[4];
} simde_int8x8x4_t;
typedef struct simde_int16x4x4_t {
simde_int16x4_t val[4];
} simde_int16x4x4_t;
typedef struct simde_int32x2x4_t {
simde_int32x2_t val[4];
} simde_int32x2x4_t;
typedef struct simde_int64x1x4_t {
simde_int64x1_t val[4];
} simde_int64x1x4_t;
typedef struct simde_uint8x8x4_t {
simde_uint8x8_t val[4];
} simde_uint8x8x4_t;
typedef struct simde_uint16x4x4_t {
simde_uint16x4_t val[4];
} simde_uint16x4x4_t;
typedef struct simde_uint32x2x4_t {
simde_uint32x2_t val[4];
} simde_uint32x2x4_t;
typedef struct simde_uint64x1x4_t {
simde_uint64x1_t val[4];
} simde_uint64x1x4_t;
typedef struct simde_float32x2x4_t {
simde_float32x2_t val[4];
} simde_float32x2x4_t;
typedef struct simde_int8x16x4_t {
simde_int8x16_t val[4];
} simde_int8x16x4_t;
typedef struct simde_int16x8x4_t {
simde_int16x8_t val[4];
} simde_int16x8x4_t;
typedef struct simde_int32x4x4_t {
simde_int32x4_t val[4];
} simde_int32x4x4_t;
typedef struct simde_int64x2x4_t {
simde_int64x2_t val[4];
} simde_int64x2x4_t;
typedef struct simde_uint8x16x4_t {
simde_uint8x16_t val[4];
} simde_uint8x16x4_t;
typedef struct simde_uint16x8x4_t {
simde_uint16x8_t val[4];
} simde_uint16x8x4_t;
typedef struct simde_uint32x4x4_t {
simde_uint32x4_t val[4];
} simde_uint32x4x4_t;
typedef struct simde_uint64x2x4_t {
simde_uint64x2_t val[4];
} simde_uint64x2x4_t;
typedef struct simde_float32x4x4_t {
simde_float32x4_t val[4];
} simde_float32x4x4_t;
#endif
/* float64 vector-array wrappers, gated separately because float64 NEON
 * types only exist on AArch64. */
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_F64X1XN)
typedef struct simde_float64x1x2_t {
simde_float64x1_t val[2];
} simde_float64x1x2_t;
typedef struct simde_float64x1x3_t {
simde_float64x1_t val[3];
} simde_float64x1x3_t;
typedef struct simde_float64x1x4_t {
simde_float64x1_t val[4];
} simde_float64x1x4_t;
#endif
#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_F64X2XN)
typedef struct simde_float64x2x2_t {
simde_float64x2_t val[2];
} simde_float64x2x2_t;
typedef struct simde_float64x2x3_t {
simde_float64x2_t val[3];
} simde_float64x2x3_t;
typedef struct simde_float64x2x4_t {
simde_float64x2_t val[4];
} simde_float64x2x4_t;
#endif
/* When native aliasing is requested, expose the simde types under the exact
 * arm_neon.h names so existing NEON source compiles unmodified. */
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) || defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
typedef simde_float16_t float16_t;
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
typedef simde_float32_t float32_t;
typedef simde_int8x8_t int8x8_t;
typedef simde_int16x4_t int16x4_t;
typedef simde_int32x2_t int32x2_t;
typedef simde_int64x1_t int64x1_t;
typedef simde_uint8x8_t uint8x8_t;
typedef simde_uint16x4_t uint16x4_t;
typedef simde_uint32x2_t uint32x2_t;
typedef simde_uint64x1_t uint64x1_t;
typedef simde_float32x2_t float32x2_t;
typedef simde_int8x16_t int8x16_t;
typedef simde_int16x8_t int16x8_t;
typedef simde_int32x4_t int32x4_t;
typedef simde_int64x2_t int64x2_t;
typedef simde_uint8x16_t uint8x16_t;
typedef simde_uint16x8_t uint16x8_t;
typedef simde_uint32x4_t uint32x4_t;
typedef simde_uint64x2_t uint64x2_t;
typedef simde_float32x4_t float32x4_t;
typedef simde_int8x8x2_t int8x8x2_t;
typedef simde_int16x4x2_t int16x4x2_t;
typedef simde_int32x2x2_t int32x2x2_t;
typedef simde_int64x1x2_t int64x1x2_t;
typedef simde_uint8x8x2_t uint8x8x2_t;
typedef simde_uint16x4x2_t uint16x4x2_t;
typedef simde_uint32x2x2_t uint32x2x2_t;
typedef simde_uint64x1x2_t uint64x1x2_t;
typedef simde_float32x2x2_t float32x2x2_t;
typedef simde_int8x16x2_t int8x16x2_t;
typedef simde_int16x8x2_t int16x8x2_t;
typedef simde_int32x4x2_t int32x4x2_t;
typedef simde_int64x2x2_t int64x2x2_t;
typedef simde_uint8x16x2_t uint8x16x2_t;
typedef simde_uint16x8x2_t uint16x8x2_t;
typedef simde_uint32x4x2_t uint32x4x2_t;
typedef simde_uint64x2x2_t uint64x2x2_t;
typedef simde_float32x4x2_t float32x4x2_t;
typedef simde_int8x8x3_t int8x8x3_t;
typedef simde_int16x4x3_t int16x4x3_t;
typedef simde_int32x2x3_t int32x2x3_t;
typedef simde_int64x1x3_t int64x1x3_t;
typedef simde_uint8x8x3_t uint8x8x3_t;
typedef simde_uint16x4x3_t uint16x4x3_t;
typedef simde_uint32x2x3_t uint32x2x3_t;
typedef simde_uint64x1x3_t uint64x1x3_t;
typedef simde_float32x2x3_t float32x2x3_t;
typedef simde_int8x16x3_t int8x16x3_t;
typedef simde_int16x8x3_t int16x8x3_t;
typedef simde_int32x4x3_t int32x4x3_t;
typedef simde_int64x2x3_t int64x2x3_t;
typedef simde_uint8x16x3_t uint8x16x3_t;
typedef simde_uint16x8x3_t uint16x8x3_t;
typedef simde_uint32x4x3_t uint32x4x3_t;
typedef simde_uint64x2x3_t uint64x2x3_t;
typedef simde_float32x4x3_t float32x4x3_t;
typedef simde_int8x8x4_t int8x8x4_t;
typedef simde_int16x4x4_t int16x4x4_t;
typedef simde_int32x2x4_t int32x2x4_t;
typedef simde_int64x1x4_t int64x1x4_t;
typedef simde_uint8x8x4_t uint8x8x4_t;
typedef simde_uint16x4x4_t uint16x4x4_t;
typedef simde_uint32x2x4_t uint32x2x4_t;
typedef simde_uint64x1x4_t uint64x1x4_t;
typedef simde_float32x2x4_t float32x2x4_t;
typedef simde_int8x16x4_t int8x16x4_t;
typedef simde_int16x8x4_t int16x8x4_t;
typedef simde_int32x4x4_t int32x4x4_t;
typedef simde_int64x2x4_t int64x2x4_t;
typedef simde_uint8x16x4_t uint8x16x4_t;
typedef simde_uint16x8x4_t uint16x8x4_t;
typedef simde_uint32x4x4_t uint32x4x4_t;
typedef simde_uint64x2x4_t uint64x2x4_t;
typedef simde_float32x4x4_t float32x4x4_t;
#endif
/* Types that only exist on AArch64 (double-precision and float16 vectors). */
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
typedef simde_float64_t float64_t;
typedef simde_float16x4_t float16x4_t;
typedef simde_float64x1_t float64x1_t;
typedef simde_float16x8_t float16x8_t;
typedef simde_float64x2_t float64x2_t;
typedef simde_float64x1x2_t float64x1x2_t;
typedef simde_float64x2x2_t float64x2x2_t;
typedef simde_float64x1x3_t float64x1x3_t;
typedef simde_float64x2x3_t float64x2x3_t;
typedef simde_float64x1x4_t float64x1x4_t;
typedef simde_float64x2x4_t float64x2x4_t;
#endif
/* Bit-reinterpreting converters between simde NEON types and the host
 * platform's native vector types, generated with
 * SIMDE_DEFINE_CONVERSION_FUNCTION_.
 * 64-bit types <-> x86 MMX __m64: */
#if defined(SIMDE_X86_MMX_NATIVE)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int8x8_to_m64, __m64, simde_int8x8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int16x4_to_m64, __m64, simde_int16x4_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int32x2_to_m64, __m64, simde_int32x2_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int64x1_to_m64, __m64, simde_int64x1_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint8x8_to_m64, __m64, simde_uint8x8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint16x4_to_m64, __m64, simde_uint16x4_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint32x2_to_m64, __m64, simde_uint32x2_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint64x1_to_m64, __m64, simde_uint64x1_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float32x2_to_m64, __m64, simde_float32x2_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float64x1_to_m64, __m64, simde_float64x1_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int8x8_from_m64, simde_int8x8_t, __m64)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int16x4_from_m64, simde_int16x4_t, __m64)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int32x2_from_m64, simde_int32x2_t, __m64)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int64x1_from_m64, simde_int64x1_t, __m64)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint8x8_from_m64, simde_uint8x8_t, __m64)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint16x4_from_m64, simde_uint16x4_t, __m64)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint32x2_from_m64, simde_uint32x2_t, __m64)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint64x1_from_m64, simde_uint64x1_t, __m64)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float32x2_from_m64, simde_float32x2_t, __m64)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float64x1_from_m64, simde_float64x1_t, __m64)
#endif
/* 128-bit float32 <-> SSE __m128: */
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float32x4_to_m128, __m128, simde_float32x4_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float32x4_from_m128, simde_float32x4_t, __m128)
#endif
/* 128-bit integer/double types <-> SSE2 __m128i / __m128d: */
#if defined(SIMDE_X86_SSE2_NATIVE)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int8x16_to_m128i, __m128i, simde_int8x16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int16x8_to_m128i, __m128i, simde_int16x8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int32x4_to_m128i, __m128i, simde_int32x4_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int64x2_to_m128i, __m128i, simde_int64x2_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint8x16_to_m128i, __m128i, simde_uint8x16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint16x8_to_m128i, __m128i, simde_uint16x8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint32x4_to_m128i, __m128i, simde_uint32x4_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint64x2_to_m128i, __m128i, simde_uint64x2_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float64x2_to_m128d, __m128d, simde_float64x2_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int8x16_from_m128i, simde_int8x16_t, __m128i)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int16x8_from_m128i, simde_int16x8_t, __m128i)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int32x4_from_m128i, simde_int32x4_t, __m128i)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int64x2_from_m128i, simde_int64x2_t, __m128i)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint8x16_from_m128i, simde_uint8x16_t, __m128i)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint16x8_from_m128i, simde_uint16x8_t, __m128i)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint32x4_from_m128i, simde_uint32x4_t, __m128i)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint64x2_from_m128i, simde_uint64x2_t, __m128i)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float64x2_from_m128d, simde_float64x2_t, __m128d)
#endif
/* 128-bit types <-> WebAssembly v128_t: */
#if defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int8x16_to_v128, v128_t, simde_int8x16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int16x8_to_v128, v128_t, simde_int16x8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int32x4_to_v128, v128_t, simde_int32x4_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int64x2_to_v128, v128_t, simde_int64x2_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint8x16_to_v128, v128_t, simde_uint8x16_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint16x8_to_v128, v128_t, simde_uint16x8_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint32x4_to_v128, v128_t, simde_uint32x4_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint64x2_to_v128, v128_t, simde_uint64x2_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float32x4_to_v128, v128_t, simde_float32x4_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float64x2_to_v128, v128_t, simde_float64x2_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int8x16_from_v128, simde_int8x16_t, v128_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int16x8_from_v128, simde_int16x8_t, v128_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int32x4_from_v128, simde_int32x4_t, v128_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int64x2_from_v128, simde_int64x2_t, v128_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint8x16_from_v128, simde_uint8x16_t, v128_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint16x8_from_v128, simde_uint16x8_t, v128_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint32x4_from_v128, simde_uint32x4_t, v128_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint64x2_from_v128, simde_uint64x2_t, v128_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float32x4_from_v128, simde_float32x4_t, v128_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float64x2_from_v128, simde_float64x2_t, v128_t)
#endif
/* Generates the simde_<T>_to_private() / simde_<T>_from_private()
 * reinterpret helpers for NEON type T.  The macro body must end WITHOUT a
 * trailing line-continuation backslash: as written before, the backslash on
 * the last line absorbed the next source line (the first invocation below)
 * into the macro definition. */
#define SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(T) \
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_##T##_to_private, simde_##T##_private, simde_##T##_t) \
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_##T##_from_private, simde_##T##_t, simde_##T##_private)
/* Instantiate to_private/from_private converters for every NEON type. */
SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(int8x8)
SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(int16x4)
SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(int32x2)
SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(int64x1)
SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(uint8x8)
SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(uint16x4)
SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(uint32x2)
SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(uint64x1)
SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(float16x4)
SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(float32x2)
SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(float64x1)
SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(int8x16)
SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(int16x8)
SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(int32x4)
SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(int64x2)
SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(uint8x16)
SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(uint16x8)
SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(uint32x4)
SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(uint64x2)
SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(float16x8)
SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(float32x4)
SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(float64x2)
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* SIMDE_ARM_NEON_TYPES_H */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_MIN_H)
#define SIMDE_ARM_NEON_MIN_H
#include "types.h"
#include "cgt.h"
#include "ceq.h"
#include "bsl.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vmin_f32: lane-wise minimum of two 64-bit float32x2 vectors.
 * Unless SIMDE_FAST_NANS is defined, a NaN in either input lane is
 * propagated to the result (a's NaN takes precedence over b's). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vmin_f32(simde_float32x2_t a, simde_float32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmin_f32(a, b);
#elif SIMDE_NATURAL_VECTOR_SIZE_GE(64)
/* Select a where b > a, otherwise b.  Comparisons involving NaN are
 * false, so NaN lanes fall through to b here and are patched up below. */
simde_float32x2_t r = simde_vbsl_f32(simde_vcgt_f32(b, a), a, b);
#if !defined(SIMDE_FAST_NANS)
/* x == x is false only for NaN: re-insert the NaN operand (b first, then
 * a, so a's NaN wins) into the unordered lanes. */
r = simde_vbsl_f32(simde_vceq_f32(a, a), simde_vbsl_f32(simde_vceq_f32(b, b), r, b), a);
#endif
return r;
#else
simde_float32x2_private
r_,
a_ = simde_float32x2_to_private(a),
b_ = simde_float32x2_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
#if !defined(SIMDE_FAST_NANS)
if (simde_math_isnanf(a_.values[i])) {
r_.values[i] = a_.values[i];
} else if (simde_math_isnanf(b_.values[i])) {
r_.values[i] = b_.values[i];
} else {
r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i];
}
#else
r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i];
#endif
}
return simde_float32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmin_f32
#define vmin_f32(a, b) simde_vmin_f32((a), (b))
#endif
/* vmin_f64: lane-wise minimum of the single float64 lane (AArch64 type).
 * Unless SIMDE_FAST_NANS is defined, a NaN input is propagated to the
 * result (a's NaN takes precedence over b's). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vmin_f64(simde_float64x1_t a, simde_float64x1_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmin_f64(a, b);
#elif SIMDE_NATURAL_VECTOR_SIZE_GE(64)
/* Select a where b > a, otherwise b; NaN lanes patched up below. */
simde_float64x1_t r = simde_vbsl_f64(simde_vcgt_f64(b, a), a, b);
#if !defined(SIMDE_FAST_NANS)
/* x == x is false only for NaN: re-insert the NaN operand. */
r = simde_vbsl_f64(simde_vceq_f64(a, a), simde_vbsl_f64(simde_vceq_f64(b, b), r, b), a);
#endif
return r;
#else
simde_float64x1_private
r_,
a_ = simde_float64x1_to_private(a),
b_ = simde_float64x1_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
#if !defined(SIMDE_FAST_NANS)
if (simde_math_isnan(a_.values[i])) {
r_.values[i] = a_.values[i];
} else if (simde_math_isnan(b_.values[i])) {
r_.values[i] = b_.values[i];
} else {
r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i];
}
#else
r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i];
#endif
}
return simde_float64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmin_f64
#define vmin_f64(a, b) simde_vmin_f64((a), (b))
#endif
/* vmin_s8: lane-wise minimum of two signed 8-bit half-vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vmin_s8(simde_int8x8_t a, simde_int8x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmin_s8(a, b);
  #elif SIMDE_NATURAL_VECTOR_SIZE > 0
    /* Keep a in lanes where b compares greater, otherwise keep b. */
    return simde_vbsl_s8(simde_vcgt_s8(b, a), a, b);
  #else
    simde_int8x8_private
      out_,
      lhs_ = simde_int8x8_to_private(a),
      rhs_ = simde_int8x8_to_private(b);

    SIMDE_VECTORIZE
    for (size_t lane = 0 ; lane < (sizeof(out_.values) / sizeof(out_.values[0])) ; lane++) {
      if (lhs_.values[lane] < rhs_.values[lane]) {
        out_.values[lane] = lhs_.values[lane];
      } else {
        out_.values[lane] = rhs_.values[lane];
      }
    }

    return simde_int8x8_from_private(out_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmin_s8
  #define vmin_s8(a, b) simde_vmin_s8((a), (b))
#endif
/* vmin_s16: lane-wise minimum of two signed 16-bit half-vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vmin_s16(simde_int16x4_t a, simde_int16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmin_s16(a, b);
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
/* bsl(cgt(b, a), a, b): pick a where b > a, otherwise b. */
return simde_vbsl_s16(simde_vcgt_s16(b, a), a, b);
#else
simde_int16x4_private
r_,
a_ = simde_int16x4_to_private(a),
b_ = simde_int16x4_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i];
}
return simde_int16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmin_s16
#define vmin_s16(a, b) simde_vmin_s16((a), (b))
#endif
/* vmin_s32: lane-wise minimum of two signed 32-bit half-vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vmin_s32(simde_int32x2_t a, simde_int32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmin_s32(a, b);
  #elif SIMDE_NATURAL_VECTOR_SIZE > 0
    /* Keep a in lanes where b compares greater, otherwise keep b. */
    return simde_vbsl_s32(simde_vcgt_s32(b, a), a, b);
  #else
    simde_int32x2_private
      out_,
      lhs_ = simde_int32x2_to_private(a),
      rhs_ = simde_int32x2_to_private(b);

    SIMDE_VECTORIZE
    for (size_t lane = 0 ; lane < (sizeof(out_.values) / sizeof(out_.values[0])) ; lane++) {
      if (lhs_.values[lane] < rhs_.values[lane]) {
        out_.values[lane] = lhs_.values[lane];
      } else {
        out_.values[lane] = rhs_.values[lane];
      }
    }

    return simde_int32x2_from_private(out_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmin_s32
  #define vmin_s32(a, b) simde_vmin_s32((a), (b))
#endif
/* simde-internal (x_ prefix) helper: minimum of the single signed 64-bit
 * lane.  Note there is deliberately no native-NEON branch here. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_x_vmin_s64(simde_int64x1_t a, simde_int64x1_t b) {
  #if SIMDE_NATURAL_VECTOR_SIZE > 0
    return simde_vbsl_s64(simde_vcgt_s64(b, a), a, b);
  #else
    simde_int64x1_private
      out_,
      lhs_ = simde_int64x1_to_private(a),
      rhs_ = simde_int64x1_to_private(b);

    SIMDE_VECTORIZE
    for (size_t lane = 0 ; lane < (sizeof(out_.values) / sizeof(out_.values[0])) ; lane++) {
      out_.values[lane] = (rhs_.values[lane] < lhs_.values[lane]) ? rhs_.values[lane] : lhs_.values[lane];
    }

    return simde_int64x1_from_private(out_);
  #endif
}
/* vmin_u8: lane-wise minimum of two unsigned 8-bit half-vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vmin_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmin_u8(a, b);
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
/* bsl(cgt(b, a), a, b): pick a where b > a, otherwise b. */
return simde_vbsl_u8(simde_vcgt_u8(b, a), a, b);
#else
simde_uint8x8_private
r_,
a_ = simde_uint8x8_to_private(a),
b_ = simde_uint8x8_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i];
}
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmin_u8
#define vmin_u8(a, b) simde_vmin_u8((a), (b))
#endif
/* vmin_u16: lane-wise minimum of two unsigned 16-bit half-vectors.
 * The generic vector path is skipped on SSE2 so the MMX identity below
 * can be used instead. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vmin_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmin_u16(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && !defined(SIMDE_X86_SSE2_NATIVE)
return simde_vbsl_u16(simde_vcgt_u16(b, a), a, b);
#else
simde_uint16x4_private
r_,
a_ = simde_uint16x4_to_private(a),
b_ = simde_uint16x4_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
/* https://github.com/simd-everywhere/simde/issues/855#issuecomment-881656284 */
/* min(a, b) == a - saturating_sub(a, b): psubusw clamps a-b at 0, so the
 * outer subtraction yields b when a >= b and a otherwise. */
r_.m64 = _mm_sub_pi16(a_.m64, _mm_subs_pu16(a_.m64, b_.m64));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i];
}
#endif
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmin_u16
#define vmin_u16(a, b) simde_vmin_u16((a), (b))
#endif
/* vmin_u32: lane-wise minimum of two unsigned 32-bit half-vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vmin_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmin_u32(a, b);
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
/* bsl(cgt(b, a), a, b): pick a where b > a, otherwise b. */
return simde_vbsl_u32(simde_vcgt_u32(b, a), a, b);
#else
simde_uint32x2_private
r_,
a_ = simde_uint32x2_to_private(a),
b_ = simde_uint32x2_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i];
}
return simde_uint32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmin_u32
#define vmin_u32(a, b) simde_vmin_u32((a), (b))
#endif
/* simde-internal (x_ prefix) helper: minimum of the single unsigned 64-bit
 * lane.  Note there is deliberately no native-NEON branch here. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_x_vmin_u64(simde_uint64x1_t a, simde_uint64x1_t b) {
  #if SIMDE_NATURAL_VECTOR_SIZE > 0
    return simde_vbsl_u64(simde_vcgt_u64(b, a), a, b);
  #else
    simde_uint64x1_private
      out_,
      lhs_ = simde_uint64x1_to_private(a),
      rhs_ = simde_uint64x1_to_private(b);

    SIMDE_VECTORIZE
    for (size_t lane = 0 ; lane < (sizeof(out_.values) / sizeof(out_.values[0])) ; lane++) {
      out_.values[lane] = (rhs_.values[lane] < lhs_.values[lane]) ? rhs_.values[lane] : lhs_.values[lane];
    }

    return simde_uint64x1_from_private(out_);
  #endif
}
/* vminq_f32: lane-wise minimum of two 128-bit float32x4 vectors.
 * Without SIMDE_FAST_NANS, a NaN in either input lane is propagated
 * (the portable loop prefers a's NaN, then b's). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vminq_f32(simde_float32x4_t a, simde_float32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vminq_f32(a, b);
#elif (defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)) && defined(SIMDE_FAST_NANS)
/* vec_min used only under FAST_NANS, i.e. the caller forgoes strict NaN
 * propagation. */
return vec_min(a, b);
#else
simde_float32x4_private
r_,
a_ = simde_float32x4_to_private(a),
b_ = simde_float32x4_to_private(b);
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_FAST_NANS)
r_.m128 = _mm_min_ps(a_.m128, b_.m128);
#elif defined(SIMDE_X86_SSE4_1_NATIVE)
/* _mm_min_ps alone does not propagate NaN, so blend NaN into every lane
 * where the pair is unordered (_mm_cmpord_ps is false when either input
 * lane is NaN). */
r_.m128 = _mm_blendv_ps(_mm_set1_ps(SIMDE_MATH_NANF), _mm_min_ps(a_.m128, b_.m128), _mm_cmpord_ps(a_.m128, b_.m128));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
/* wasm f32x4.min used directly here — presumably NaN-propagating; confirm
 * against the WASM SIMD spec. */
r_.v128 = wasm_f32x4_min(a_.v128, b_.v128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
#if !defined(SIMDE_FAST_NANS)
if (simde_math_isnanf(a_.values[i])) {
r_.values[i] = a_.values[i];
} else if (simde_math_isnanf(b_.values[i])) {
r_.values[i] = b_.values[i];
} else {
r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i];
}
#else
r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i];
#endif
}
#endif
return simde_float32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vminq_f32
#define vminq_f32(a, b) simde_vminq_f32((a), (b))
#endif
/* vminq_f64: lane-wise minimum of two 128-bit float64x2 vectors.
 * Without SIMDE_FAST_NANS, a NaN in either input lane is propagated
 * (the portable loop prefers a's NaN, then b's). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vminq_f64(simde_float64x2_t a, simde_float64x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vminq_f64(a, b);
#elif (defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)) && defined(SIMDE_FAST_NANS)
/* vec_min used only under FAST_NANS (strict NaN propagation waived). */
return vec_min(a, b);
#else
simde_float64x2_private
r_,
a_ = simde_float64x2_to_private(a),
b_ = simde_float64x2_to_private(b);
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_FAST_NANS)
r_.m128d = _mm_min_pd(a_.m128d, b_.m128d);
#elif defined(SIMDE_X86_SSE4_1_NATIVE)
/* Blend NaN into lanes where the pair is unordered so NaN propagates. */
r_.m128d = _mm_blendv_pd(_mm_set1_pd(SIMDE_MATH_NAN), _mm_min_pd(a_.m128d, b_.m128d), _mm_cmpord_pd(a_.m128d, b_.m128d));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
/* wasm f64x2.min used directly here — presumably NaN-propagating; confirm
 * against the WASM SIMD spec. */
r_.v128 = wasm_f64x2_min(a_.v128, b_.v128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
#if !defined(SIMDE_FAST_NANS)
if (simde_math_isnan(a_.values[i])) {
r_.values[i] = a_.values[i];
} else if (simde_math_isnan(b_.values[i])) {
r_.values[i] = b_.values[i];
} else {
r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i];
}
#else
r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i];
#endif
}
#endif
return simde_float64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vminq_f64
#define vminq_f64(a, b) simde_vminq_f64((a), (b))
#endif
/* Lane-wise minimum of two int8x16 vectors (NEON vminq_s8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vminq_s8(simde_int8x16_t a, simde_int8x16_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vminq_s8(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
    return vec_min(a, b);
  #else
    simde_int8x16_private
      r_,
      a_ = simde_int8x16_to_private(a),
      b_ = simde_int8x16_to_private(b);
    #if defined(SIMDE_X86_SSE4_1_NATIVE)
      /* Signed byte min requires SSE4.1 (SSE2 only has _mm_min_epu8). */
      r_.m128i = _mm_min_epi8(a_.m128i, b_.m128i);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_i8x16_min(a_.v128, b_.v128);
    #else
      /* Portable scalar fallback. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i];
      }
    #endif
    return simde_int8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vminq_s8
  #define vminq_s8(a, b) simde_vminq_s8((a), (b))
#endif
/* Lane-wise minimum of two int16x8 vectors (NEON vminq_s16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vminq_s16(simde_int16x8_t a, simde_int16x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vminq_s16(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
    return vec_min(a, b);
  #else
    simde_int16x8_private
      r_,
      a_ = simde_int16x8_to_private(a),
      b_ = simde_int16x8_to_private(b);
    #if defined(SIMDE_X86_SSE2_NATIVE)
      /* Signed 16-bit min exists as far back as SSE2. */
      r_.m128i = _mm_min_epi16(a_.m128i, b_.m128i);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_i16x8_min(a_.v128, b_.v128);
    #else
      /* Portable scalar fallback. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i];
      }
    #endif
    return simde_int16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vminq_s16
  #define vminq_s16(a, b) simde_vminq_s16((a), (b))
#endif
/* Lane-wise minimum of two int32x4 vectors (NEON vminq_s32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vminq_s32(simde_int32x4_t a, simde_int32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vminq_s32(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
    return vec_min(a, b);
  #else
    simde_int32x4_private
      r_,
      a_ = simde_int32x4_to_private(a),
      b_ = simde_int32x4_to_private(b);
    #if defined(SIMDE_X86_SSE4_1_NATIVE)
      /* Signed 32-bit min requires SSE4.1. */
      r_.m128i = _mm_min_epi32(a_.m128i, b_.m128i);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_i32x4_min(a_.v128, b_.v128);
    #else
      /* Portable scalar fallback. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i];
      }
    #endif
    return simde_int32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vminq_s32
  #define vminq_s32(a, b) simde_vminq_s32((a), (b))
#endif
/* Internal helper (the simde_x_ prefix means this is NOT part of the public
 * NEON API, which has no 64-bit integer min): lane-wise minimum of two
 * int64x2 vectors.  No native-alias block is emitted for it. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_x_vminq_s64(simde_int64x2_t a, simde_int64x2_t b) {
  #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
    return vec_min(a, b);
  #else
    simde_int64x2_private
      r_,
      a_ = simde_int64x2_to_private(a),
      b_ = simde_int64x2_to_private(b);
    #if defined(SIMDE_X86_AVX512VL_NATIVE)
      /* 64-bit integer min only exists on x86 with AVX-512VL. */
      r_.m128i = _mm_min_epi64(a_.m128i, b_.m128i);
    #else
      /* Portable scalar fallback. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i];
      }
    #endif
    return simde_int64x2_from_private(r_);
  #endif
}
/* Lane-wise minimum of two uint8x16 vectors (NEON vminq_u8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vminq_u8(simde_uint8x16_t a, simde_uint8x16_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vminq_u8(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
    return vec_min(a, b);
  #else
    simde_uint8x16_private
      r_,
      a_ = simde_uint8x16_to_private(a),
      b_ = simde_uint8x16_to_private(b);
    #if defined(SIMDE_X86_SSE2_NATIVE)
      /* Unsigned byte min exists as far back as SSE2. */
      r_.m128i = _mm_min_epu8(a_.m128i, b_.m128i);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_u8x16_min(a_.v128, b_.v128);
    #else
      /* Portable scalar fallback. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i];
      }
    #endif
    return simde_uint8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vminq_u8
  #define vminq_u8(a, b) simde_vminq_u8((a), (b))
#endif
/* Lane-wise minimum of two uint16x8 vectors (NEON vminq_u16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vminq_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vminq_u16(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
    return vec_min(a, b);
  #else
    simde_uint16x8_private
      r_,
      a_ = simde_uint16x8_to_private(a),
      b_ = simde_uint16x8_to_private(b);
    #if defined(SIMDE_X86_SSE4_1_NATIVE)
      r_.m128i = _mm_min_epu16(a_.m128i, b_.m128i);
    #elif defined(SIMDE_X86_SSE2_NATIVE)
      /* https://github.com/simd-everywhere/simde/issues/855#issuecomment-881656284 */
      /* SSE2 has no unsigned 16-bit min, but a - sat_sub(a, b) works:
       * if a >= b the saturating subtraction yields a - b, so a - (a-b) == b;
       * if a <  b it saturates to 0, so the result is a.  Either way: min. */
      r_.m128i = _mm_sub_epi16(a_.m128i, _mm_subs_epu16(a_.m128i, b_.m128i));
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_u16x8_min(a_.v128, b_.v128);
    #else
      /* Portable scalar fallback. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i];
      }
    #endif
    return simde_uint16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vminq_u16
  #define vminq_u16(a, b) simde_vminq_u16((a), (b))
#endif
/* Lane-wise minimum of two uint32x4 vectors (NEON vminq_u32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vminq_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vminq_u32(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
    return vec_min(a, b);
  #else
    simde_uint32x4_private
      r_,
      a_ = simde_uint32x4_to_private(a),
      b_ = simde_uint32x4_to_private(b);
    #if defined(SIMDE_X86_SSE4_1_NATIVE)
      r_.m128i = _mm_min_epu32(a_.m128i, b_.m128i);
    #elif defined(SIMDE_X86_SSE2_NATIVE)
      /* SSE2 has neither an unsigned 32-bit min nor an unsigned 32-bit
       * saturating subtract, so both are emulated here:
       *   1. Emulate sat_sub(a, b): keep (a - b) only where no unsigned
       *      borrow occurred.  XOR-ing with INT32_MIN flips the sign bit so
       *      the signed _mm_cmpgt_epi32 acts as an unsigned compare; the
       *      borrow happened exactly when (a - b) >u a.
       *   2. m = (sat_sub(a, b) == 0), i.e. all-ones where a <= b.
       *   3. Select a where m is set, b elsewhere -> lane-wise min. */
      const __m128i i32_min = _mm_set1_epi32(INT32_MIN);
      const __m128i difference = _mm_sub_epi32(a_.m128i, b_.m128i);
      __m128i m =
        _mm_cmpeq_epi32(
          /* _mm_subs_epu32(a_.sse_m128i, b_.sse_m128i) */
          _mm_and_si128(
            difference,
            _mm_xor_si128(
              _mm_cmpgt_epi32(
                _mm_xor_si128(difference, i32_min),
                _mm_xor_si128(a_.m128i, i32_min)
              ),
              _mm_set1_epi32(~INT32_C(0))
            )
          ),
          _mm_setzero_si128()
        );
      /* Blend: (m & a) | (~m & b). */
      r_.m128i =
        _mm_or_si128(
          _mm_and_si128(m, a_.m128i),
          _mm_andnot_si128(m, b_.m128i)
        );
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_u32x4_min(a_.v128, b_.v128);
    #else
      /* Portable scalar fallback. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i];
      }
    #endif
    return simde_uint32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vminq_u32
  #define vminq_u32(a, b) simde_vminq_u32((a), (b))
#endif
/* Internal helper (simde_x_ prefix: not part of the public NEON API):
 * lane-wise minimum of two uint64x2 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_x_vminq_u64(simde_uint64x2_t a, simde_uint64x2_t b) {
  #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
    return vec_min(a, b);
  #else
    simde_uint64x2_private lhs_ = simde_uint64x2_to_private(a);
    simde_uint64x2_private rhs_ = simde_uint64x2_to_private(b);
    simde_uint64x2_private ret_;

    /* Portable scalar fallback: pick the smaller value in each lane. */
    SIMDE_VECTORIZE
    for (size_t lane = 0 ; lane < (sizeof(ret_.values) / sizeof(ret_.values[0])) ; lane++) {
      const uint64_t x = lhs_.values[lane];
      const uint64_t y = rhs_.values[lane];
      ret_.values[lane] = (y < x) ? y : x;
    }

    return simde_uint64x2_from_private(ret_);
  #endif
}
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_MIN_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_REINTERPRET_H)
#define SIMDE_ARM_NEON_REINTERPRET_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vreinterpret_s8_* family: reinterpret the raw 64 bits of a D-register-sized
 * vector as int8x8 without changing any bit.  On native NEON each of these
 * maps to the corresponding vreinterpret intrinsic (a no-op); the portable
 * fallback copies the bytes between the private representations with
 * simde_memcpy(), which is the strict-aliasing-safe way to type-pun. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vreinterpret_s8_s16(simde_int16x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s8_s16(a);
  #else
    simde_int8x8_private r_;
    simde_int16x4_private a_ = simde_int16x4_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s8_s16
  #define vreinterpret_s8_s16 simde_vreinterpret_s8_s16
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vreinterpret_s8_s32(simde_int32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s8_s32(a);
  #else
    simde_int8x8_private r_;
    simde_int32x2_private a_ = simde_int32x2_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s8_s32
  #define vreinterpret_s8_s32 simde_vreinterpret_s8_s32
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vreinterpret_s8_s64(simde_int64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s8_s64(a);
  #else
    simde_int8x8_private r_;
    simde_int64x1_private a_ = simde_int64x1_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s8_s64
  #define vreinterpret_s8_s64 simde_vreinterpret_s8_s64
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vreinterpret_s8_u8(simde_uint8x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s8_u8(a);
  #else
    simde_int8x8_private r_;
    simde_uint8x8_private a_ = simde_uint8x8_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s8_u8
  #define vreinterpret_s8_u8 simde_vreinterpret_s8_u8
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vreinterpret_s8_u16(simde_uint16x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s8_u16(a);
  #else
    simde_int8x8_private r_;
    simde_uint16x4_private a_ = simde_uint16x4_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s8_u16
  #define vreinterpret_s8_u16 simde_vreinterpret_s8_u16
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vreinterpret_s8_u32(simde_uint32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s8_u32(a);
  #else
    simde_int8x8_private r_;
    simde_uint32x2_private a_ = simde_uint32x2_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s8_u32
  #define vreinterpret_s8_u32 simde_vreinterpret_s8_u32
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vreinterpret_s8_u64(simde_uint64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s8_u64(a);
  #else
    simde_int8x8_private r_;
    simde_uint64x1_private a_ = simde_uint64x1_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s8_u64
  #define vreinterpret_s8_u64 simde_vreinterpret_s8_u64
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vreinterpret_s8_f32(simde_float32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s8_f32(a);
  #else
    simde_int8x8_private r_;
    simde_float32x2_private a_ = simde_float32x2_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s8_f32
  #define vreinterpret_s8_f32 simde_vreinterpret_s8_f32
#endif
/* float64 vectors only exist from AArch64 (A64V8) onward, hence the
 * different guard and alias macro below. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vreinterpret_s8_f64(simde_float64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vreinterpret_s8_f64(a);
  #else
    simde_int8x8_private r_;
    simde_float64x1_private a_ = simde_float64x1_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s8_f64
  #define vreinterpret_s8_f64 simde_vreinterpret_s8_f64
#endif
/* vreinterpretq_s8_* family: reinterpret the raw 128 bits of a
 * Q-register-sized vector as int8x16.  Same pattern as the 64-bit family:
 * native intrinsic when available, simde_memcpy() type-pun otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vreinterpretq_s8_s16(simde_int16x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_s8_s16(a);
  #else
    simde_int8x16_private r_;
    simde_int16x8_private a_ = simde_int16x8_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_s8_s16
  #define vreinterpretq_s8_s16(a) simde_vreinterpretq_s8_s16(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vreinterpretq_s8_s32(simde_int32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_s8_s32(a);
  #else
    simde_int8x16_private r_;
    simde_int32x4_private a_ = simde_int32x4_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_s8_s32
  #define vreinterpretq_s8_s32(a) simde_vreinterpretq_s8_s32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vreinterpretq_s8_s64(simde_int64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_s8_s64(a);
  #else
    simde_int8x16_private r_;
    simde_int64x2_private a_ = simde_int64x2_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_s8_s64
  #define vreinterpretq_s8_s64(a) simde_vreinterpretq_s8_s64(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vreinterpretq_s8_u8(simde_uint8x16_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_s8_u8(a);
  #else
    simde_int8x16_private r_;
    simde_uint8x16_private a_ = simde_uint8x16_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_s8_u8
  #define vreinterpretq_s8_u8(a) simde_vreinterpretq_s8_u8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vreinterpretq_s8_u16(simde_uint16x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_s8_u16(a);
  #else
    simde_int8x16_private r_;
    simde_uint16x8_private a_ = simde_uint16x8_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_s8_u16
  #define vreinterpretq_s8_u16(a) simde_vreinterpretq_s8_u16(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vreinterpretq_s8_u32(simde_uint32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_s8_u32(a);
  #else
    simde_int8x16_private r_;
    simde_uint32x4_private a_ = simde_uint32x4_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_s8_u32
  #define vreinterpretq_s8_u32(a) simde_vreinterpretq_s8_u32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vreinterpretq_s8_u64(simde_uint64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_s8_u64(a);
  #else
    simde_int8x16_private r_;
    simde_uint64x2_private a_ = simde_uint64x2_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_s8_u64
  #define vreinterpretq_s8_u64(a) simde_vreinterpretq_s8_u64(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vreinterpretq_s8_f32(simde_float32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_s8_f32(a);
  #else
    simde_int8x16_private r_;
    simde_float32x4_private a_ = simde_float32x4_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_s8_f32
  #define vreinterpretq_s8_f32(a) simde_vreinterpretq_s8_f32(a)
#endif
/* float64 vectors only exist from AArch64 (A64V8) onward. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vreinterpretq_s8_f64(simde_float64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vreinterpretq_s8_f64(a);
  #else
    simde_int8x16_private r_;
    simde_float64x2_private a_ = simde_float64x2_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_s8_f64
  #define vreinterpretq_s8_f64(a) simde_vreinterpretq_s8_f64(a)
#endif
/* vreinterpret_s16_* family: reinterpret the raw 64 bits of a
 * D-register-sized vector as int16x4 (native intrinsic when available,
 * simde_memcpy() type-pun otherwise). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vreinterpret_s16_s8(simde_int8x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s16_s8(a);
  #else
    simde_int16x4_private r_;
    simde_int8x8_private a_ = simde_int8x8_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s16_s8
  #define vreinterpret_s16_s8 simde_vreinterpret_s16_s8
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vreinterpret_s16_s32(simde_int32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s16_s32(a);
  #else
    simde_int16x4_private r_;
    simde_int32x2_private a_ = simde_int32x2_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s16_s32
  #define vreinterpret_s16_s32 simde_vreinterpret_s16_s32
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vreinterpret_s16_s64(simde_int64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s16_s64(a);
  #else
    simde_int16x4_private r_;
    simde_int64x1_private a_ = simde_int64x1_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s16_s64
  #define vreinterpret_s16_s64 simde_vreinterpret_s16_s64
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vreinterpret_s16_u8(simde_uint8x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s16_u8(a);
  #else
    simde_int16x4_private r_;
    simde_uint8x8_private a_ = simde_uint8x8_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s16_u8
  #define vreinterpret_s16_u8 simde_vreinterpret_s16_u8
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vreinterpret_s16_u16(simde_uint16x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s16_u16(a);
  #else
    simde_int16x4_private r_;
    simde_uint16x4_private a_ = simde_uint16x4_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s16_u16
  #define vreinterpret_s16_u16 simde_vreinterpret_s16_u16
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vreinterpret_s16_u32(simde_uint32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s16_u32(a);
  #else
    simde_int16x4_private r_;
    simde_uint32x2_private a_ = simde_uint32x2_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s16_u32
  #define vreinterpret_s16_u32 simde_vreinterpret_s16_u32
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vreinterpret_s16_u64(simde_uint64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s16_u64(a);
  #else
    simde_int16x4_private r_;
    simde_uint64x1_private a_ = simde_uint64x1_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s16_u64
  #define vreinterpret_s16_u64 simde_vreinterpret_s16_u64
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vreinterpret_s16_f32(simde_float32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s16_f32(a);
  #else
    simde_int16x4_private r_;
    simde_float32x2_private a_ = simde_float32x2_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s16_f32
  #define vreinterpret_s16_f32 simde_vreinterpret_s16_f32
#endif
/* float64 vectors only exist from AArch64 (A64V8) onward. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vreinterpret_s16_f64(simde_float64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vreinterpret_s16_f64(a);
  #else
    simde_int16x4_private r_;
    simde_float64x1_private a_ = simde_float64x1_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s16_f64
  #define vreinterpret_s16_f64 simde_vreinterpret_s16_f64
#endif
/* vreinterpretq_s16_* family: reinterpret the raw 128 bits of a
 * Q-register-sized vector as int16x8 (native intrinsic when available,
 * simde_memcpy() type-pun otherwise). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vreinterpretq_s16_s8(simde_int8x16_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_s16_s8(a);
  #else
    simde_int16x8_private r_;
    simde_int8x16_private a_ = simde_int8x16_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_s16_s8
  #define vreinterpretq_s16_s8(a) simde_vreinterpretq_s16_s8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vreinterpretq_s16_s32(simde_int32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_s16_s32(a);
  #else
    simde_int16x8_private r_;
    simde_int32x4_private a_ = simde_int32x4_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_s16_s32
  #define vreinterpretq_s16_s32(a) simde_vreinterpretq_s16_s32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vreinterpretq_s16_s64(simde_int64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_s16_s64(a);
  #else
    simde_int16x8_private r_;
    simde_int64x2_private a_ = simde_int64x2_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_s16_s64
  #define vreinterpretq_s16_s64(a) simde_vreinterpretq_s16_s64(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vreinterpretq_s16_u8(simde_uint8x16_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_s16_u8(a);
  #else
    simde_int16x8_private r_;
    simde_uint8x16_private a_ = simde_uint8x16_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_s16_u8
  #define vreinterpretq_s16_u8(a) simde_vreinterpretq_s16_u8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vreinterpretq_s16_u16(simde_uint16x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_s16_u16(a);
  #else
    simde_int16x8_private r_;
    simde_uint16x8_private a_ = simde_uint16x8_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_s16_u16
  #define vreinterpretq_s16_u16(a) simde_vreinterpretq_s16_u16(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vreinterpretq_s16_u32(simde_uint32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_s16_u32(a);
  #else
    simde_int16x8_private r_;
    simde_uint32x4_private a_ = simde_uint32x4_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_s16_u32
  #define vreinterpretq_s16_u32(a) simde_vreinterpretq_s16_u32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vreinterpretq_s16_u64(simde_uint64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_s16_u64(a);
  #else
    simde_int16x8_private r_;
    simde_uint64x2_private a_ = simde_uint64x2_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_s16_u64
  #define vreinterpretq_s16_u64(a) simde_vreinterpretq_s16_u64(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vreinterpretq_s16_f32(simde_float32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_s16_f32(a);
  #else
    simde_int16x8_private r_;
    simde_float32x4_private a_ = simde_float32x4_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_s16_f32
  #define vreinterpretq_s16_f32(a) simde_vreinterpretq_s16_f32(a)
#endif
/* float64 vectors only exist from AArch64 (A64V8) onward. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vreinterpretq_s16_f64(simde_float64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vreinterpretq_s16_f64(a);
  #else
    simde_int16x8_private r_;
    simde_float64x2_private a_ = simde_float64x2_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_s16_f64
  #define vreinterpretq_s16_f64(a) simde_vreinterpretq_s16_f64(a)
#endif
/* vreinterpret_s32_* family: reinterpret the raw 64 bits of a
 * D-register-sized vector as int32x2 (native intrinsic when available,
 * simde_memcpy() type-pun otherwise). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vreinterpret_s32_s8(simde_int8x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s32_s8(a);
  #else
    simde_int32x2_private r_;
    simde_int8x8_private a_ = simde_int8x8_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s32_s8
  #define vreinterpret_s32_s8 simde_vreinterpret_s32_s8
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vreinterpret_s32_s16(simde_int16x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s32_s16(a);
  #else
    simde_int32x2_private r_;
    simde_int16x4_private a_ = simde_int16x4_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s32_s16
  #define vreinterpret_s32_s16 simde_vreinterpret_s32_s16
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vreinterpret_s32_s64(simde_int64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s32_s64(a);
  #else
    simde_int32x2_private r_;
    simde_int64x1_private a_ = simde_int64x1_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s32_s64
  #define vreinterpret_s32_s64 simde_vreinterpret_s32_s64
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vreinterpret_s32_u8(simde_uint8x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s32_u8(a);
  #else
    simde_int32x2_private r_;
    simde_uint8x8_private a_ = simde_uint8x8_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s32_u8
  #define vreinterpret_s32_u8 simde_vreinterpret_s32_u8
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vreinterpret_s32_u16(simde_uint16x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s32_u16(a);
  #else
    simde_int32x2_private r_;
    simde_uint16x4_private a_ = simde_uint16x4_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s32_u16
  #define vreinterpret_s32_u16 simde_vreinterpret_s32_u16
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vreinterpret_s32_u32(simde_uint32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s32_u32(a);
  #else
    simde_int32x2_private r_;
    simde_uint32x2_private a_ = simde_uint32x2_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s32_u32
  #define vreinterpret_s32_u32 simde_vreinterpret_s32_u32
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vreinterpret_s32_u64(simde_uint64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s32_u64(a);
  #else
    simde_int32x2_private r_;
    simde_uint64x1_private a_ = simde_uint64x1_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s32_u64
  #define vreinterpret_s32_u64 simde_vreinterpret_s32_u64
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vreinterpret_s32_f32(simde_float32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_s32_f32(a);
  #else
    simde_int32x2_private r_;
    simde_float32x2_private a_ = simde_float32x2_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s32_f32
  #define vreinterpret_s32_f32 simde_vreinterpret_s32_f32
#endif
/* float64 vectors only exist from AArch64 (A64V8) onward. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vreinterpret_s32_f64(simde_float64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vreinterpret_s32_f64(a);
  #else
    simde_int32x2_private r_;
    simde_float64x1_private a_ = simde_float64x1_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_s32_f64
  #define vreinterpret_s32_f64 simde_vreinterpret_s32_f64
#endif
/* vreinterpretq_s32_* family: reinterpret the raw 128 bits of a
 * Q-register-sized vector as int32x4 (native intrinsic when available,
 * simde_memcpy() type-pun otherwise). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vreinterpretq_s32_s8(simde_int8x16_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_s32_s8(a);
  #else
    simde_int32x4_private r_;
    simde_int8x16_private a_ = simde_int8x16_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_s32_s8
  #define vreinterpretq_s32_s8(a) simde_vreinterpretq_s32_s8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vreinterpretq_s32_s16(simde_int16x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_s32_s16(a);
  #else
    simde_int32x4_private r_;
    simde_int16x8_private a_ = simde_int16x8_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_s32_s16
  #define vreinterpretq_s32_s16(a) simde_vreinterpretq_s32_s16(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vreinterpretq_s32_s64(simde_int64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_s32_s64(a);
  #else
    simde_int32x4_private r_;
    simde_int64x2_private a_ = simde_int64x2_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_s32_s64
  #define vreinterpretq_s32_s64(a) simde_vreinterpretq_s32_s64(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vreinterpretq_s32_u8(simde_uint8x16_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_s32_u8(a);
  #else
    simde_int32x4_private r_;
    simde_uint8x16_private a_ = simde_uint8x16_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_s32_u8
  #define vreinterpretq_s32_u8(a) simde_vreinterpretq_s32_u8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vreinterpretq_s32_u16(simde_uint16x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_s32_u16(a);
  #else
    simde_int32x4_private r_;
    simde_uint16x8_private a_ = simde_uint16x8_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_s32_u16
  #define vreinterpretq_s32_u16(a) simde_vreinterpretq_s32_u16(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vreinterpretq_s32_u32(simde_uint32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_s32_u32(a);
  #else
    simde_int32x4_private r_;
    simde_uint32x4_private a_ = simde_uint32x4_to_private(a);
    simde_memcpy(&r_, &a_, sizeof(r_));
    return simde_int32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_s32_u32
#define vreinterpretq_s32_u32(a) simde_vreinterpretq_s32_u32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vreinterpretq_s32_u64(simde_uint64x2_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_s32_u64(a);
#else
simde_int32x4_private r_;
simde_uint64x2_private a_ = simde_uint64x2_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_int32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_s32_u64
#define vreinterpretq_s32_u64(a) simde_vreinterpretq_s32_u64(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vreinterpretq_s32_f32(simde_float32x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_s32_f32(a);
#else
simde_int32x4_private r_;
simde_float32x4_private a_ = simde_float32x4_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_int32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_s32_f32
#define vreinterpretq_s32_f32(a) simde_vreinterpretq_s32_f32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vreinterpretq_s32_f64(simde_float64x2_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vreinterpretq_s32_f64(a);
#else
simde_int32x4_private r_;
simde_float64x2_private a_ = simde_float64x2_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_int32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_s32_f64
#define vreinterpretq_s32_f64(a) simde_vreinterpretq_s32_f64(a)
#endif
/* vreinterpret_s64_*: reinterpret a 64-bit NEON vector's bits as int64x1.
 * No value conversion; native intrinsic when available, else a
 * strict-aliasing-safe memcpy through the _private views. */
/* int8x8 -> int64x1. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vreinterpret_s64_s8(simde_int8x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_s64_s8(a);
#else
simde_int64x1_private r_;
simde_int8x8_private a_ = simde_int8x8_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_int64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_s64_s8
#define vreinterpret_s64_s8 simde_vreinterpret_s64_s8
#endif
/* int16x4 -> int64x1. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vreinterpret_s64_s16(simde_int16x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_s64_s16(a);
#else
simde_int64x1_private r_;
simde_int16x4_private a_ = simde_int16x4_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_int64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_s64_s16
#define vreinterpret_s64_s16 simde_vreinterpret_s64_s16
#endif
/* int32x2 -> int64x1. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vreinterpret_s64_s32(simde_int32x2_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_s64_s32(a);
#else
simde_int64x1_private r_;
simde_int32x2_private a_ = simde_int32x2_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_int64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_s64_s32
#define vreinterpret_s64_s32 simde_vreinterpret_s64_s32
#endif
/* uint8x8 -> int64x1. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vreinterpret_s64_u8(simde_uint8x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_s64_u8(a);
#else
simde_int64x1_private r_;
simde_uint8x8_private a_ = simde_uint8x8_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_int64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_s64_u8
#define vreinterpret_s64_u8 simde_vreinterpret_s64_u8
#endif
/* uint16x4 -> int64x1. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vreinterpret_s64_u16(simde_uint16x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_s64_u16(a);
#else
simde_int64x1_private r_;
simde_uint16x4_private a_ = simde_uint16x4_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_int64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_s64_u16
#define vreinterpret_s64_u16 simde_vreinterpret_s64_u16
#endif
/* uint32x2 -> int64x1. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vreinterpret_s64_u32(simde_uint32x2_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_s64_u32(a);
#else
simde_int64x1_private r_;
simde_uint32x2_private a_ = simde_uint32x2_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_int64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_s64_u32
#define vreinterpret_s64_u32 simde_vreinterpret_s64_u32
#endif
/* uint64x1 -> int64x1 (signedness change only). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vreinterpret_s64_u64(simde_uint64x1_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_s64_u64(a);
#else
simde_int64x1_private r_;
simde_uint64x1_private a_ = simde_uint64x1_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_int64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_s64_u64
#define vreinterpret_s64_u64 simde_vreinterpret_s64_u64
#endif
/* float32x2 -> int64x1. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vreinterpret_s64_f32(simde_float32x2_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_s64_f32(a);
#else
simde_int64x1_private r_;
simde_float32x2_private a_ = simde_float32x2_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_int64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_s64_f32
#define vreinterpret_s64_f32 simde_vreinterpret_s64_f32
#endif
/* float64x1 -> int64x1; native intrinsic exists only on AArch64 (A64V8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vreinterpret_s64_f64(simde_float64x1_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vreinterpret_s64_f64(a);
#else
simde_int64x1_private r_;
simde_float64x1_private a_ = simde_float64x1_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_int64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_s64_f64
#define vreinterpret_s64_f64 simde_vreinterpret_s64_f64
#endif
/* vreinterpretq_s64_*: reinterpret a 128-bit NEON vector's bits as int64x2.
 * No value conversion; native intrinsic when available, else a
 * strict-aliasing-safe memcpy through the _private views. */
/* int8x16 -> int64x2. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vreinterpretq_s64_s8(simde_int8x16_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_s64_s8(a);
#else
simde_int64x2_private r_;
simde_int8x16_private a_ = simde_int8x16_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_int64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_s64_s8
#define vreinterpretq_s64_s8(a) simde_vreinterpretq_s64_s8(a)
#endif
/* int16x8 -> int64x2. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vreinterpretq_s64_s16(simde_int16x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_s64_s16(a);
#else
simde_int64x2_private r_;
simde_int16x8_private a_ = simde_int16x8_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_int64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_s64_s16
#define vreinterpretq_s64_s16(a) simde_vreinterpretq_s64_s16(a)
#endif
/* int32x4 -> int64x2. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vreinterpretq_s64_s32(simde_int32x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_s64_s32(a);
#else
simde_int64x2_private r_;
simde_int32x4_private a_ = simde_int32x4_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_int64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_s64_s32
#define vreinterpretq_s64_s32(a) simde_vreinterpretq_s64_s32(a)
#endif
/* uint8x16 -> int64x2. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vreinterpretq_s64_u8(simde_uint8x16_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_s64_u8(a);
#else
simde_int64x2_private r_;
simde_uint8x16_private a_ = simde_uint8x16_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_int64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_s64_u8
#define vreinterpretq_s64_u8(a) simde_vreinterpretq_s64_u8(a)
#endif
/* uint16x8 -> int64x2. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vreinterpretq_s64_u16(simde_uint16x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_s64_u16(a);
#else
simde_int64x2_private r_;
simde_uint16x8_private a_ = simde_uint16x8_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_int64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_s64_u16
#define vreinterpretq_s64_u16(a) simde_vreinterpretq_s64_u16(a)
#endif
/* uint32x4 -> int64x2. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vreinterpretq_s64_u32(simde_uint32x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_s64_u32(a);
#else
simde_int64x2_private r_;
simde_uint32x4_private a_ = simde_uint32x4_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_int64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_s64_u32
#define vreinterpretq_s64_u32(a) simde_vreinterpretq_s64_u32(a)
#endif
/* uint64x2 -> int64x2 (signedness change only). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vreinterpretq_s64_u64(simde_uint64x2_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_s64_u64(a);
#else
simde_int64x2_private r_;
simde_uint64x2_private a_ = simde_uint64x2_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_int64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_s64_u64
#define vreinterpretq_s64_u64(a) simde_vreinterpretq_s64_u64(a)
#endif
/* float32x4 -> int64x2. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vreinterpretq_s64_f32(simde_float32x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_s64_f32(a);
#else
simde_int64x2_private r_;
simde_float32x4_private a_ = simde_float32x4_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_int64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_s64_f32
#define vreinterpretq_s64_f32(a) simde_vreinterpretq_s64_f32(a)
#endif
/* float64x2 -> int64x2; native intrinsic exists only on AArch64 (A64V8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vreinterpretq_s64_f64(simde_float64x2_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vreinterpretq_s64_f64(a);
#else
simde_int64x2_private r_;
simde_float64x2_private a_ = simde_float64x2_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_int64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_s64_f64
#define vreinterpretq_s64_f64(a) simde_vreinterpretq_s64_f64(a)
#endif
/* vreinterpret_u8_*: reinterpret a 64-bit NEON vector's bits as uint8x8.
 * No value conversion; native intrinsic when available, else a
 * strict-aliasing-safe memcpy through the _private views. */
/* int8x8 -> uint8x8 (signedness change only). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vreinterpret_u8_s8(simde_int8x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_u8_s8(a);
#else
simde_uint8x8_private r_;
simde_int8x8_private a_ = simde_int8x8_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_u8_s8
#define vreinterpret_u8_s8 simde_vreinterpret_u8_s8
#endif
/* int16x4 -> uint8x8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vreinterpret_u8_s16(simde_int16x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_u8_s16(a);
#else
simde_uint8x8_private r_;
simde_int16x4_private a_ = simde_int16x4_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_u8_s16
#define vreinterpret_u8_s16 simde_vreinterpret_u8_s16
#endif
/* int32x2 -> uint8x8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vreinterpret_u8_s32(simde_int32x2_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_u8_s32(a);
#else
simde_uint8x8_private r_;
simde_int32x2_private a_ = simde_int32x2_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_u8_s32
#define vreinterpret_u8_s32 simde_vreinterpret_u8_s32
#endif
/* int64x1 -> uint8x8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vreinterpret_u8_s64(simde_int64x1_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_u8_s64(a);
#else
simde_uint8x8_private r_;
simde_int64x1_private a_ = simde_int64x1_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_u8_s64
#define vreinterpret_u8_s64 simde_vreinterpret_u8_s64
#endif
/* uint16x4 -> uint8x8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vreinterpret_u8_u16(simde_uint16x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_u8_u16(a);
#else
simde_uint8x8_private r_;
simde_uint16x4_private a_ = simde_uint16x4_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_u8_u16
#define vreinterpret_u8_u16 simde_vreinterpret_u8_u16
#endif
/* uint32x2 -> uint8x8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vreinterpret_u8_u32(simde_uint32x2_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_u8_u32(a);
#else
simde_uint8x8_private r_;
simde_uint32x2_private a_ = simde_uint32x2_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_u8_u32
#define vreinterpret_u8_u32 simde_vreinterpret_u8_u32
#endif
/* uint64x1 -> uint8x8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vreinterpret_u8_u64(simde_uint64x1_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_u8_u64(a);
#else
simde_uint8x8_private r_;
simde_uint64x1_private a_ = simde_uint64x1_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_u8_u64
#define vreinterpret_u8_u64 simde_vreinterpret_u8_u64
#endif
/* float32x2 -> uint8x8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vreinterpret_u8_f32(simde_float32x2_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_u8_f32(a);
#else
simde_uint8x8_private r_;
simde_float32x2_private a_ = simde_float32x2_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_u8_f32
#define vreinterpret_u8_f32 simde_vreinterpret_u8_f32
#endif
/* float64x1 -> uint8x8; native intrinsic exists only on AArch64 (A64V8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vreinterpret_u8_f64(simde_float64x1_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vreinterpret_u8_f64(a);
#else
simde_uint8x8_private r_;
simde_float64x1_private a_ = simde_float64x1_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_u8_f64
#define vreinterpret_u8_f64 simde_vreinterpret_u8_f64
#endif
/* vreinterpretq_u8_*: reinterpret a 128-bit NEON vector's bits as uint8x16.
 * No value conversion; native intrinsic when available, else a
 * strict-aliasing-safe memcpy through the _private views. */
/* int8x16 -> uint8x16 (signedness change only). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vreinterpretq_u8_s8(simde_int8x16_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_u8_s8(a);
#else
simde_uint8x16_private r_;
simde_int8x16_private a_ = simde_int8x16_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_u8_s8
#define vreinterpretq_u8_s8(a) simde_vreinterpretq_u8_s8(a)
#endif
/* int16x8 -> uint8x16. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vreinterpretq_u8_s16(simde_int16x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_u8_s16(a);
#else
simde_uint8x16_private r_;
simde_int16x8_private a_ = simde_int16x8_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_u8_s16
#define vreinterpretq_u8_s16(a) simde_vreinterpretq_u8_s16(a)
#endif
/* int32x4 -> uint8x16. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vreinterpretq_u8_s32(simde_int32x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_u8_s32(a);
#else
simde_uint8x16_private r_;
simde_int32x4_private a_ = simde_int32x4_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_u8_s32
#define vreinterpretq_u8_s32(a) simde_vreinterpretq_u8_s32(a)
#endif
/* int64x2 -> uint8x16. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vreinterpretq_u8_s64(simde_int64x2_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_u8_s64(a);
#else
simde_uint8x16_private r_;
simde_int64x2_private a_ = simde_int64x2_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_u8_s64
#define vreinterpretq_u8_s64(a) simde_vreinterpretq_u8_s64(a)
#endif
/* uint16x8 -> uint8x16. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vreinterpretq_u8_u16(simde_uint16x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_u8_u16(a);
#else
simde_uint8x16_private r_;
simde_uint16x8_private a_ = simde_uint16x8_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_u8_u16
#define vreinterpretq_u8_u16(a) simde_vreinterpretq_u8_u16(a)
#endif
/* uint32x4 -> uint8x16. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vreinterpretq_u8_u32(simde_uint32x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_u8_u32(a);
#else
simde_uint8x16_private r_;
simde_uint32x4_private a_ = simde_uint32x4_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_u8_u32
#define vreinterpretq_u8_u32(a) simde_vreinterpretq_u8_u32(a)
#endif
/* uint64x2 -> uint8x16. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vreinterpretq_u8_u64(simde_uint64x2_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_u8_u64(a);
#else
simde_uint8x16_private r_;
simde_uint64x2_private a_ = simde_uint64x2_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_u8_u64
#define vreinterpretq_u8_u64(a) simde_vreinterpretq_u8_u64(a)
#endif
/* float32x4 -> uint8x16. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vreinterpretq_u8_f32(simde_float32x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_u8_f32(a);
#else
simde_uint8x16_private r_;
simde_float32x4_private a_ = simde_float32x4_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_u8_f32
#define vreinterpretq_u8_f32(a) simde_vreinterpretq_u8_f32(a)
#endif
/* float64x2 -> uint8x16; native intrinsic exists only on AArch64 (A64V8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vreinterpretq_u8_f64(simde_float64x2_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vreinterpretq_u8_f64(a);
#else
simde_uint8x16_private r_;
simde_float64x2_private a_ = simde_float64x2_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_u8_f64
#define vreinterpretq_u8_f64(a) simde_vreinterpretq_u8_f64(a)
#endif
/* vreinterpret_u16_*: reinterpret a 64-bit NEON vector's bits as uint16x4.
 * No value conversion; native intrinsic when available, else a
 * strict-aliasing-safe memcpy through the _private views. */
/* int8x8 -> uint16x4. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vreinterpret_u16_s8(simde_int8x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_u16_s8(a);
#else
simde_uint16x4_private r_;
simde_int8x8_private a_ = simde_int8x8_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_u16_s8
#define vreinterpret_u16_s8 simde_vreinterpret_u16_s8
#endif
/* int16x4 -> uint16x4 (signedness change only). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vreinterpret_u16_s16(simde_int16x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_u16_s16(a);
#else
simde_uint16x4_private r_;
simde_int16x4_private a_ = simde_int16x4_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_u16_s16
#define vreinterpret_u16_s16 simde_vreinterpret_u16_s16
#endif
/* int32x2 -> uint16x4. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vreinterpret_u16_s32(simde_int32x2_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_u16_s32(a);
#else
simde_uint16x4_private r_;
simde_int32x2_private a_ = simde_int32x2_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_u16_s32
#define vreinterpret_u16_s32 simde_vreinterpret_u16_s32
#endif
/* int64x1 -> uint16x4. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vreinterpret_u16_s64(simde_int64x1_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_u16_s64(a);
#else
simde_uint16x4_private r_;
simde_int64x1_private a_ = simde_int64x1_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_u16_s64
#define vreinterpret_u16_s64 simde_vreinterpret_u16_s64
#endif
/* uint8x8 -> uint16x4. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vreinterpret_u16_u8(simde_uint8x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_u16_u8(a);
#else
simde_uint16x4_private r_;
simde_uint8x8_private a_ = simde_uint8x8_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_u16_u8
#define vreinterpret_u16_u8 simde_vreinterpret_u16_u8
#endif
/* uint32x2 -> uint16x4. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vreinterpret_u16_u32(simde_uint32x2_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_u16_u32(a);
#else
simde_uint16x4_private r_;
simde_uint32x2_private a_ = simde_uint32x2_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_u16_u32
#define vreinterpret_u16_u32 simde_vreinterpret_u16_u32
#endif
/* uint64x1 -> uint16x4. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vreinterpret_u16_u64(simde_uint64x1_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_u16_u64(a);
#else
simde_uint16x4_private r_;
simde_uint64x1_private a_ = simde_uint64x1_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_u16_u64
#define vreinterpret_u16_u64 simde_vreinterpret_u16_u64
#endif
/* float16x4 -> uint16x4; native path additionally requires FP16 support. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vreinterpret_u16_f16(simde_float16x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_ARM_NEON_FP16)
return vreinterpret_u16_f16(a);
#else
simde_uint16x4_private r_;
simde_float16x4_private a_ = simde_float16x4_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_u16_f16
#define vreinterpret_u16_f16(a) simde_vreinterpret_u16_f16(a)
#endif
/* float32x2 -> uint16x4. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vreinterpret_u16_f32(simde_float32x2_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_u16_f32(a);
#else
simde_uint16x4_private r_;
simde_float32x2_private a_ = simde_float32x2_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_u16_f32
#define vreinterpret_u16_f32 simde_vreinterpret_u16_f32
#endif
/* float64x1 -> uint16x4; native intrinsic exists only on AArch64 (A64V8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vreinterpret_u16_f64(simde_float64x1_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vreinterpret_u16_f64(a);
#else
simde_uint16x4_private r_;
simde_float64x1_private a_ = simde_float64x1_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_u16_f64
#define vreinterpret_u16_f64 simde_vreinterpret_u16_f64
#endif
/* vreinterpretq_u16_*: reinterpret a 128-bit NEON vector's bits as uint16x8.
 * No value conversion; native intrinsic when available, else a
 * strict-aliasing-safe memcpy through the _private views. */
/* int8x16 -> uint16x8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vreinterpretq_u16_s8(simde_int8x16_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_u16_s8(a);
#else
simde_uint16x8_private r_;
simde_int8x16_private a_ = simde_int8x16_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_u16_s8
#define vreinterpretq_u16_s8(a) simde_vreinterpretq_u16_s8(a)
#endif
/* int16x8 -> uint16x8 (signedness change only). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vreinterpretq_u16_s16(simde_int16x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_u16_s16(a);
#else
simde_uint16x8_private r_;
simde_int16x8_private a_ = simde_int16x8_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_u16_s16
#define vreinterpretq_u16_s16(a) simde_vreinterpretq_u16_s16(a)
#endif
/* int32x4 -> uint16x8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vreinterpretq_u16_s32(simde_int32x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_u16_s32(a);
#else
simde_uint16x8_private r_;
simde_int32x4_private a_ = simde_int32x4_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_u16_s32
#define vreinterpretq_u16_s32(a) simde_vreinterpretq_u16_s32(a)
#endif
/* int64x2 -> uint16x8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vreinterpretq_u16_s64(simde_int64x2_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_u16_s64(a);
#else
simde_uint16x8_private r_;
simde_int64x2_private a_ = simde_int64x2_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_u16_s64
#define vreinterpretq_u16_s64(a) simde_vreinterpretq_u16_s64(a)
#endif
/* uint8x16 -> uint16x8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vreinterpretq_u16_u8(simde_uint8x16_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_u16_u8(a);
#else
simde_uint16x8_private r_;
simde_uint8x16_private a_ = simde_uint8x16_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_u16_u8
#define vreinterpretq_u16_u8(a) simde_vreinterpretq_u16_u8(a)
#endif
/* uint32x4 -> uint16x8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vreinterpretq_u16_u32(simde_uint32x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_u16_u32(a);
#else
simde_uint16x8_private r_;
simde_uint32x4_private a_ = simde_uint32x4_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_u16_u32
#define vreinterpretq_u16_u32(a) simde_vreinterpretq_u16_u32(a)
#endif
/* uint64x2 -> uint16x8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vreinterpretq_u16_u64(simde_uint64x2_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_u16_u64(a);
#else
simde_uint16x8_private r_;
simde_uint64x2_private a_ = simde_uint64x2_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_u16_u64
#define vreinterpretq_u16_u64(a) simde_vreinterpretq_u16_u64(a)
#endif
/* float32x4 -> uint16x8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vreinterpretq_u16_f32(simde_float32x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpretq_u16_f32(a);
#else
simde_uint16x8_private r_;
simde_float32x4_private a_ = simde_float32x4_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_u16_f32
#define vreinterpretq_u16_f32(a) simde_vreinterpretq_u16_f32(a)
#endif
/* float64x2 -> uint16x8; native intrinsic exists only on AArch64 (A64V8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vreinterpretq_u16_f64(simde_float64x2_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vreinterpretq_u16_f64(a);
#else
simde_uint16x8_private r_;
simde_float64x2_private a_ = simde_float64x2_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_u16_f64
#define vreinterpretq_u16_f64(a) simde_vreinterpretq_u16_f64(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vreinterpret_u32_s8(simde_int8x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_u32_s8(a);
#else
simde_uint32x2_private r_;
simde_int8x8_private a_ = simde_int8x8_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_u32_s8
#define vreinterpret_u32_s8 simde_vreinterpret_u32_s8
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vreinterpret_u32_s16(simde_int16x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vreinterpret_u32_s16(a);
#else
simde_uint32x2_private r_;
simde_int16x4_private a_ = simde_int16x4_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_uint32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vreinterpret_u32_s16
#define vreinterpret_u32_s16 simde_vreinterpret_u32_s16
#endif
/* Reinterpret the 64 bits of a simde_int32x2_t as simde_uint32x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vreinterpret_u32_s32(simde_int32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_u32_s32(a);
  #else
    simde_int32x2_private src_ = simde_int32x2_to_private(a);
    simde_uint32x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint32x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_u32_s32
  #define vreinterpret_u32_s32 simde_vreinterpret_u32_s32
#endif
/* Reinterpret the 64 bits of a simde_int64x1_t as simde_uint32x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vreinterpret_u32_s64(simde_int64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_u32_s64(a);
  #else
    simde_int64x1_private src_ = simde_int64x1_to_private(a);
    simde_uint32x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint32x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_u32_s64
  #define vreinterpret_u32_s64 simde_vreinterpret_u32_s64
#endif
/* Reinterpret the 64 bits of a simde_uint8x8_t as simde_uint32x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vreinterpret_u32_u8(simde_uint8x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_u32_u8(a);
  #else
    simde_uint8x8_private src_ = simde_uint8x8_to_private(a);
    simde_uint32x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint32x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_u32_u8
  #define vreinterpret_u32_u8 simde_vreinterpret_u32_u8
#endif
/* Reinterpret the 64 bits of a simde_uint16x4_t as simde_uint32x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vreinterpret_u32_u16(simde_uint16x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_u32_u16(a);
  #else
    simde_uint16x4_private src_ = simde_uint16x4_to_private(a);
    simde_uint32x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint32x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_u32_u16
  #define vreinterpret_u32_u16 simde_vreinterpret_u32_u16
#endif
/* Reinterpret the 64 bits of a simde_uint64x1_t as simde_uint32x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vreinterpret_u32_u64(simde_uint64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_u32_u64(a);
  #else
    simde_uint64x1_private src_ = simde_uint64x1_to_private(a);
    simde_uint32x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint32x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_u32_u64
  #define vreinterpret_u32_u64 simde_vreinterpret_u32_u64
#endif
/* Reinterpret the 64 bits of a simde_float32x2_t as simde_uint32x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vreinterpret_u32_f32(simde_float32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_u32_f32(a);
  #else
    simde_float32x2_private src_ = simde_float32x2_to_private(a);
    simde_uint32x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint32x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_u32_f32
  #define vreinterpret_u32_f32 simde_vreinterpret_u32_f32
#endif
/* Reinterpret the 64 bits of a simde_float64x1_t as simde_uint32x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vreinterpret_u32_f64(simde_float64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vreinterpret_u32_f64(a);
  #else
    simde_float64x1_private src_ = simde_float64x1_to_private(a);
    simde_uint32x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint32x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_u32_f64
  #define vreinterpret_u32_f64 simde_vreinterpret_u32_f64
#endif
/* Reinterpret the 128 bits of a simde_int8x16_t as simde_uint32x4_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vreinterpretq_u32_s8(simde_int8x16_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_u32_s8(a);
  #else
    simde_int8x16_private src_ = simde_int8x16_to_private(a);
    simde_uint32x4_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint32x4_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_u32_s8
  #define vreinterpretq_u32_s8(a) simde_vreinterpretq_u32_s8(a)
#endif
/* Reinterpret the 128 bits of a simde_int16x8_t as simde_uint32x4_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vreinterpretq_u32_s16(simde_int16x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_u32_s16(a);
  #else
    simde_int16x8_private src_ = simde_int16x8_to_private(a);
    simde_uint32x4_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint32x4_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_u32_s16
  #define vreinterpretq_u32_s16(a) simde_vreinterpretq_u32_s16(a)
#endif
/* Reinterpret the 128 bits of a simde_int32x4_t as simde_uint32x4_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vreinterpretq_u32_s32(simde_int32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_u32_s32(a);
  #else
    simde_int32x4_private src_ = simde_int32x4_to_private(a);
    simde_uint32x4_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint32x4_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_u32_s32
  #define vreinterpretq_u32_s32(a) simde_vreinterpretq_u32_s32(a)
#endif
/* Reinterpret the 128 bits of a simde_int64x2_t as simde_uint32x4_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vreinterpretq_u32_s64(simde_int64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_u32_s64(a);
  #else
    simde_int64x2_private src_ = simde_int64x2_to_private(a);
    simde_uint32x4_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint32x4_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_u32_s64
  #define vreinterpretq_u32_s64(a) simde_vreinterpretq_u32_s64(a)
#endif
/* Reinterpret the 128 bits of a simde_uint8x16_t as simde_uint32x4_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vreinterpretq_u32_u8(simde_uint8x16_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_u32_u8(a);
  #else
    simde_uint8x16_private src_ = simde_uint8x16_to_private(a);
    simde_uint32x4_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint32x4_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_u32_u8
  #define vreinterpretq_u32_u8(a) simde_vreinterpretq_u32_u8(a)
#endif
/* Reinterpret the 128 bits of a simde_uint16x8_t as simde_uint32x4_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vreinterpretq_u32_u16(simde_uint16x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_u32_u16(a);
  #else
    simde_uint16x8_private src_ = simde_uint16x8_to_private(a);
    simde_uint32x4_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint32x4_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_u32_u16
  #define vreinterpretq_u32_u16(a) simde_vreinterpretq_u32_u16(a)
#endif
/* Reinterpret the 128 bits of a simde_uint64x2_t as simde_uint32x4_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vreinterpretq_u32_u64(simde_uint64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_u32_u64(a);
  #else
    simde_uint64x2_private src_ = simde_uint64x2_to_private(a);
    simde_uint32x4_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint32x4_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_u32_u64
  #define vreinterpretq_u32_u64(a) simde_vreinterpretq_u32_u64(a)
#endif
/* Reinterpret the 128 bits of a simde_float16x8_t as simde_uint16x8_t (no value conversion).
 * Native path additionally requires half-precision support (SIMDE_ARM_NEON_FP16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vreinterpretq_u16_f16(simde_float16x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_ARM_NEON_FP16)
    return vreinterpretq_u16_f16(a);
  #else
    simde_float16x8_private src_ = simde_float16x8_to_private(a);
    simde_uint16x8_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint16x8_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_u16_f16
  #define vreinterpretq_u16_f16(a) simde_vreinterpretq_u16_f16(a)
#endif
/* Reinterpret the 128 bits of a simde_float32x4_t as simde_uint32x4_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vreinterpretq_u32_f32(simde_float32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_u32_f32(a);
  #else
    simde_float32x4_private src_ = simde_float32x4_to_private(a);
    simde_uint32x4_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint32x4_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_u32_f32
  #define vreinterpretq_u32_f32(a) simde_vreinterpretq_u32_f32(a)
#endif
/* Reinterpret the 128 bits of a simde_float64x2_t as simde_uint32x4_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vreinterpretq_u32_f64(simde_float64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vreinterpretq_u32_f64(a);
  #else
    simde_float64x2_private src_ = simde_float64x2_to_private(a);
    simde_uint32x4_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint32x4_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_u32_f64
  #define vreinterpretq_u32_f64(a) simde_vreinterpretq_u32_f64(a)
#endif
/* Reinterpret the 64 bits of a simde_int8x8_t as simde_uint64x1_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vreinterpret_u64_s8(simde_int8x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_u64_s8(a);
  #else
    simde_int8x8_private src_ = simde_int8x8_to_private(a);
    simde_uint64x1_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint64x1_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_u64_s8
  #define vreinterpret_u64_s8 simde_vreinterpret_u64_s8
#endif
/* Reinterpret the 64 bits of a simde_int16x4_t as simde_uint64x1_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vreinterpret_u64_s16(simde_int16x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_u64_s16(a);
  #else
    simde_int16x4_private src_ = simde_int16x4_to_private(a);
    simde_uint64x1_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint64x1_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_u64_s16
  #define vreinterpret_u64_s16 simde_vreinterpret_u64_s16
#endif
/* Reinterpret the 64 bits of a simde_int32x2_t as simde_uint64x1_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vreinterpret_u64_s32(simde_int32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_u64_s32(a);
  #else
    simde_int32x2_private src_ = simde_int32x2_to_private(a);
    simde_uint64x1_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint64x1_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_u64_s32
  #define vreinterpret_u64_s32 simde_vreinterpret_u64_s32
#endif
/* Reinterpret the 64 bits of a simde_int64x1_t as simde_uint64x1_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vreinterpret_u64_s64(simde_int64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_u64_s64(a);
  #else
    simde_int64x1_private src_ = simde_int64x1_to_private(a);
    simde_uint64x1_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint64x1_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_u64_s64
  #define vreinterpret_u64_s64 simde_vreinterpret_u64_s64
#endif
/* Reinterpret the 64 bits of a simde_uint8x8_t as simde_uint64x1_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vreinterpret_u64_u8(simde_uint8x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_u64_u8(a);
  #else
    simde_uint8x8_private src_ = simde_uint8x8_to_private(a);
    simde_uint64x1_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint64x1_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_u64_u8
  #define vreinterpret_u64_u8 simde_vreinterpret_u64_u8
#endif
/* Reinterpret the 64 bits of a simde_uint16x4_t as simde_uint64x1_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vreinterpret_u64_u16(simde_uint16x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_u64_u16(a);
  #else
    simde_uint16x4_private src_ = simde_uint16x4_to_private(a);
    simde_uint64x1_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint64x1_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_u64_u16
  #define vreinterpret_u64_u16 simde_vreinterpret_u64_u16
#endif
/* Reinterpret the 64 bits of a simde_uint32x2_t as simde_uint64x1_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vreinterpret_u64_u32(simde_uint32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_u64_u32(a);
  #else
    simde_uint32x2_private src_ = simde_uint32x2_to_private(a);
    simde_uint64x1_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint64x1_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_u64_u32
  #define vreinterpret_u64_u32 simde_vreinterpret_u64_u32
#endif
/* Reinterpret the 64 bits of a simde_float32x2_t as simde_uint64x1_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vreinterpret_u64_f32(simde_float32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_u64_f32(a);
  #else
    simde_float32x2_private src_ = simde_float32x2_to_private(a);
    simde_uint64x1_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint64x1_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_u64_f32
  #define vreinterpret_u64_f32 simde_vreinterpret_u64_f32
#endif
/* Reinterpret the 64 bits of a simde_float64x1_t as simde_uint64x1_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vreinterpret_u64_f64(simde_float64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vreinterpret_u64_f64(a);
  #else
    simde_float64x1_private src_ = simde_float64x1_to_private(a);
    simde_uint64x1_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint64x1_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_u64_f64
  #define vreinterpret_u64_f64 simde_vreinterpret_u64_f64
#endif
/* Reinterpret the 128 bits of a simde_int8x16_t as simde_uint64x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vreinterpretq_u64_s8(simde_int8x16_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_u64_s8(a);
  #else
    simde_int8x16_private src_ = simde_int8x16_to_private(a);
    simde_uint64x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint64x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_u64_s8
  #define vreinterpretq_u64_s8(a) simde_vreinterpretq_u64_s8(a)
#endif
/* Reinterpret the 128 bits of a simde_int16x8_t as simde_uint64x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vreinterpretq_u64_s16(simde_int16x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_u64_s16(a);
  #else
    simde_int16x8_private src_ = simde_int16x8_to_private(a);
    simde_uint64x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint64x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_u64_s16
  #define vreinterpretq_u64_s16(a) simde_vreinterpretq_u64_s16(a)
#endif
/* Reinterpret the 128 bits of a simde_int32x4_t as simde_uint64x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vreinterpretq_u64_s32(simde_int32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_u64_s32(a);
  #else
    simde_int32x4_private src_ = simde_int32x4_to_private(a);
    simde_uint64x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint64x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_u64_s32
  #define vreinterpretq_u64_s32(a) simde_vreinterpretq_u64_s32(a)
#endif
/* Reinterpret the 128 bits of a simde_int64x2_t as simde_uint64x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vreinterpretq_u64_s64(simde_int64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_u64_s64(a);
  #else
    simde_int64x2_private src_ = simde_int64x2_to_private(a);
    simde_uint64x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint64x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_u64_s64
  #define vreinterpretq_u64_s64(a) simde_vreinterpretq_u64_s64(a)
#endif
/* Reinterpret the 128 bits of a simde_uint8x16_t as simde_uint64x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vreinterpretq_u64_u8(simde_uint8x16_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_u64_u8(a);
  #else
    simde_uint8x16_private src_ = simde_uint8x16_to_private(a);
    simde_uint64x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint64x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_u64_u8
  #define vreinterpretq_u64_u8(a) simde_vreinterpretq_u64_u8(a)
#endif
/* Reinterpret the 128 bits of a simde_uint16x8_t as simde_uint64x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vreinterpretq_u64_u16(simde_uint16x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_u64_u16(a);
  #else
    simde_uint16x8_private src_ = simde_uint16x8_to_private(a);
    simde_uint64x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint64x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_u64_u16
  #define vreinterpretq_u64_u16(a) simde_vreinterpretq_u64_u16(a)
#endif
/* Reinterpret the 128 bits of a simde_uint32x4_t as simde_uint64x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vreinterpretq_u64_u32(simde_uint32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_u64_u32(a);
  #else
    simde_uint32x4_private src_ = simde_uint32x4_to_private(a);
    simde_uint64x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint64x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_u64_u32
  #define vreinterpretq_u64_u32(a) simde_vreinterpretq_u64_u32(a)
#endif
/* Reinterpret the 128 bits of a simde_float32x4_t as simde_uint64x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vreinterpretq_u64_f32(simde_float32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_u64_f32(a);
  #else
    simde_float32x4_private src_ = simde_float32x4_to_private(a);
    simde_uint64x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint64x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_u64_f32
  #define vreinterpretq_u64_f32(a) simde_vreinterpretq_u64_f32(a)
#endif
/* Reinterpret the 128 bits of a simde_float64x2_t as simde_uint64x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vreinterpretq_u64_f64(simde_float64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vreinterpretq_u64_f64(a);
  #else
    simde_float64x2_private src_ = simde_float64x2_to_private(a);
    simde_uint64x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_uint64x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_u64_f64
  #define vreinterpretq_u64_f64(a) simde_vreinterpretq_u64_f64(a)
#endif
/* Reinterpret the 64 bits of a simde_int8x8_t as simde_float32x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vreinterpret_f32_s8(simde_int8x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_f32_s8(a);
  #else
    simde_int8x8_private src_ = simde_int8x8_to_private(a);
    simde_float32x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float32x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_f32_s8
  #define vreinterpret_f32_s8 simde_vreinterpret_f32_s8
#endif
/* Reinterpret the 64 bits of a simde_int16x4_t as simde_float32x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vreinterpret_f32_s16(simde_int16x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_f32_s16(a);
  #else
    simde_int16x4_private src_ = simde_int16x4_to_private(a);
    simde_float32x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float32x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_f32_s16
  #define vreinterpret_f32_s16 simde_vreinterpret_f32_s16
#endif
/* Reinterpret the 64 bits of a simde_int32x2_t as simde_float32x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vreinterpret_f32_s32(simde_int32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_f32_s32(a);
  #else
    simde_int32x2_private src_ = simde_int32x2_to_private(a);
    simde_float32x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float32x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_f32_s32
  #define vreinterpret_f32_s32 simde_vreinterpret_f32_s32
#endif
/* Reinterpret the 64 bits of a simde_int64x1_t as simde_float32x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vreinterpret_f32_s64(simde_int64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_f32_s64(a);
  #else
    simde_int64x1_private src_ = simde_int64x1_to_private(a);
    simde_float32x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float32x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_f32_s64
  #define vreinterpret_f32_s64 simde_vreinterpret_f32_s64
#endif
/* Reinterpret the 64 bits of a simde_uint8x8_t as simde_float32x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vreinterpret_f32_u8(simde_uint8x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_f32_u8(a);
  #else
    simde_uint8x8_private src_ = simde_uint8x8_to_private(a);
    simde_float32x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float32x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_f32_u8
  #define vreinterpret_f32_u8 simde_vreinterpret_f32_u8
#endif
/* Reinterpret the 64 bits of a simde_uint16x4_t as simde_float32x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vreinterpret_f32_u16(simde_uint16x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_f32_u16(a);
  #else
    simde_uint16x4_private src_ = simde_uint16x4_to_private(a);
    simde_float32x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float32x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_f32_u16
  #define vreinterpret_f32_u16 simde_vreinterpret_f32_u16
#endif
/* Reinterpret the 64 bits of a simde_uint16x4_t as simde_float16x4_t (no value conversion).
 * Native path additionally requires half-precision support (SIMDE_ARM_NEON_FP16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float16x4_t
simde_vreinterpret_f16_u16(simde_uint16x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_ARM_NEON_FP16)
    return vreinterpret_f16_u16(a);
  #else
    simde_uint16x4_private src_ = simde_uint16x4_to_private(a);
    simde_float16x4_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float16x4_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_f16_u16
  #define vreinterpret_f16_u16(a) simde_vreinterpret_f16_u16(a)
#endif
/* Reinterpret the 64 bits of a simde_uint32x2_t as simde_float32x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vreinterpret_f32_u32(simde_uint32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_f32_u32(a);
  #else
    simde_uint32x2_private src_ = simde_uint32x2_to_private(a);
    simde_float32x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float32x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_f32_u32
  #define vreinterpret_f32_u32 simde_vreinterpret_f32_u32
#endif
/* Reinterpret the 64 bits of a simde_uint64x1_t as simde_float32x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vreinterpret_f32_u64(simde_uint64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpret_f32_u64(a);
  #else
    simde_uint64x1_private src_ = simde_uint64x1_to_private(a);
    simde_float32x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float32x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_f32_u64
  #define vreinterpret_f32_u64 simde_vreinterpret_f32_u64
#endif
/* Reinterpret the 64 bits of a simde_float64x1_t as simde_float32x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vreinterpret_f32_f64(simde_float64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vreinterpret_f32_f64(a);
  #else
    simde_float64x1_private src_ = simde_float64x1_to_private(a);
    simde_float32x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float32x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_f32_f64
  #define vreinterpret_f32_f64 simde_vreinterpret_f32_f64
#endif
/* Reinterpret the 128 bits of a simde_int8x16_t as simde_float32x4_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vreinterpretq_f32_s8(simde_int8x16_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_f32_s8(a);
  #else
    simde_int8x16_private src_ = simde_int8x16_to_private(a);
    simde_float32x4_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float32x4_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_f32_s8
  #define vreinterpretq_f32_s8(a) simde_vreinterpretq_f32_s8(a)
#endif
/* Reinterpret the 128 bits of a simde_int16x8_t as simde_float32x4_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vreinterpretq_f32_s16(simde_int16x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_f32_s16(a);
  #else
    simde_int16x8_private src_ = simde_int16x8_to_private(a);
    simde_float32x4_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float32x4_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_f32_s16
  #define vreinterpretq_f32_s16(a) simde_vreinterpretq_f32_s16(a)
#endif
/* Reinterpret the 128 bits of a simde_int32x4_t as simde_float32x4_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vreinterpretq_f32_s32(simde_int32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_f32_s32(a);
  #else
    simde_int32x4_private src_ = simde_int32x4_to_private(a);
    simde_float32x4_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float32x4_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_f32_s32
  #define vreinterpretq_f32_s32(a) simde_vreinterpretq_f32_s32(a)
#endif
/* Reinterpret the 128 bits of a simde_int64x2_t as simde_float32x4_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vreinterpretq_f32_s64(simde_int64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_f32_s64(a);
  #else
    simde_int64x2_private src_ = simde_int64x2_to_private(a);
    simde_float32x4_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float32x4_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_f32_s64
  #define vreinterpretq_f32_s64(a) simde_vreinterpretq_f32_s64(a)
#endif
/* Reinterpret the 128 bits of a simde_uint8x16_t as simde_float32x4_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vreinterpretq_f32_u8(simde_uint8x16_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_f32_u8(a);
  #else
    simde_uint8x16_private src_ = simde_uint8x16_to_private(a);
    simde_float32x4_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float32x4_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_f32_u8
  #define vreinterpretq_f32_u8(a) simde_vreinterpretq_f32_u8(a)
#endif
/* Reinterpret the 128 bits of a simde_uint16x8_t as simde_float32x4_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vreinterpretq_f32_u16(simde_uint16x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_f32_u16(a);
  #else
    simde_uint16x8_private src_ = simde_uint16x8_to_private(a);
    simde_float32x4_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float32x4_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_f32_u16
  #define vreinterpretq_f32_u16(a) simde_vreinterpretq_f32_u16(a)
#endif
/* Reinterpret the 128 bits of a simde_uint16x8_t as simde_float16x8_t (no value conversion).
 * Native path additionally requires half-precision support (SIMDE_ARM_NEON_FP16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float16x8_t
simde_vreinterpretq_f16_u16(simde_uint16x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_ARM_NEON_FP16)
    return vreinterpretq_f16_u16(a);
  #else
    simde_uint16x8_private src_ = simde_uint16x8_to_private(a);
    simde_float16x8_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float16x8_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_f16_u16
  #define vreinterpretq_f16_u16(a) simde_vreinterpretq_f16_u16(a)
#endif
/* Reinterpret the 128 bits of a simde_uint32x4_t as simde_float32x4_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vreinterpretq_f32_u32(simde_uint32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_f32_u32(a);
  #else
    simde_uint32x4_private src_ = simde_uint32x4_to_private(a);
    simde_float32x4_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float32x4_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_f32_u32
  #define vreinterpretq_f32_u32(a) simde_vreinterpretq_f32_u32(a)
#endif
/* Reinterpret the 128 bits of a simde_uint64x2_t as simde_float32x4_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vreinterpretq_f32_u64(simde_uint64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vreinterpretq_f32_u64(a);
  #else
    simde_uint64x2_private src_ = simde_uint64x2_to_private(a);
    simde_float32x4_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float32x4_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_f32_u64
  #define vreinterpretq_f32_u64(a) simde_vreinterpretq_f32_u64(a)
#endif
/* Reinterpret the 128 bits of a simde_float64x2_t as simde_float32x4_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vreinterpretq_f32_f64(simde_float64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vreinterpretq_f32_f64(a);
  #else
    simde_float64x2_private src_ = simde_float64x2_to_private(a);
    simde_float32x4_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float32x4_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_f32_f64
  #define vreinterpretq_f32_f64(a) simde_vreinterpretq_f32_f64(a)
#endif
/* Reinterpret the 64 bits of a simde_int8x8_t as simde_float64x1_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vreinterpret_f64_s8(simde_int8x8_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vreinterpret_f64_s8(a);
  #else
    simde_int8x8_private src_ = simde_int8x8_to_private(a);
    simde_float64x1_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float64x1_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_f64_s8
  #define vreinterpret_f64_s8 simde_vreinterpret_f64_s8
#endif
/* Reinterpret the 64 bits of a simde_int16x4_t as simde_float64x1_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vreinterpret_f64_s16(simde_int16x4_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vreinterpret_f64_s16(a);
  #else
    simde_int16x4_private src_ = simde_int16x4_to_private(a);
    simde_float64x1_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float64x1_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_f64_s16
  #define vreinterpret_f64_s16 simde_vreinterpret_f64_s16
#endif
/* Reinterpret the 64 bits of a simde_int32x2_t as simde_float64x1_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vreinterpret_f64_s32(simde_int32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vreinterpret_f64_s32(a);
  #else
    simde_int32x2_private src_ = simde_int32x2_to_private(a);
    simde_float64x1_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float64x1_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_f64_s32
  #define vreinterpret_f64_s32 simde_vreinterpret_f64_s32
#endif
/* Reinterpret the 64 bits of a simde_int64x1_t as simde_float64x1_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vreinterpret_f64_s64(simde_int64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vreinterpret_f64_s64(a);
  #else
    simde_int64x1_private src_ = simde_int64x1_to_private(a);
    simde_float64x1_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float64x1_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_f64_s64
  #define vreinterpret_f64_s64 simde_vreinterpret_f64_s64
#endif
/* Reinterpret the 64 bits of a simde_uint8x8_t as simde_float64x1_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vreinterpret_f64_u8(simde_uint8x8_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vreinterpret_f64_u8(a);
  #else
    simde_uint8x8_private src_ = simde_uint8x8_to_private(a);
    simde_float64x1_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float64x1_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_f64_u8
  #define vreinterpret_f64_u8 simde_vreinterpret_f64_u8
#endif
/* Reinterpret the 64 bits of a simde_uint16x4_t as simde_float64x1_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vreinterpret_f64_u16(simde_uint16x4_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vreinterpret_f64_u16(a);
  #else
    simde_uint16x4_private src_ = simde_uint16x4_to_private(a);
    simde_float64x1_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float64x1_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_f64_u16
  #define vreinterpret_f64_u16 simde_vreinterpret_f64_u16
#endif
/* Reinterpret the 64 bits of a simde_uint32x2_t as simde_float64x1_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vreinterpret_f64_u32(simde_uint32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vreinterpret_f64_u32(a);
  #else
    simde_uint32x2_private src_ = simde_uint32x2_to_private(a);
    simde_float64x1_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float64x1_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_f64_u32
  #define vreinterpret_f64_u32 simde_vreinterpret_f64_u32
#endif
/* Reinterpret the 64 bits of a simde_uint64x1_t as simde_float64x1_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vreinterpret_f64_u64(simde_uint64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vreinterpret_f64_u64(a);
  #else
    simde_uint64x1_private src_ = simde_uint64x1_to_private(a);
    simde_float64x1_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float64x1_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_f64_u64
  #define vreinterpret_f64_u64 simde_vreinterpret_f64_u64
#endif
/* Reinterpret the 64 bits of a simde_float32x2_t as simde_float64x1_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vreinterpret_f64_f32(simde_float32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vreinterpret_f64_f32(a);
  #else
    simde_float32x2_private src_ = simde_float32x2_to_private(a);
    simde_float64x1_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float64x1_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vreinterpret_f64_f32
  #define vreinterpret_f64_f32 simde_vreinterpret_f64_f32
#endif
/* Reinterpret the 128 bits of a simde_int8x16_t as simde_float64x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vreinterpretq_f64_s8(simde_int8x16_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vreinterpretq_f64_s8(a);
  #else
    simde_int8x16_private src_ = simde_int8x16_to_private(a);
    simde_float64x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float64x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_f64_s8
  #define vreinterpretq_f64_s8(a) simde_vreinterpretq_f64_s8(a)
#endif
/* Reinterpret the 128 bits of a simde_int16x8_t as simde_float64x2_t (no value conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vreinterpretq_f64_s16(simde_int16x8_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vreinterpretq_f64_s16(a);
  #else
    simde_int16x8_private src_ = simde_int16x8_to_private(a);
    simde_float64x2_private dst_;
    simde_memcpy(&dst_, &src_, sizeof(dst_));
    return simde_float64x2_from_private(dst_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vreinterpretq_f64_s16
  #define vreinterpretq_f64_s16(a) simde_vreinterpretq_f64_s16(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vreinterpretq_f64_s32(simde_int32x4_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vreinterpretq_f64_s32(a);
#else
simde_float64x2_private r_;
simde_int32x4_private a_ = simde_int32x4_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_float64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_f64_s32
#define vreinterpretq_f64_s32(a) simde_vreinterpretq_f64_s32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vreinterpretq_f64_s64(simde_int64x2_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vreinterpretq_f64_s64(a);
#else
simde_float64x2_private r_;
simde_int64x2_private a_ = simde_int64x2_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_float64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_f64_s64
#define vreinterpretq_f64_s64(a) simde_vreinterpretq_f64_s64(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vreinterpretq_f64_u8(simde_uint8x16_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vreinterpretq_f64_u8(a);
#else
simde_float64x2_private r_;
simde_uint8x16_private a_ = simde_uint8x16_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_float64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_f64_u8
#define vreinterpretq_f64_u8(a) simde_vreinterpretq_f64_u8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vreinterpretq_f64_u16(simde_uint16x8_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vreinterpretq_f64_u16(a);
#else
simde_float64x2_private r_;
simde_uint16x8_private a_ = simde_uint16x8_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_float64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_f64_u16
#define vreinterpretq_f64_u16(a) simde_vreinterpretq_f64_u16(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vreinterpretq_f64_u32(simde_uint32x4_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vreinterpretq_f64_u32(a);
#else
simde_float64x2_private r_;
simde_uint32x4_private a_ = simde_uint32x4_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_float64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_f64_u32
#define vreinterpretq_f64_u32(a) simde_vreinterpretq_f64_u32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vreinterpretq_f64_u64(simde_uint64x2_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vreinterpretq_f64_u64(a);
#else
simde_float64x2_private r_;
simde_uint64x2_private a_ = simde_uint64x2_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_float64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_f64_u64
#define vreinterpretq_f64_u64(a) simde_vreinterpretq_f64_u64(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vreinterpretq_f64_f32(simde_float32x4_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vreinterpretq_f64_f32(a);
#else
simde_float64x2_private r_;
simde_float32x4_private a_ = simde_float32x4_to_private(a);
simde_memcpy(&r_, &a_, sizeof(r_));
return simde_float64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vreinterpretq_f64_f32
#define vreinterpretq_f64_f32(a) simde_vreinterpretq_f64_f32(a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/mul.h | .h | 16,404 | 580 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_MUL_H)
#define SIMDE_ARM_NEON_MUL_H
#include "types.h"
#include "reinterpret.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Element-wise multiply for 64-bit (D-register) NEON vectors: simde_vmul_*.
 * Dispatch order in each function: native ARM intrinsic when available,
 * then GCC/Clang vector-extension arithmetic (SIMDE_VECTOR_SUBSCRIPT_OPS),
 * then a scalar loop. The x-prefixed s64/u64 variants are simde-internal
 * helpers — ARM NEON has no vmul for 64-bit integer lanes.
 * NOTE(review): the !defined(SIMDE_BUG_GCC_100762) guards on the 8/16/32-bit
 * integer paths appear to work around a GCC codegen bug with 64-bit vector
 * extensions — confirm against the simde bug list before changing them. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vmul_f32(simde_float32x2_t a, simde_float32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmul_f32(a, b);
#else
simde_float32x2_private
r_,
a_ = simde_float32x2_to_private(a),
b_ = simde_float32x2_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values * b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[i];
}
#endif
return simde_float32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmul_f32
#define vmul_f32(a, b) simde_vmul_f32((a), (b))
#endif
/* float64x1: single double lane; native only on AArch64. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vmul_f64(simde_float64x1_t a, simde_float64x1_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmul_f64(a, b);
#else
simde_float64x1_private
r_,
a_ = simde_float64x1_to_private(a),
b_ = simde_float64x1_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values * b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[i];
}
#endif
return simde_float64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmul_f64
#define vmul_f64(a, b) simde_vmul_f64((a), (b))
#endif
/* int8x8 lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vmul_s8(simde_int8x8_t a, simde_int8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmul_s8(a, b);
#else
simde_int8x8_private
r_,
a_ = simde_int8x8_to_private(a),
b_ = simde_int8x8_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
r_.values = a_.values * b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[i];
}
#endif
return simde_int8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmul_s8
#define vmul_s8(a, b) simde_vmul_s8((a), (b))
#endif
/* int16x4 lanes; on x86 this maps directly onto the MMX packed
 * 16-bit low-half multiply. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vmul_s16(simde_int16x4_t a, simde_int16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmul_s16(a, b);
#else
simde_int16x4_private
r_,
a_ = simde_int16x4_to_private(a),
b_ = simde_int16x4_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _m_pmullw(a_.m64, b_.m64);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
r_.values = a_.values * b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[i];
}
#endif
return simde_int16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmul_s16
#define vmul_s16(a, b) simde_vmul_s16((a), (b))
#endif
/* int32x2 lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vmul_s32(simde_int32x2_t a, simde_int32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmul_s32(a, b);
#else
simde_int32x2_private
r_,
a_ = simde_int32x2_to_private(a),
b_ = simde_int32x2_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
r_.values = a_.values * b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[i];
}
#endif
return simde_int32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmul_s32
#define vmul_s32(a, b) simde_vmul_s32((a), (b))
#endif
/* Internal helper: int64x1 multiply (no NEON equivalent, so no native
 * path and no alias macro). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_x_vmul_s64(simde_int64x1_t a, simde_int64x1_t b) {
simde_int64x1_private
r_,
a_ = simde_int64x1_to_private(a),
b_ = simde_int64x1_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values * b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[i];
}
#endif
return simde_int64x1_from_private(r_);
}
/* uint8x8 lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vmul_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmul_u8(a, b);
#else
simde_uint8x8_private
r_,
a_ = simde_uint8x8_to_private(a),
b_ = simde_uint8x8_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
r_.values = a_.values * b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[i];
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmul_u8
#define vmul_u8(a, b) simde_vmul_u8((a), (b))
#endif
/* uint16x4 lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vmul_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmul_u16(a, b);
#else
simde_uint16x4_private
r_,
a_ = simde_uint16x4_to_private(a),
b_ = simde_uint16x4_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
r_.values = a_.values * b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[i];
}
#endif
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmul_u16
#define vmul_u16(a, b) simde_vmul_u16((a), (b))
#endif
/* uint32x2 lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vmul_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmul_u32(a, b);
#else
simde_uint32x2_private
r_,
a_ = simde_uint32x2_to_private(a),
b_ = simde_uint32x2_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
r_.values = a_.values * b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[i];
}
#endif
return simde_uint32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmul_u32
#define vmul_u32(a, b) simde_vmul_u32((a), (b))
#endif
/* Internal helper: uint64x1 multiply (no NEON equivalent). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_x_vmul_u64(simde_uint64x1_t a, simde_uint64x1_t b) {
simde_uint64x1_private
r_,
a_ = simde_uint64x1_to_private(a),
b_ = simde_uint64x1_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values * b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[i];
}
#endif
return simde_uint64x1_from_private(r_);
}
/* Element-wise multiply for 128-bit (Q-register) NEON vectors: simde_vmulq_*.
 * Same dispatch shape as the D-register variants: native ARM intrinsic,
 * then an x86/WASM/AltiVec fast path where one exists, then vector
 * extensions, then a scalar loop. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vmulq_f32(simde_float32x4_t a, simde_float32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmulq_f32(a, b);
#else
simde_float32x4_private
r_,
a_ = simde_float32x4_to_private(a),
b_ = simde_float32x4_to_private(b);
#if defined(SIMDE_X86_SSE_NATIVE)
r_.m128 = _mm_mul_ps(a_.m128, b_.m128);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_f32x4_mul(a_.v128, b_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values * b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[i];
}
#endif
return simde_float32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmulq_f32
#define vmulq_f32(a, b) simde_vmulq_f32((a), (b))
#endif
/* float64x2: native only on AArch64; SSE2/WASM fast paths otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vmulq_f64(simde_float64x2_t a, simde_float64x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmulq_f64(a, b);
#else
simde_float64x2_private
r_,
a_ = simde_float64x2_to_private(a),
b_ = simde_float64x2_to_private(b);
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128d = _mm_mul_pd(a_.m128d, b_.m128d);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_f64x2_mul(a_.v128, b_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values * b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[i];
}
#endif
return simde_float64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmulq_f64
#define vmulq_f64(a, b) simde_vmulq_f64((a), (b))
#endif
/* int8x16: x86 has no 8-bit packed multiply, so the SSE2 path builds it
 * from two 16-bit multiplies — one on the even (low) bytes of each 16-bit
 * pair, one on the odd (high) bytes — and recombines the low byte of each
 * product. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vmulq_s8(simde_int8x16_t a, simde_int8x16_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmulq_s8(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_mul(a, b);
#else
simde_int8x16_private
r_,
a_ = simde_int8x16_to_private(a),
b_ = simde_int8x16_to_private(b);
#if defined(SIMDE_X86_SSE2_NATIVE)
/* https://stackoverflow.com/a/29155682/501126 */
const __m128i dst_even = _mm_mullo_epi16(a_.m128i, b_.m128i);
r_.m128i =
_mm_or_si128(
_mm_slli_epi16(
_mm_mullo_epi16(
_mm_srli_epi16(a_.m128i, 8),
_mm_srli_epi16(b_.m128i, 8)
),
8
),
#if defined(SIMDE_X86_AVX2_NATIVE)
_mm_and_si128(dst_even, _mm_set1_epi16(0xFF))
#else
/* Pre-AVX2: clear the high byte of each 16-bit lane by shifting
 * left then right instead of masking. */
_mm_srli_epi16(
_mm_slli_epi16(dst_even, 8),
8
)
#endif
);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values * b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[i];
}
#endif
return simde_int8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmulq_s8
#define vmulq_s8(a, b) simde_vmulq_s8((a), (b))
#endif
/* int16x8: SSE2 low-half 16-bit multiply. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vmulq_s16(simde_int16x8_t a, simde_int16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmulq_s16(a, b);
#else
simde_int16x8_private
r_,
a_ = simde_int16x8_to_private(a),
b_ = simde_int16x8_to_private(b);
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_mullo_epi16(a_.m128i, b_.m128i);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values * b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[i];
}
#endif
return simde_int16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmulq_s16
#define vmulq_s16(a, b) simde_vmulq_s16((a), (b))
#endif
/* int32x4 lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vmulq_s32(simde_int32x4_t a, simde_int32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmulq_s32(a, b);
#else
simde_int32x4_private
r_,
a_ = simde_int32x4_to_private(a),
b_ = simde_int32x4_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i32x4_mul(a_.v128, b_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values * b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[i];
}
#endif
return simde_int32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmulq_s32
#define vmulq_s32(a, b) simde_vmulq_s32((a), (b))
#endif
/* Internal helper: int64x2 multiply (no NEON equivalent); uses the
 * AVX-512VL/DQ 64-bit low multiply when available. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_x_vmulq_s64(simde_int64x2_t a, simde_int64x2_t b) {
simde_int64x2_private
r_,
a_ = simde_int64x2_to_private(a),
b_ = simde_int64x2_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i64x2_mul(a_.v128, b_.v128);
#elif defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE)
r_.m128i = _mm_mullo_epi64(a_.m128i, b_.m128i);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values * b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[i];
}
#endif
return simde_int64x2_from_private(r_);
}
/* The unsigned variants delegate to the signed implementations via
 * reinterprets: a truncating (modular) multiply produces the same bit
 * pattern regardless of signedness. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vmulq_u8(simde_uint8x16_t a, simde_uint8x16_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmulq_u8(a, b);
#else
return
simde_vreinterpretq_u8_s8(
simde_vmulq_s8(
simde_vreinterpretq_s8_u8(a),
simde_vreinterpretq_s8_u8(b)
)
);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmulq_u8
#define vmulq_u8(a, b) simde_vmulq_u8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vmulq_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmulq_u16(a, b);
#else
return
simde_vreinterpretq_u16_s16(
simde_vmulq_s16(
simde_vreinterpretq_s16_u16(a),
simde_vreinterpretq_s16_u16(b)
)
);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmulq_u16
#define vmulq_u16(a, b) simde_vmulq_u16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vmulq_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmulq_u32(a, b);
#else
return
simde_vreinterpretq_u32_s32(
simde_vmulq_s32(
simde_vreinterpretq_s32_u32(a),
simde_vreinterpretq_s32_u32(b)
)
);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmulq_u32
#define vmulq_u32(a, b) simde_vmulq_u32((a), (b))
#endif
/* Internal helper: uint64x2 multiply, delegating to the signed helper. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_x_vmulq_u64(simde_uint64x2_t a, simde_uint64x2_t b) {
return
simde_vreinterpretq_u64_s64(
simde_x_vmulq_s64(
simde_vreinterpretq_s64_u64(a),
simde_vreinterpretq_s64_u64(b)
)
);
}
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_MUL_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/mlal_high.h | .h | 5,259 | 157 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_MLAL_HIGH_H)
#define SIMDE_ARM_NEON_MLAL_HIGH_H
#include "movl_high.h"
#include "mla.h"
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Widening multiply-accumulate on the HIGH halves of the inputs:
 * r = a + widen(high_half(b)) * widen(high_half(c)).
 * The narrow-lane variants widen via movl_high and reuse vmlaq; the
 * 32->64-bit variants inline the multiply-add because there is no
 * 64-bit vmlaq to delegate to. Native only on AArch64. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vmlal_high_s8(simde_int16x8_t a, simde_int8x16_t b, simde_int8x16_t c) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmlal_high_s8(a, b, c);
#else
return simde_vmlaq_s16(a, simde_vmovl_high_s8(b), simde_vmovl_high_s8(c))
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmlal_high_s8
#define vmlal_high_s8(a, b, c) simde_vmlal_high_s8((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vmlal_high_s16(simde_int32x4_t a, simde_int16x8_t b, simde_int16x8_t c) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmlal_high_s16(a, b, c);
#else
return simde_vmlaq_s32(a, simde_vmovl_high_s16(b), simde_vmovl_high_s16(c));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmlal_high_s16
#define vmlal_high_s16(a, b, c) simde_vmlal_high_s16((a), (b), (c))
#endif
/* s32 -> s64: widen both high halves, then fused multiply-add per lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vmlal_high_s32(simde_int64x2_t a, simde_int32x4_t b, simde_int32x4_t c) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmlal_high_s32(a, b, c);
#else
simde_int64x2_private
r_,
a_ = simde_int64x2_to_private(a),
b_ = simde_int64x2_to_private(simde_vmovl_high_s32(b)),
c_ = simde_int64x2_to_private(simde_vmovl_high_s32(c));
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = (b_.values * c_.values) + a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i];
}
#endif
return simde_int64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmlal_high_s32
#define vmlal_high_s32(a, b, c) simde_vmlal_high_s32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vmlal_high_u8(simde_uint16x8_t a, simde_uint8x16_t b, simde_uint8x16_t c) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmlal_high_u8(a, b, c);
#else
return simde_vmlaq_u16(a, simde_vmovl_high_u8(b), simde_vmovl_high_u8(c));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmlal_high_u8
#define vmlal_high_u8(a, b, c) simde_vmlal_high_u8((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vmlal_high_u16(simde_uint32x4_t a, simde_uint16x8_t b, simde_uint16x8_t c) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmlal_high_u16(a, b, c);
#else
return simde_vmlaq_u32(a, simde_vmovl_high_u16(b), simde_vmovl_high_u16(c));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmlal_high_u16
#define vmlal_high_u16(a, b, c) simde_vmlal_high_u16((a), (b), (c))
#endif
/* u32 -> u64: widen both high halves, then fused multiply-add per lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vmlal_high_u32(simde_uint64x2_t a, simde_uint32x4_t b, simde_uint32x4_t c) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmlal_high_u32(a, b, c);
#else
simde_uint64x2_private
r_,
a_ = simde_uint64x2_to_private(a),
b_ = simde_uint64x2_to_private(simde_vmovl_high_u32(b)),
c_ = simde_uint64x2_to_private(simde_vmovl_high_u32(c));
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = (b_.values * c_.values) + a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i];
}
#endif
return simde_uint64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmlal_high_u32
#define vmlal_high_u32(a, b, c) simde_vmlal_high_u32((a), (b), (c))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_MLAL_HIGH_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/mls_n.h | .h | 5,586 | 182 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_MLS_N_H)
#define SIMDE_ARM_NEON_MLS_N_H
#include "sub.h"
#include "dup_n.h"
#include "mls.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Multiply-subtract by a scalar: r = a - (b * c) with c broadcast to every
 * lane. Each wrapper uses the native ARMv7 intrinsic when available,
 * otherwise duplicates the scalar with vdup(q)_n and delegates to the
 * corresponding vector vmls(q). vmls_n_* are the 64-bit (D-register)
 * forms, vmlsq_n_* the 128-bit (Q-register) forms. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vmls_n_f32(simde_float32x2_t a, simde_float32x2_t b, simde_float32 c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmls_n_f32(a, b, c);
#else
return simde_vmls_f32(a, b, simde_vdup_n_f32(c));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmls_n_f32
#define vmls_n_f32(a, b, c) simde_vmls_n_f32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vmls_n_s16(simde_int16x4_t a, simde_int16x4_t b, int16_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmls_n_s16(a, b, c);
#else
return simde_vmls_s16(a, b, simde_vdup_n_s16(c));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmls_n_s16
#define vmls_n_s16(a, b, c) simde_vmls_n_s16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vmls_n_s32(simde_int32x2_t a, simde_int32x2_t b, int32_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmls_n_s32(a, b, c);
#else
return simde_vmls_s32(a, b, simde_vdup_n_s32(c));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmls_n_s32
#define vmls_n_s32(a, b, c) simde_vmls_n_s32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vmls_n_u16(simde_uint16x4_t a, simde_uint16x4_t b, uint16_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmls_n_u16(a, b, c);
#else
return simde_vmls_u16(a, b, simde_vdup_n_u16(c));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmls_n_u16
#define vmls_n_u16(a, b, c) simde_vmls_n_u16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vmls_n_u32(simde_uint32x2_t a, simde_uint32x2_t b, uint32_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmls_n_u32(a, b, c);
#else
return simde_vmls_u32(a, b, simde_vdup_n_u32(c));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmls_n_u32
#define vmls_n_u32(a, b, c) simde_vmls_n_u32((a), (b), (c))
#endif
/* 128-bit (Q-register) variants below. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vmlsq_n_f32(simde_float32x4_t a, simde_float32x4_t b, simde_float32 c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmlsq_n_f32(a, b, c);
#else
return simde_vmlsq_f32(a, b, simde_vdupq_n_f32(c));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlsq_n_f32
#define vmlsq_n_f32(a, b, c) simde_vmlsq_n_f32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vmlsq_n_s16(simde_int16x8_t a, simde_int16x8_t b, int16_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmlsq_n_s16(a, b, c);
#else
return simde_vmlsq_s16(a, b, simde_vdupq_n_s16(c));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlsq_n_s16
#define vmlsq_n_s16(a, b, c) simde_vmlsq_n_s16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vmlsq_n_s32(simde_int32x4_t a, simde_int32x4_t b, int32_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmlsq_n_s32(a, b, c);
#else
return simde_vmlsq_s32(a, b, simde_vdupq_n_s32(c));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlsq_n_s32
#define vmlsq_n_s32(a, b, c) simde_vmlsq_n_s32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vmlsq_n_u16(simde_uint16x8_t a, simde_uint16x8_t b, uint16_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmlsq_n_u16(a, b, c);
#else
return simde_vmlsq_u16(a, b, simde_vdupq_n_u16(c));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlsq_n_u16
#define vmlsq_n_u16(a, b, c) simde_vmlsq_n_u16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vmlsq_n_u32(simde_uint32x4_t a, simde_uint32x4_t b, uint32_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmlsq_n_u32(a, b, c);
#else
return simde_vmlsq_u32(a, b, simde_vdupq_n_u32(c));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlsq_n_u32
#define vmlsq_n_u32(a, b, c) simde_vmlsq_n_u32((a), (b), (c))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_MLS_N_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/dot_lane.h | .h | 16,404 | 492 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_DOT_LANE_H)
#define SIMDE_ARM_NEON_DOT_LANE_H
#include "types.h"
#include "add.h"
#include "dup_lane.h"
#include "paddl.h"
#include "movn.h"
#include "mull.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Lane-selected dot product (SDOT/UDOT emulation):
 * for each of the two 32-bit output lanes i,
 *   r[i] += sum over j of a[4*i + j] * b[4*lane + j]
 * i.e. each output lane accumulates the dot product of one 4-byte group
 * of `a` against the single 4-byte group of `b` selected by `lane`.
 * Dispatch: native dot-product intrinsic (AArch64 + __ARM_FEATURE_DOTPROD),
 * then an ARMv7 composition (dup the selected 32-bit lane of b, widening
 * multiply, pairwise-add twice, narrow, add), then a scalar fallback.
 * SIMDE_CONSTIFY_2_ turns the runtime `lane` argument back into the
 * compile-time constant the native/dup intrinsics require. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vdot_lane_s32(simde_int32x2_t r, simde_int8x8_t a, simde_int8x8_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
simde_int32x2_t result;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(__ARM_FEATURE_DOTPROD)
SIMDE_CONSTIFY_2_(vdot_lane_s32, result, (HEDLEY_UNREACHABLE(), result), lane, r, a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
simde_int32x2_t
b_lane,
b_32 = vreinterpret_s32_s8(b);
SIMDE_CONSTIFY_2_(vdup_lane_s32, b_lane, (HEDLEY_UNREACHABLE(), b_lane), lane, b_32);
result =
vadd_s32(
r,
vmovn_s64(
vpaddlq_s32(
vpaddlq_s16(
vmull_s8(a, vreinterpret_s8_s32(b_lane))
)
)
)
);
#else
simde_int32x2_private r_ = simde_int32x2_to_private(r);
simde_int8x8_private
a_ = simde_int8x8_to_private(a),
b_ = simde_int8x8_to_private(b);
for (int i = 0 ; i < 2 ; i++) {
int32_t acc = 0;
SIMDE_VECTORIZE_REDUCTION(+:acc)
for (int j = 0 ; j < 4 ; j++) {
/* lane/i select the 4-byte group; j walks the bytes inside it. */
const int idx_b = j + (lane << 2);
const int idx_a = j + (i << 2);
acc += HEDLEY_STATIC_CAST(int32_t, a_.values[idx_a]) * HEDLEY_STATIC_CAST(int32_t, b_.values[idx_b]);
}
r_.values[i] += acc;
}
result = simde_int32x2_from_private(r_);
#endif
return result;
}
/* Alias is installed whenever the target lacks the real dot-product
 * extension, not only in plain native-alias mode. */
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD))
#undef vdot_lane_s32
#define vdot_lane_s32(r, a, b, lane) simde_vdot_lane_s32((r), (a), (b), (lane))
#endif
/* Unsigned counterpart of simde_vdot_lane_s32; identical structure with
 * unsigned lanes and intrinsics. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vdot_lane_u32(simde_uint32x2_t r, simde_uint8x8_t a, simde_uint8x8_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
simde_uint32x2_t result;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(__ARM_FEATURE_DOTPROD)
SIMDE_CONSTIFY_2_(vdot_lane_u32, result, (HEDLEY_UNREACHABLE(), result), lane, r, a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
simde_uint32x2_t
b_lane,
b_32 = vreinterpret_u32_u8(b);
SIMDE_CONSTIFY_2_(vdup_lane_u32, b_lane, (HEDLEY_UNREACHABLE(), b_lane), lane, b_32);
result =
vadd_u32(
r,
vmovn_u64(
vpaddlq_u32(
vpaddlq_u16(
vmull_u8(a, vreinterpret_u8_u32(b_lane))
)
)
)
);
#else
simde_uint32x2_private r_ = simde_uint32x2_to_private(r);
simde_uint8x8_private
a_ = simde_uint8x8_to_private(a),
b_ = simde_uint8x8_to_private(b);
for (int i = 0 ; i < 2 ; i++) {
uint32_t acc = 0;
SIMDE_VECTORIZE_REDUCTION(+:acc)
for (int j = 0 ; j < 4 ; j++) {
const int idx_b = j + (lane << 2);
const int idx_a = j + (i << 2);
acc += HEDLEY_STATIC_CAST(uint32_t, a_.values[idx_a]) * HEDLEY_STATIC_CAST(uint32_t, b_.values[idx_b]);
}
r_.values[i] += acc;
}
result = simde_uint32x2_from_private(r_);
#endif
return result;
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD))
#undef vdot_lane_u32
#define vdot_lane_u32(r, a, b, lane) simde_vdot_lane_u32((r), (a), (b), (lane))
#endif
/* simde_vdot_laneq_s32 — like vdot_lane_s32 but the lane is selected from a
 * 128-bit `b` vector, so `lane` may be 0..3.  Result is still a 64-bit pair:
 * r[i] += sum_{j=0..3} a[4*i+j] * b[4*lane+j] (signed bytes). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vdot_laneq_s32(simde_int32x2_t r, simde_int8x8_t a, simde_int8x16_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
simde_int32x2_t result;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(__ARM_FEATURE_DOTPROD)
SIMDE_CONSTIFY_4_(vdot_laneq_s32, result, (HEDLEY_UNREACHABLE(), result), lane, r, a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Broadcast the selected 32-bit group of b to a 64-bit vector, then emulate
 * the dot product with widening multiply + pairwise adds + narrow. */
simde_int32x2_t b_lane;
simde_int32x4_t b_32 = vreinterpretq_s32_s8(b);
SIMDE_CONSTIFY_4_(simde_vdup_laneq_s32, b_lane, (HEDLEY_UNREACHABLE(), b_lane), lane, b_32);
result =
vadd_s32(
r,
vmovn_s64(
vpaddlq_s32(
vpaddlq_s16(
vmull_s8(a, vreinterpret_s8_s32(b_lane))
)
)
)
);
#else
/* Scalar fallback. */
simde_int32x2_private r_ = simde_int32x2_to_private(r);
simde_int8x8_private a_ = simde_int8x8_to_private(a);
simde_int8x16_private b_ = simde_int8x16_to_private(b);
for (int i = 0 ; i < 2 ; i++) {
int32_t acc = 0;
SIMDE_VECTORIZE_REDUCTION(+:acc)
for (int j = 0 ; j < 4 ; j++) {
const int idx_b = j + (lane << 2);
const int idx_a = j + (i << 2);
acc += HEDLEY_STATIC_CAST(int32_t, a_.values[idx_a]) * HEDLEY_STATIC_CAST(int32_t, b_.values[idx_b]);
}
r_.values[i] += acc;
}
result = simde_int32x2_from_private(r_);
#endif
return result;
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD))
#undef vdot_laneq_s32
#define vdot_laneq_s32(r, a, b, lane) simde_vdot_laneq_s32((r), (a), (b), (lane))
#endif
/* simde_vdot_laneq_u32 — unsigned counterpart of simde_vdot_laneq_s32
 * (lane 0..3 selected from a 128-bit b vector). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vdot_laneq_u32(simde_uint32x2_t r, simde_uint8x8_t a, simde_uint8x16_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
simde_uint32x2_t result;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(__ARM_FEATURE_DOTPROD)
SIMDE_CONSTIFY_4_(vdot_laneq_u32, result, (HEDLEY_UNREACHABLE(), result), lane, r, a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Emulate UDOT: broadcast lane, widening multiply, two pairwise adds, narrow. */
simde_uint32x2_t b_lane;
simde_uint32x4_t b_32 = vreinterpretq_u32_u8(b);
SIMDE_CONSTIFY_4_(simde_vdup_laneq_u32, b_lane, (HEDLEY_UNREACHABLE(), b_lane), lane, b_32);
result =
vadd_u32(
r,
vmovn_u64(
vpaddlq_u32(
vpaddlq_u16(
vmull_u8(a, vreinterpret_u8_u32(b_lane))
)
)
)
);
#else
/* Scalar fallback. */
simde_uint32x2_private r_ = simde_uint32x2_to_private(r);
simde_uint8x8_private a_ = simde_uint8x8_to_private(a);
simde_uint8x16_private b_ = simde_uint8x16_to_private(b);
for (int i = 0 ; i < 2 ; i++) {
uint32_t acc = 0;
SIMDE_VECTORIZE_REDUCTION(+:acc)
for (int j = 0 ; j < 4 ; j++) {
const int idx_b = j + (lane << 2);
const int idx_a = j + (i << 2);
acc += HEDLEY_STATIC_CAST(uint32_t, a_.values[idx_a]) * HEDLEY_STATIC_CAST(uint32_t, b_.values[idx_b]);
}
r_.values[i] += acc;
}
result = simde_uint32x2_from_private(r_);
#endif
return result;
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD))
#undef vdot_laneq_u32
#define vdot_laneq_u32(r, a, b, lane) simde_vdot_laneq_u32((r), (a), (b), (lane))
#endif
/* simde_vdotq_laneq_u32 — 128-bit form: four 32-bit accumulators, 16 bytes
 * of `a`, with the SAME 32-bit group of `b` (selected by lane 0..3) dotted
 * against each 4-byte group of `a`:
 *   r[i] += sum_{j=0..3} a[4*i+j] * b[4*lane+j]  for i = 0..3. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vdotq_laneq_u32(simde_uint32x4_t r, simde_uint8x16_t a, simde_uint8x16_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
simde_uint32x4_t result;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(__ARM_FEATURE_DOTPROD)
SIMDE_CONSTIFY_4_(vdotq_laneq_u32, result, (HEDLEY_UNREACHABLE(), result), lane, r, a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Emulate by processing the low and high 64-bit halves independently
 * (widening multiply + pairwise adds + narrow), then recombining. */
simde_uint32x4_t
b_lane,
b_32 = vreinterpretq_u32_u8(b);
SIMDE_CONSTIFY_4_(simde_vdupq_laneq_u32, b_lane, (HEDLEY_UNREACHABLE(), b_lane), lane, b_32);
result =
vcombine_u32(
vadd_u32(
vget_low_u32(r),
vmovn_u64(
vpaddlq_u32(
vpaddlq_u16(
vmull_u8(vget_low_u8(a), vget_low_u8(vreinterpretq_u8_u32(b_lane)))
)
)
)
),
vadd_u32(
vget_high_u32(r),
vmovn_u64(
vpaddlq_u32(
vpaddlq_u16(
vmull_u8(vget_high_u8(a), vget_high_u8(vreinterpretq_u8_u32(b_lane)))
)
)
)
)
);
#else
/* Scalar fallback over the four output lanes. */
simde_uint32x4_private r_ = simde_uint32x4_to_private(r);
simde_uint8x16_private
a_ = simde_uint8x16_to_private(a),
b_ = simde_uint8x16_to_private(b);
for(int i = 0 ; i < 4 ; i++) {
uint32_t acc = 0;
SIMDE_VECTORIZE_REDUCTION(+:acc)
for(int j = 0 ; j < 4 ; j++) {
const int idx_b = j + (lane << 2);
const int idx_a = j + (i << 2);
acc += HEDLEY_STATIC_CAST(uint32_t, a_.values[idx_a]) * HEDLEY_STATIC_CAST(uint32_t, b_.values[idx_b]);
}
r_.values[i] += acc;
}
result = simde_uint32x4_from_private(r_);
#endif
return result;
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD))
#undef vdotq_laneq_u32
#define vdotq_laneq_u32(r, a, b, lane) simde_vdotq_laneq_u32((r), (a), (b), (lane))
#endif
/* simde_vdotq_laneq_s32 — signed counterpart of simde_vdotq_laneq_u32:
 * r[i] += sum_{j=0..3} a[4*i+j] * b[4*lane+j] for i = 0..3 (signed bytes). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vdotq_laneq_s32(simde_int32x4_t r, simde_int8x16_t a, simde_int8x16_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
simde_int32x4_t result;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(__ARM_FEATURE_DOTPROD)
SIMDE_CONSTIFY_4_(vdotq_laneq_s32, result, (HEDLEY_UNREACHABLE(), result), lane, r, a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Emulate per 64-bit half, then recombine (see unsigned variant). */
simde_int32x4_t
b_lane,
b_32 = vreinterpretq_s32_s8(b);
SIMDE_CONSTIFY_4_(simde_vdupq_laneq_s32, b_lane, (HEDLEY_UNREACHABLE(), b_lane), lane, b_32);
result =
vcombine_s32(
vadd_s32(
vget_low_s32(r),
vmovn_s64(
vpaddlq_s32(
vpaddlq_s16(
vmull_s8(vget_low_s8(a), vget_low_s8(vreinterpretq_s8_s32(b_lane)))
)
)
)
),
vadd_s32(
vget_high_s32(r),
vmovn_s64(
vpaddlq_s32(
vpaddlq_s16(
vmull_s8(vget_high_s8(a), vget_high_s8(vreinterpretq_s8_s32(b_lane)))
)
)
)
)
);
#else
/* Scalar fallback. */
simde_int32x4_private r_ = simde_int32x4_to_private(r);
simde_int8x16_private
a_ = simde_int8x16_to_private(a),
b_ = simde_int8x16_to_private(b);
for(int i = 0 ; i < 4 ; i++) {
int32_t acc = 0;
SIMDE_VECTORIZE_REDUCTION(+:acc)
for(int j = 0 ; j < 4 ; j++) {
const int idx_b = j + (lane << 2);
const int idx_a = j + (i << 2);
acc += HEDLEY_STATIC_CAST(int32_t, a_.values[idx_a]) * HEDLEY_STATIC_CAST(int32_t, b_.values[idx_b]);
}
r_.values[i] += acc;
}
result = simde_int32x4_from_private(r_);
#endif
return result;
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD))
#undef vdotq_laneq_s32
#define vdotq_laneq_s32(r, a, b, lane) simde_vdotq_laneq_s32((r), (a), (b), (lane))
#endif
/* simde_vdotq_lane_u32 — 128-bit accumulator/`a` with a 64-bit `b`:
 * lane (0..1) selects one 32-bit group of b, and
 *   r[i] += sum_{j=0..3} a[4*i+j] * b[4*lane+j]  for i = 0..3. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vdotq_lane_u32(simde_uint32x4_t r, simde_uint8x16_t a, simde_uint8x8_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
simde_uint32x4_t result;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(__ARM_FEATURE_DOTPROD)
SIMDE_CONSTIFY_2_(vdotq_lane_u32, result, (HEDLEY_UNREACHABLE(), result), lane, r, a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Broadcast the selected group of b, then emulate per 64-bit half of a. */
simde_uint32x2_t
b_lane,
b_32 = vreinterpret_u32_u8(b);
SIMDE_CONSTIFY_2_(simde_vdup_lane_u32, b_lane, (HEDLEY_UNREACHABLE(), b_lane), lane, b_32);
result =
vcombine_u32(
vadd_u32(
vget_low_u32(r),
vmovn_u64(
vpaddlq_u32(
vpaddlq_u16(
vmull_u8(vget_low_u8(a), vreinterpret_u8_u32(b_lane))
)
)
)
),
vadd_u32(
vget_high_u32(r),
vmovn_u64(
vpaddlq_u32(
vpaddlq_u16(
vmull_u8(vget_high_u8(a), vreinterpret_u8_u32(b_lane))
)
)
)
)
);
#else
/* Scalar fallback; idx_b stays within b's 8 bytes because lane <= 1. */
simde_uint32x4_private r_ = simde_uint32x4_to_private(r);
simde_uint8x16_private a_ = simde_uint8x16_to_private(a);
simde_uint8x8_private b_ = simde_uint8x8_to_private(b);
for(int i = 0 ; i < 4 ; i++) {
uint32_t acc = 0;
SIMDE_VECTORIZE_REDUCTION(+:acc)
for(int j = 0 ; j < 4 ; j++) {
const int idx_b = j + (lane << 2);
const int idx_a = j + (i << 2);
acc += HEDLEY_STATIC_CAST(uint32_t, a_.values[idx_a]) * HEDLEY_STATIC_CAST(uint32_t, b_.values[idx_b]);
}
r_.values[i] += acc;
}
result = simde_uint32x4_from_private(r_);
#endif
return result;
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD))
#undef vdotq_lane_u32
#define vdotq_lane_u32(r, a, b, lane) simde_vdotq_lane_u32((r), (a), (b), (lane))
#endif
/* simde_vdotq_lane_s32 — signed counterpart of simde_vdotq_lane_u32. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vdotq_lane_s32(simde_int32x4_t r, simde_int8x16_t a, simde_int8x8_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
simde_int32x4_t result;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(__ARM_FEATURE_DOTPROD)
SIMDE_CONSTIFY_2_(vdotq_lane_s32, result, (HEDLEY_UNREACHABLE(), result), lane, r, a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Broadcast the selected group of b, then emulate per 64-bit half of a. */
simde_int32x2_t
b_lane,
b_32 = vreinterpret_s32_s8(b);
SIMDE_CONSTIFY_2_(simde_vdup_lane_s32, b_lane, (HEDLEY_UNREACHABLE(), b_lane), lane, b_32);
result =
vcombine_s32(
vadd_s32(
vget_low_s32(r),
vmovn_s64(
vpaddlq_s32(
vpaddlq_s16(
vmull_s8(vget_low_s8(a), vreinterpret_s8_s32(b_lane))
)
)
)
),
vadd_s32(
vget_high_s32(r),
vmovn_s64(
vpaddlq_s32(
vpaddlq_s16(
vmull_s8(vget_high_s8(a), vreinterpret_s8_s32(b_lane))
)
)
)
)
);
#else
/* Scalar fallback. */
simde_int32x4_private r_ = simde_int32x4_to_private(r);
simde_int8x16_private a_ = simde_int8x16_to_private(a);
simde_int8x8_private b_ = simde_int8x8_to_private(b);
for(int i = 0 ; i < 4 ; i++) {
int32_t acc = 0;
SIMDE_VECTORIZE_REDUCTION(+:acc)
for(int j = 0 ; j < 4 ; j++) {
const int idx_b = j + (lane << 2);
const int idx_a = j + (i << 2);
acc += HEDLEY_STATIC_CAST(int32_t, a_.values[idx_a]) * HEDLEY_STATIC_CAST(int32_t, b_.values[idx_b]);
}
r_.values[i] += acc;
}
result = simde_int32x4_from_private(r_);
#endif
return result;
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD))
#undef vdotq_lane_s32
#define vdotq_lane_s32(r, a, b, lane) simde_vdotq_lane_s32((r), (a), (b), (lane))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_DOT_LANE_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_ST1_LANE_H)
#define SIMDE_ARM_NEON_ST1_LANE_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Stores lane `lane` (0..1) of `val` to *ptr (vst1_lane_f32 equivalent).
 * NOTE(review): the native path is gated on A64V8 even though vst1_lane_f32
 * exists on ARMv7 NEON — presumably deliberate upstream; confirm before widening. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst1_lane_f32(simde_float32_t *ptr, simde_float32x2_t val, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_CONSTIFY_2_NO_RESULT_(vst1_lane_f32, HEDLEY_UNREACHABLE(), lane, ptr, val);
#else
simde_float32x2_private val_ = simde_float32x2_to_private(val);
*ptr = val_.values[lane];
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst1_lane_f32
#define vst1_lane_f32(a, b, c) simde_vst1_lane_f32((a), (b), (c))
#endif
/* Stores the single lane of `val` to *ptr (vst1_lane_f64 equivalent, A64-only type).
 * `lane` must be 0, so the native call hard-codes it. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst1_lane_f64(simde_float64_t *ptr, simde_float64x1_t val, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
(void) lane;
vst1_lane_f64(ptr, val, 0);
#else
simde_float64x1_private val_ = simde_float64x1_to_private(val);
*ptr = val_.values[lane];
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vst1_lane_f64
#define vst1_lane_f64(a, b, c) simde_vst1_lane_f64((a), (b), (c))
#endif
/* Stores lane `lane` (0..7) of `val` to *ptr (vst1_lane_s8 equivalent). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst1_lane_s8(int8_t *ptr, simde_int8x8_t val, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_8_NO_RESULT_(vst1_lane_s8, HEDLEY_UNREACHABLE(), lane, ptr, val);
#else
simde_int8x8_private val_ = simde_int8x8_to_private(val);
*ptr = val_.values[lane];
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst1_lane_s8
#define vst1_lane_s8(a, b, c) simde_vst1_lane_s8((a), (b), (c))
#endif
/* Stores lane `lane` (0..3) of `val` to *ptr (vst1_lane_s16 equivalent). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst1_lane_s16(int16_t *ptr, simde_int16x4_t val, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_4_NO_RESULT_(vst1_lane_s16, HEDLEY_UNREACHABLE(), lane, ptr, val);
#else
simde_int16x4_private val_ = simde_int16x4_to_private(val);
*ptr = val_.values[lane];
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst1_lane_s16
#define vst1_lane_s16(a, b, c) simde_vst1_lane_s16((a), (b), (c))
#endif
/* Stores lane `lane` (0..1) of `val` to *ptr (vst1_lane_s32 equivalent). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst1_lane_s32(int32_t *ptr, simde_int32x2_t val, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_2_NO_RESULT_(vst1_lane_s32, HEDLEY_UNREACHABLE(), lane, ptr, val);
#else
simde_int32x2_private val_ = simde_int32x2_to_private(val);
*ptr = val_.values[lane];
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst1_lane_s32
#define vst1_lane_s32(a, b, c) simde_vst1_lane_s32((a), (b), (c))
#endif
/* Stores the single lane of `val` to *ptr (vst1_lane_s64 equivalent; lane must be 0). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst1_lane_s64(int64_t *ptr, simde_int64x1_t val, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
(void) lane;
vst1_lane_s64(ptr, val, 0);
#else
simde_int64x1_private val_ = simde_int64x1_to_private(val);
*ptr = val_.values[lane];
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst1_lane_s64
#define vst1_lane_s64(a, b, c) simde_vst1_lane_s64((a), (b), (c))
#endif
/* Stores lane `lane` (0..7) of `val` to *ptr (vst1_lane_u8 equivalent). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst1_lane_u8(uint8_t *ptr, simde_uint8x8_t val, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_8_NO_RESULT_(vst1_lane_u8, HEDLEY_UNREACHABLE(), lane, ptr, val);
#else
simde_uint8x8_private val_ = simde_uint8x8_to_private(val);
*ptr = val_.values[lane];
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst1_lane_u8
#define vst1_lane_u8(a, b, c) simde_vst1_lane_u8((a), (b), (c))
#endif
/* Stores lane `lane` (0..3) of `val` to *ptr (vst1_lane_u16 equivalent). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst1_lane_u16(uint16_t *ptr, simde_uint16x4_t val, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_4_NO_RESULT_(vst1_lane_u16, HEDLEY_UNREACHABLE(), lane, ptr, val);
#else
simde_uint16x4_private val_ = simde_uint16x4_to_private(val);
*ptr = val_.values[lane];
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst1_lane_u16
#define vst1_lane_u16(a, b, c) simde_vst1_lane_u16((a), (b), (c))
#endif
/* Stores lane `lane` (0..1) of `val` to *ptr (vst1_lane_u32 equivalent). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst1_lane_u32(uint32_t *ptr, simde_uint32x2_t val, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_2_NO_RESULT_(vst1_lane_u32, HEDLEY_UNREACHABLE(), lane, ptr, val);
#else
simde_uint32x2_private val_ = simde_uint32x2_to_private(val);
*ptr = val_.values[lane];
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst1_lane_u32
#define vst1_lane_u32(a, b, c) simde_vst1_lane_u32((a), (b), (c))
#endif
/* Stores the single lane of `val` to *ptr (vst1_lane_u64 equivalent; lane must be 0). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst1_lane_u64(uint64_t *ptr, simde_uint64x1_t val, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
(void) lane;
vst1_lane_u64(ptr, val, 0);
#else
simde_uint64x1_private val_ = simde_uint64x1_to_private(val);
*ptr = val_.values[lane];
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst1_lane_u64
#define vst1_lane_u64(a, b, c) simde_vst1_lane_u64((a), (b), (c))
#endif
/* Stores lane `lane` (0..3) of the 128-bit `val` to *ptr (vst1q_lane_f32 equivalent). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst1q_lane_f32(simde_float32_t *ptr, simde_float32x4_t val, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_4_NO_RESULT_(vst1q_lane_f32, HEDLEY_UNREACHABLE(), lane, ptr, val);
#else
simde_float32x4_private val_ = simde_float32x4_to_private(val);
*ptr = val_.values[lane];
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst1q_lane_f32
#define vst1q_lane_f32(a, b, c) simde_vst1q_lane_f32((a), (b), (c))
#endif
/* Stores lane `lane` (0..1) of the 128-bit `val` to *ptr (vst1q_lane_f64, A64-only type). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst1q_lane_f64(simde_float64_t *ptr, simde_float64x2_t val, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_CONSTIFY_2_NO_RESULT_(vst1q_lane_f64, HEDLEY_UNREACHABLE(), lane, ptr, val);
#else
simde_float64x2_private val_ = simde_float64x2_to_private(val);
*ptr = val_.values[lane];
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vst1q_lane_f64
#define vst1q_lane_f64(a, b, c) simde_vst1q_lane_f64((a), (b), (c))
#endif
/* Stores lane `lane` (0..15) of the 128-bit `val` to *ptr (vst1q_lane_s8 equivalent). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst1q_lane_s8(int8_t *ptr, simde_int8x16_t val, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_16_NO_RESULT_(vst1q_lane_s8, HEDLEY_UNREACHABLE(), lane, ptr, val);
#else
simde_int8x16_private val_ = simde_int8x16_to_private(val);
*ptr = val_.values[lane];
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst1q_lane_s8
#define vst1q_lane_s8(a, b, c) simde_vst1q_lane_s8((a), (b), (c))
#endif
/* Stores lane `lane` (0..7) of the 128-bit `val` to *ptr (vst1q_lane_s16 equivalent). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst1q_lane_s16(int16_t *ptr, simde_int16x8_t val, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_8_NO_RESULT_(vst1q_lane_s16, HEDLEY_UNREACHABLE(), lane, ptr, val);
#else
simde_int16x8_private val_ = simde_int16x8_to_private(val);
*ptr = val_.values[lane];
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst1q_lane_s16
#define vst1q_lane_s16(a, b, c) simde_vst1q_lane_s16((a), (b), (c))
#endif
/* Stores lane `lane` (0..3) of the 128-bit `val` to *ptr (vst1q_lane_s32 equivalent). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst1q_lane_s32(int32_t *ptr, simde_int32x4_t val, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_4_NO_RESULT_(vst1q_lane_s32, HEDLEY_UNREACHABLE(), lane, ptr, val);
#else
simde_int32x4_private val_ = simde_int32x4_to_private(val);
*ptr = val_.values[lane];
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst1q_lane_s32
#define vst1q_lane_s32(a, b, c) simde_vst1q_lane_s32((a), (b), (c))
#endif
/* Stores lane `lane` (0..1) of the 128-bit `val` to *ptr (vst1q_lane_s64 equivalent). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst1q_lane_s64(int64_t *ptr, simde_int64x2_t val, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_2_NO_RESULT_(vst1q_lane_s64, HEDLEY_UNREACHABLE(), lane, ptr, val);
#else
simde_int64x2_private val_ = simde_int64x2_to_private(val);
*ptr = val_.values[lane];
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst1q_lane_s64
#define vst1q_lane_s64(a, b, c) simde_vst1q_lane_s64((a), (b), (c))
#endif
/* Stores lane `lane` (0..15) of the 128-bit `val` to *ptr (vst1q_lane_u8 equivalent). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst1q_lane_u8(uint8_t *ptr, simde_uint8x16_t val, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_16_NO_RESULT_(vst1q_lane_u8, HEDLEY_UNREACHABLE(), lane, ptr, val);
#else
simde_uint8x16_private val_ = simde_uint8x16_to_private(val);
*ptr = val_.values[lane];
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst1q_lane_u8
#define vst1q_lane_u8(a, b, c) simde_vst1q_lane_u8((a), (b), (c))
#endif
/* Stores lane `lane` (0..7) of the 128-bit `val` to *ptr (vst1q_lane_u16 equivalent). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst1q_lane_u16(uint16_t *ptr, simde_uint16x8_t val, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_8_NO_RESULT_(vst1q_lane_u16, HEDLEY_UNREACHABLE(), lane, ptr, val);
#else
simde_uint16x8_private val_ = simde_uint16x8_to_private(val);
*ptr = val_.values[lane];
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst1q_lane_u16
#define vst1q_lane_u16(a, b, c) simde_vst1q_lane_u16((a), (b), (c))
#endif
/* Stores lane `lane` (0..3) of the 128-bit `val` to *ptr (vst1q_lane_u32 equivalent). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst1q_lane_u32(uint32_t *ptr, simde_uint32x4_t val, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_4_NO_RESULT_(vst1q_lane_u32, HEDLEY_UNREACHABLE(), lane, ptr, val);
#else
simde_uint32x4_private val_ = simde_uint32x4_to_private(val);
*ptr = val_.values[lane];
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst1q_lane_u32
#define vst1q_lane_u32(a, b, c) simde_vst1q_lane_u32((a), (b), (c))
#endif
/* Stores lane `lane` (0..1) of the 128-bit `val` to *ptr (vst1q_lane_u64 equivalent). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst1q_lane_u64(uint64_t *ptr, simde_uint64x2_t val, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_CONSTIFY_2_NO_RESULT_(vst1q_lane_u64, HEDLEY_UNREACHABLE(), lane, ptr, val);
#else
simde_uint64x2_private val_ = simde_uint64x2_to_private(val);
*ptr = val_.values[lane];
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vst1q_lane_u64
#define vst1q_lane_u64(a, b, c) simde_vst1q_lane_u64((a), (b), (c))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_ST1_LANE_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_MLAL_LANE_H)
#define SIMDE_ARM_NEON_MLAL_LANE_H
#include "mlal.h"
#include "dup_lane.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vmlal_lane_* / vmlal_laneq_* — widening multiply-accumulate by a lane:
 * a + widen(b) * widen(v[lane]).  Each macro forwards to the native intrinsic
 * when available, otherwise composes simde_vmlal_* with a lane broadcast
 * (simde_vdup_lane_* / simde_vdup_laneq_*).  The *_lane_* forms take a 64-bit
 * `v` (ARMv7 NEON); the *_laneq_* forms take a 128-bit `v` (AArch64 only). */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmlal_lane_s16(a, b, v, lane) vmlal_lane_s16((a), (b), (v), (lane))
#else
#define simde_vmlal_lane_s16(a, b, v, lane) simde_vmlal_s16((a), (b), simde_vdup_lane_s16((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlal_lane_s16
#define vmlal_lane_s16(a, b, c, lane) simde_vmlal_lane_s16((a), (b), (c), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmlal_lane_s32(a, b, v, lane) vmlal_lane_s32((a), (b), (v), (lane))
#else
#define simde_vmlal_lane_s32(a, b, v, lane) simde_vmlal_s32((a), (b), simde_vdup_lane_s32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlal_lane_s32
#define vmlal_lane_s32(a, b, c, lane) simde_vmlal_lane_s32((a), (b), (c), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmlal_lane_u16(a, b, v, lane) vmlal_lane_u16((a), (b), (v), (lane))
#else
#define simde_vmlal_lane_u16(a, b, v, lane) simde_vmlal_u16((a), (b), simde_vdup_lane_u16((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlal_lane_u16
#define vmlal_lane_u16(a, b, c, lane) simde_vmlal_lane_u16((a), (b), (c), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmlal_lane_u32(a, b, v, lane) vmlal_lane_u32((a), (b), (v), (lane))
#else
#define simde_vmlal_lane_u32(a, b, v, lane) simde_vmlal_u32((a), (b), simde_vdup_lane_u32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlal_lane_u32
#define vmlal_lane_u32(a, b, c, lane) simde_vmlal_lane_u32((a), (b), (c), (lane))
#endif
/* 128-bit `v` (laneq) variants: native only on AArch64. */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vmlal_laneq_s16(a, b, v, lane) vmlal_laneq_s16((a), (b), (v), (lane))
#else
#define simde_vmlal_laneq_s16(a, b, v, lane) simde_vmlal_s16((a), (b), simde_vdup_laneq_s16((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmlal_laneq_s16
#define vmlal_laneq_s16(a, b, c, lane) simde_vmlal_laneq_s16((a), (b), (c), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vmlal_laneq_s32(a, b, v, lane) vmlal_laneq_s32((a), (b), (v), (lane))
#else
#define simde_vmlal_laneq_s32(a, b, v, lane) simde_vmlal_s32((a), (b), simde_vdup_laneq_s32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmlal_laneq_s32
#define vmlal_laneq_s32(a, b, c, lane) simde_vmlal_laneq_s32((a), (b), (c), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vmlal_laneq_u16(a, b, v, lane) vmlal_laneq_u16((a), (b), (v), (lane))
#else
#define simde_vmlal_laneq_u16(a, b, v, lane) simde_vmlal_u16((a), (b), simde_vdup_laneq_u16((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmlal_laneq_u16
#define vmlal_laneq_u16(a, b, c, lane) simde_vmlal_laneq_u16((a), (b), (c), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vmlal_laneq_u32(a, b, v, lane) vmlal_laneq_u32((a), (b), (v), (lane))
#else
#define simde_vmlal_laneq_u32(a, b, v, lane) simde_vmlal_u32((a), (b), simde_vdup_laneq_u32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmlal_laneq_u32
#define vmlal_laneq_u32(a, b, c, lane) simde_vmlal_laneq_u32((a), (b), (c), (lane))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_MLAL_LANE_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_COMBINE_H)
#define SIMDE_ARM_NEON_COMBINE_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* simde_vcombine_f32 — concatenates two 64-bit vectors into one 128-bit
 * vector: result = [low[0], low[1], high[0], high[1]]. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vcombine_f32(simde_float32x2_t low, simde_float32x2_t high) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcombine_f32(low, high);
#else
simde_float32x4_private r_;
simde_float32x2_private
low_ = simde_float32x2_to_private(low),
high_ = simde_float32x2_to_private(high);
/* Note: __builtin_shufflevector can have the output contain
 * twice the number of elements, __builtin_shuffle cannot.
 * Using SIMDE_SHUFFLE_VECTOR_ here would not work. */
#if defined(SIMDE_VECTOR_SUBSCRIPT) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
r_.values = __builtin_shufflevector(low_.values, high_.values, 0, 1, 2, 3);
#else
/* Generic path: copy low into the first half, high into the second. */
size_t halfway = (sizeof(r_.values) / sizeof(r_.values[0])) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway ; i++) {
r_.values[i] = low_.values[i];
r_.values[i + halfway] = high_.values[i];
}
#endif
return simde_float32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcombine_f32
#define vcombine_f32(low, high) simde_vcombine_f32((low), (high))
#endif
/* simde_vcombine_f64 — concatenates two 1-element f64 vectors:
 * result = [low[0], high[0]].  Native only on AArch64 (float64 NEON type). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vcombine_f64(simde_float64x1_t low, simde_float64x1_t high) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcombine_f64(low, high);
#else
simde_float64x2_private r_;
simde_float64x1_private
low_ = simde_float64x1_to_private(low),
high_ = simde_float64x1_to_private(high);
#if defined(SIMDE_VECTOR_SUBSCRIPT) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
r_.values = __builtin_shufflevector(low_.values, high_.values, 0, 1);
#else
/* Generic path: low fills the first half, high the second. */
size_t halfway = (sizeof(r_.values) / sizeof(r_.values[0])) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway ; i++) {
r_.values[i] = low_.values[i];
r_.values[i + halfway] = high_.values[i];
}
#endif
return simde_float64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcombine_f64
#define vcombine_f64(low, high) simde_vcombine_f64((low), (high))
#endif
/* simde_vcombine_s8 — concatenates two 8-byte vectors into a 16-byte vector. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vcombine_s8(simde_int8x8_t low, simde_int8x8_t high) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcombine_s8(low, high);
#else
simde_int8x16_private r_;
simde_int8x8_private
low_ = simde_int8x8_to_private(low),
high_ = simde_int8x8_to_private(high);
#if defined(SIMDE_VECTOR_SUBSCRIPT) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
r_.values = __builtin_shufflevector(low_.values, high_.values, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
#else
/* Generic path: low fills the first half, high the second. */
size_t halfway = (sizeof(r_.values) / sizeof(r_.values[0])) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway ; i++) {
r_.values[i] = low_.values[i];
r_.values[i + halfway] = high_.values[i];
}
#endif
return simde_int8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcombine_s8
#define vcombine_s8(low, high) simde_vcombine_s8((low), (high))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vcombine_s16(simde_int16x4_t low, simde_int16x4_t high) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcombine_s16(low, high);
#else
simde_int16x8_private r_;
simde_int16x4_private
low_ = simde_int16x4_to_private(low),
high_ = simde_int16x4_to_private(high);
#if defined(SIMDE_VECTOR_SUBSCRIPT) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
r_.values = __builtin_shufflevector(low_.values, high_.values, 0, 1, 2, 3, 4, 5, 6, 7);
#else
size_t halfway = (sizeof(r_.values) / sizeof(r_.values[0])) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway ; i++) {
r_.values[i] = low_.values[i];
r_.values[i + halfway] = high_.values[i];
}
#endif
return simde_int16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcombine_s16
#define vcombine_s16(low, high) simde_vcombine_s16((low), (high))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vcombine_s32(simde_int32x2_t low, simde_int32x2_t high) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcombine_s32(low, high);
#else
simde_int32x4_private r_;
simde_int32x2_private
low_ = simde_int32x2_to_private(low),
high_ = simde_int32x2_to_private(high);
#if defined(SIMDE_VECTOR_SUBSCRIPT) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
r_.values = __builtin_shufflevector(low_.values, high_.values, 0, 1, 2, 3);
#else
size_t halfway = (sizeof(r_.values) / sizeof(r_.values[0])) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway ; i++) {
r_.values[i] = low_.values[i];
r_.values[i + halfway] = high_.values[i];
}
#endif
return simde_int32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcombine_s32
#define vcombine_s32(low, high) simde_vcombine_s32((low), (high))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vcombine_s64(simde_int64x1_t low, simde_int64x1_t high) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcombine_s64(low, high);
#else
simde_int64x2_private r_;
simde_int64x1_private
low_ = simde_int64x1_to_private(low),
high_ = simde_int64x1_to_private(high);
#if defined(SIMDE_VECTOR_SUBSCRIPT) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
r_.values = __builtin_shufflevector(low_.values, high_.values, 0, 1);
#else
size_t halfway = (sizeof(r_.values) / sizeof(r_.values[0])) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway ; i++) {
r_.values[i] = low_.values[i];
r_.values[i + halfway] = high_.values[i];
}
#endif
return simde_int64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcombine_s64
#define vcombine_s64(low, high) simde_vcombine_s64((low), (high))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vcombine_u8(simde_uint8x8_t low, simde_uint8x8_t high) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcombine_u8(low, high);
#else
simde_uint8x16_private r_;
simde_uint8x8_private
low_ = simde_uint8x8_to_private(low),
high_ = simde_uint8x8_to_private(high);
#if defined(SIMDE_VECTOR_SUBSCRIPT) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
r_.values = __builtin_shufflevector(low_.values, high_.values, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
#else
size_t halfway = (sizeof(r_.values) / sizeof(r_.values[0])) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway ; i++) {
r_.values[i] = low_.values[i];
r_.values[i + halfway] = high_.values[i];
}
#endif
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcombine_u8
#define vcombine_u8(low, high) simde_vcombine_u8((low), (high))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vcombine_u16(simde_uint16x4_t low, simde_uint16x4_t high) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcombine_u16(low, high);
#else
simde_uint16x8_private r_;
simde_uint16x4_private
low_ = simde_uint16x4_to_private(low),
high_ = simde_uint16x4_to_private(high);
#if defined(SIMDE_VECTOR_SUBSCRIPT) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
r_.values = __builtin_shufflevector(low_.values, high_.values, 0, 1, 2, 3, 4, 5, 6, 7);
#else
size_t halfway = (sizeof(r_.values) / sizeof(r_.values[0])) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway ; i++) {
r_.values[i] = low_.values[i];
r_.values[i + halfway] = high_.values[i];
}
#endif
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcombine_u16
#define vcombine_u16(low, high) simde_vcombine_u16((low), (high))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vcombine_u32(simde_uint32x2_t low, simde_uint32x2_t high) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcombine_u32(low, high);
#else
simde_uint32x4_private r_;
simde_uint32x2_private
low_ = simde_uint32x2_to_private(low),
high_ = simde_uint32x2_to_private(high);
#if defined(SIMDE_VECTOR_SUBSCRIPT) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
r_.values = __builtin_shufflevector(low_.values, high_.values, 0, 1, 2, 3);
#else
size_t halfway = (sizeof(r_.values) / sizeof(r_.values[0])) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway ; i++) {
r_.values[i] = low_.values[i];
r_.values[i + halfway] = high_.values[i];
}
#endif
return simde_uint32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcombine_u32
#define vcombine_u32(low, high) simde_vcombine_u32((low), (high))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vcombine_u64(simde_uint64x1_t low, simde_uint64x1_t high) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcombine_u64(low, high);
#else
simde_uint64x2_private r_;
simde_uint64x1_private
low_ = simde_uint64x1_to_private(low),
high_ = simde_uint64x1_to_private(high);
#if defined(SIMDE_VECTOR_SUBSCRIPT) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
r_.values = __builtin_shufflevector(low_.values, high_.values, 0, 1);
#else
size_t halfway = (sizeof(r_.values) / sizeof(r_.values[0])) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway ; i++) {
r_.values[i] = low_.values[i];
r_.values[i + halfway] = high_.values[i];
}
#endif
return simde_uint64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcombine_u64
#define vcombine_u64(low, high) simde_vcombine_u64((low), (high))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_COMBINE_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/neg.h | .h | 11,885 | 414 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_NEG_H)
#define SIMDE_ARM_NEON_NEG_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Scalar negation of a 64-bit signed integer (vnegd_s64).
 * NOTE(review): negating INT64_MIN overflows signed arithmetic (UB in C);
 * the native intrinsic wraps on hardware -- confirm callers avoid
 * INT64_MIN or accept implementation-defined results on the fallback. */
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_vnegd_s64(int64_t a) {
  /* The extra version check suggests GCC < 9 does not provide vnegd_s64;
   * guard inferred from the condition below. */
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(9,0,0))
    return vnegd_s64(a);
  #else
    return -a;
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vnegd_s64
  #define vnegd_s64(a) simde_vnegd_s64(a)
#endif

/* Lane-wise negation of a float32x2 vector (vneg_f32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vneg_f32(simde_float32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vneg_f32(a);
  #else
    simde_float32x2_private
      r_,
      a_ = simde_float32x2_to_private(a);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* GCC/Clang vector extension: negate all lanes at once. */
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = -(a_.values[i]);
      }
    #endif
    return simde_float32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vneg_f32
  #define vneg_f32(a) simde_vneg_f32(a)
#endif

/* Lane-wise negation of a float64x1 vector (vneg_f64, AArch64-only native). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vneg_f64(simde_float64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vneg_f64(a);
  #else
    simde_float64x1_private
      r_,
      a_ = simde_float64x1_to_private(a);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = -(a_.values[i]);
      }
    #endif
    return simde_float64x1_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vneg_f64
  #define vneg_f64(a) simde_vneg_f64(a)
#endif

/* Lane-wise negation of an int8x8 vector (vneg_s8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vneg_s8(simde_int8x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vneg_s8(a);
  #else
    simde_int8x8_private
      r_,
      a_ = simde_int8x8_to_private(a);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = -(a_.values[i]);
      }
    #endif
    return simde_int8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vneg_s8
  #define vneg_s8(a) simde_vneg_s8(a)
#endif

/* Lane-wise negation of an int16x4 vector (vneg_s16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vneg_s16(simde_int16x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vneg_s16(a);
  #else
    simde_int16x4_private
      r_,
      a_ = simde_int16x4_to_private(a);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = -(a_.values[i]);
      }
    #endif
    return simde_int16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vneg_s16
  #define vneg_s16(a) simde_vneg_s16(a)
#endif

/* Lane-wise negation of an int32x2 vector (vneg_s32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vneg_s32(simde_int32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vneg_s32(a);
  #else
    simde_int32x2_private
      r_,
      a_ = simde_int32x2_to_private(a);
    /* The vector-extension path is additionally disabled under
     * SIMDE_BUG_GCC_100762 (a GCC miscompilation workaround -- see the
     * bug macro's definition for details). */
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = -(a_.values[i]);
      }
    #endif
    return simde_int32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vneg_s32
  #define vneg_s32(a) simde_vneg_s32(a)
#endif
/* Lane-wise negation of an int64x1 vector (vneg_s64).
 * The native intrinsic exists only on AArch64 (ARMv7 VNEG has no 64-bit
 * integer form), hence the A64V8 native test; the portable path negates
 * via simde_vnegd_s64 per lane.
 * Fix: the native-alias guard previously tested
 * SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES, inconsistent with the
 * A64V8 native test above and with the sibling simde_vneg_f64.  On an
 * ARMv7-native build that macro is undefined, so no alias was emitted
 * even though arm_neon.h provides no vneg_s64, leaving the name
 * unavailable to callers.  Gate on the A64V8 alias macro instead; this
 * is backward compatible (A64V8's macro is defined whenever A32V7's is). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vneg_s64(simde_int64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vneg_s64(a);
  #else
    simde_int64x1_private
      r_,
      a_ = simde_int64x1_to_private(a);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = simde_vnegd_s64(a_.values[i]);
      }
    #endif
    return simde_int64x1_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vneg_s64
  #define vneg_s64(a) simde_vneg_s64(a)
#endif
/* Lane-wise negation of a float32x4 vector (vnegq_f32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vnegq_f32(simde_float32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vnegq_f32(a);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
    /* The GCC version check mirrors the others in this file; presumably
     * vec_neg support landed in GCC 8.1 -- inferred from the guard. */
    return vec_neg(a);
  #else
    simde_float32x4_private
      r_,
      a_ = simde_float32x4_to_private(a);
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_f32x4_neg(a_.v128);
    #elif defined(SIMDE_X86_SSE2_NATIVE)
      /* Negate by XOR-ing the IEEE-754 sign bit of every lane. */
      r_.m128 = _mm_castsi128_ps(_mm_xor_si128(_mm_set1_epi32(HEDLEY_STATIC_CAST(int32_t, UINT32_C(1) << 31)), _mm_castps_si128(a_.m128)));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = -(a_.values[i]);
      }
    #endif
    return simde_float32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vnegq_f32
  #define vnegq_f32(a) simde_vnegq_f32(a)
#endif

/* Lane-wise negation of a float64x2 vector (vnegq_f64, AArch64-only native). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vnegq_f64(simde_float64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vnegq_f64(a);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
    return vec_neg(a);
  #else
    simde_float64x2_private
      r_,
      a_ = simde_float64x2_to_private(a);
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_f64x2_neg(a_.v128);
    #elif defined(SIMDE_X86_SSE2_NATIVE)
      /* XOR the sign bit of each 64-bit lane. */
      r_.m128d = _mm_castsi128_pd(_mm_xor_si128(_mm_set1_epi64x(HEDLEY_STATIC_CAST(int64_t, UINT64_C(1) << 63)), _mm_castpd_si128(a_.m128d)));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = -(a_.values[i]);
      }
    #endif
    return simde_float64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vnegq_f64
  #define vnegq_f64(a) simde_vnegq_f64(a)
#endif

/* Lane-wise negation of an int8x16 vector (vnegq_s8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vnegq_s8(simde_int8x16_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vnegq_s8(a);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
    return vec_neg(a);
  #else
    simde_int8x16_private
      r_,
      a_ = simde_int8x16_to_private(a);
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_i8x16_neg(a_.v128);
    #elif defined(SIMDE_X86_SSE2_NATIVE)
      /* Integer negate as 0 - a (two's complement). */
      r_.m128i = _mm_sub_epi8(_mm_setzero_si128(), a_.m128i);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = -(a_.values[i]);
      }
    #endif
    return simde_int8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vnegq_s8
  #define vnegq_s8(a) simde_vnegq_s8(a)
#endif

/* Lane-wise negation of an int16x8 vector (vnegq_s16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vnegq_s16(simde_int16x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vnegq_s16(a);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
    return vec_neg(a);
  #else
    simde_int16x8_private
      r_,
      a_ = simde_int16x8_to_private(a);
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_i16x8_neg(a_.v128);
    #elif defined(SIMDE_X86_SSE2_NATIVE)
      r_.m128i = _mm_sub_epi16(_mm_setzero_si128(), a_.m128i);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = -(a_.values[i]);
      }
    #endif
    return simde_int16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vnegq_s16
  #define vnegq_s16(a) simde_vnegq_s16(a)
#endif

/* Lane-wise negation of an int32x4 vector (vnegq_s32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vnegq_s32(simde_int32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vnegq_s32(a);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
    return vec_neg(a);
  #else
    simde_int32x4_private
      r_,
      a_ = simde_int32x4_to_private(a);
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_i32x4_neg(a_.v128);
    #elif defined(SIMDE_X86_SSE2_NATIVE)
      r_.m128i = _mm_sub_epi32(_mm_setzero_si128(), a_.m128i);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = -(a_.values[i]);
      }
    #endif
    return simde_int32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vnegq_s32
  #define vnegq_s32(a) simde_vnegq_s32(a)
#endif
/* Lane-wise negation of an int64x2 vector (vnegq_s64).
 * The native intrinsic exists only on AArch64 (ARMv7 VNEG has no 64-bit
 * integer form), hence the A64V8 native test.
 * Fix: as with simde_vneg_s64, the native-alias guard previously tested
 * SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES, inconsistent with the
 * A64V8 native test above and with simde_vnegq_f64.  On an ARMv7-native
 * build no alias was emitted even though arm_neon.h provides no
 * vnegq_s64.  Gate on the A64V8 alias macro; backward compatible since
 * the A64V8 macro is defined in every configuration the A32V7 one is. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vnegq_s64(simde_int64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vnegq_s64(a);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
    return vec_neg(a);
  #else
    simde_int64x2_private
      r_,
      a_ = simde_int64x2_to_private(a);
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_i64x2_neg(a_.v128);
    #elif defined(SIMDE_X86_SSE2_NATIVE)
      /* Integer negate as 0 - a (two's complement). */
      r_.m128i = _mm_sub_epi64(_mm_setzero_si128(), a_.m128i);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = simde_vnegd_s64(a_.values[i]);
      }
    #endif
    return simde_int64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vnegq_s64
  #define vnegq_s64(a) simde_vnegq_s64(a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_NEG_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/abdl.h | .h | 4,266 | 148 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_ABDL_H)
#define SIMDE_ARM_NEON_ABDL_H
#include "abs.h"
#include "subl.h"
#include "movl.h"
#include "reinterpret.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* simde_vabdl_*: widening absolute difference (|a - b|).  Each variant
 * widens both operands to the double-width lane type before subtracting,
 * so the subtraction itself cannot overflow (e.g. int8 range [-128,127]
 * gives a difference in [-255,255], which fits in int16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vabdl_s8(simde_int8x8_t a, simde_int8x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vabdl_s8(a, b);
  #else
    /* Widen-subtract, then take the lane-wise absolute value. */
    return simde_vabsq_s16(simde_vsubl_s8(a, b));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vabdl_s8
  #define vabdl_s8(a, b) simde_vabdl_s8((a), (b))
#endif

/* Widening absolute difference of int16x4 -> int32x4. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vabdl_s16(simde_int16x4_t a, simde_int16x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vabdl_s16(a, b);
  #else
    return simde_vabsq_s32(simde_vsubl_s16(a, b));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vabdl_s16
  #define vabdl_s16(a, b) simde_vabdl_s16((a), (b))
#endif

/* Widening absolute difference of int32x2 -> int64x2. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vabdl_s32(simde_int32x2_t a, simde_int32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vabdl_s32(a, b);
  #else
    return simde_vabsq_s64(simde_vsubl_s32(a, b));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vabdl_s32
  #define vabdl_s32(a, b) simde_vabdl_s32((a), (b))
#endif

/* Widening absolute difference of uint8x8 -> uint16x8.  The unsigned
 * inputs are zero-extended (vmovl), reinterpreted as signed so the
 * subtraction and absolute value are well-defined, then the result
 * (always non-negative) is reinterpreted back to unsigned. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vabdl_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vabdl_u8(a, b);
  #else
    return simde_vreinterpretq_u16_s16(
      simde_vabsq_s16(
        simde_vsubq_s16(
          simde_vreinterpretq_s16_u16(simde_vmovl_u8(a)),
          simde_vreinterpretq_s16_u16(simde_vmovl_u8(b))
        )
      )
    );
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vabdl_u8
  #define vabdl_u8(a, b) simde_vabdl_u8((a), (b))
#endif

/* Widening absolute difference of uint16x4 -> uint32x4 (same scheme). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vabdl_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vabdl_u16(a, b);
  #else
    return simde_vreinterpretq_u32_s32(
      simde_vabsq_s32(
        simde_vsubq_s32(
          simde_vreinterpretq_s32_u32(simde_vmovl_u16(a)),
          simde_vreinterpretq_s32_u32(simde_vmovl_u16(b))
        )
      )
    );
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vabdl_u16
  #define vabdl_u16(a, b) simde_vabdl_u16((a), (b))
#endif

/* Widening absolute difference of uint32x2 -> uint64x2 (same scheme). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vabdl_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vabdl_u32(a, b);
  #else
    return simde_vreinterpretq_u64_s64(
      simde_vabsq_s64(
        simde_vsubq_s64(
          simde_vreinterpretq_s64_u64(simde_vmovl_u32(a)),
          simde_vreinterpretq_s64_u64(simde_vmovl_u32(b))
        )
      )
    );
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vabdl_u32
  #define vabdl_u32(a, b) simde_vabdl_u32((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_ABDL_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/cmla_rot90.h | .h | 5,891 | 147 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Atharva Nimbalkar <atharvakn@gmail.com>
*/
#if !defined(SIMDE_ARM_NEON_CMLA_ROT90_H)
#define SIMDE_ARM_NEON_CMLA_ROT90_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Complex multiply-accumulate with the second operand rotated by 90
 * degrees (vcmla_rot90).  Treating consecutive lane pairs as complex
 * numbers (re, im), each pair accumulates:
 *   r.re += -b.im * a.im
 *   r.im +=  b.re * a.im
 * (visible in the scalar fallback loop below).  The native path needs
 * ARMv8.3 plus a compiler that provides the intrinsic (GCC >= 9,
 * Clang >= 12, per the guard). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vcmla_rot90_f32(simde_float32x2_t r, simde_float32x2_t a, simde_float32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && SIMDE_ARCH_ARM_CHECK(8,3) && \
      (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(9,0,0)) && \
      (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0))
    return vcmla_rot90_f32(r, a, b);
  #else
    simde_float32x2_private
      r_ = simde_float32x2_to_private(r),
      a_ = simde_float32x2_to_private(a),
      b_ = simde_float32x2_to_private(b);
    /* Shuffle path (disabled under the GCC bug 100760 workaround macro):
     * a_ becomes (a.im, a.im); b_ becomes (-b.im, b.re) -- index 1 picks
     * from the negated copy, index 2 picks lane 0 of the original. */
    #if defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100760)
      a_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, a_.values, 1, 1);
      b_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, -b_.values, b_.values, 1, 2);
      r_.values += b_.values * a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / (2 * sizeof(r_.values[0]))) ; i++) {
        r_.values[2 * i] += -(b_.values[2 * i + 1]) * a_.values[2 * i + 1];
        r_.values[2 * i + 1] += b_.values[2 * i] * a_.values[2 * i + 1];
      }
    #endif
    return simde_float32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES)
  #undef vcmla_rot90_f32
  #define vcmla_rot90_f32(r, a, b) simde_vcmla_rot90_f32(r, a, b)
#endif

/* 128-bit version: same rot90 formula on two complex pairs. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vcmlaq_rot90_f32(simde_float32x4_t r, simde_float32x4_t a, simde_float32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && SIMDE_ARCH_ARM_CHECK(8,3) && \
      (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(9,0,0)) && \
      (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0))
    return vcmlaq_rot90_f32(r, a, b);
  #else
    simde_float32x4_private
      r_ = simde_float32x4_to_private(r),
      a_ = simde_float32x4_to_private(a),
      b_ = simde_float32x4_to_private(b);
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      /* a -> (im, im) per pair; b -> (-im, re) per pair; then r += b * a. */
      a_.v128 = wasm_i32x4_shuffle(a_.v128, a_.v128, 1, 1, 3, 3);
      b_.v128 = wasm_i32x4_shuffle(wasm_f32x4_neg(b_.v128), b_.v128, 1, 4, 3, 6);
      r_.v128 = wasm_f32x4_add(r_.v128, wasm_f32x4_mul(b_.v128, a_.v128));
    #elif defined(SIMDE_SHUFFLE_VECTOR_)
      a_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, a_.values, 1, 1, 3, 3);
      b_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, -b_.values, b_.values, 1, 4, 3, 6);
      r_.values += b_.values * a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / (2 * sizeof(r_.values[0]))) ; i++) {
        r_.values[2 * i] += -(b_.values[2 * i + 1]) * a_.values[2 * i + 1];
        r_.values[2 * i + 1] += b_.values[2 * i] * a_.values[2 * i + 1];
      }
    #endif
    return simde_float32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES)
  #undef vcmlaq_rot90_f32
  #define vcmlaq_rot90_f32(r, a, b) simde_vcmlaq_rot90_f32(r, a, b)
#endif
/* Complex multiply-accumulate (double precision) with the second operand
 * rotated by 90 degrees: for the single complex pair (re, im),
 *   r.re += -b.im * a.im
 *   r.im +=  b.re * a.im
 * Fix: the native-alias guard previously tested
 * SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES, inconsistent with the
 * A64V8 native test above -- the f64 q-form intrinsic is AArch64-only,
 * so on an AArch32 (A32V8) native build the alias was never emitted and
 * vcmlaq_rot90_f64 remained undefined for callers.  Gate on the A64V8
 * alias macro, matching the native test (and the f32 variants, whose
 * alias guard matches their A32V8 native test). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vcmlaq_rot90_f64(simde_float64x2_t r, simde_float64x2_t a, simde_float64x2_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && SIMDE_ARCH_ARM_CHECK(8,3) && \
      (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(9,0,0)) && \
      (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0))
    return vcmlaq_rot90_f64(r, a, b);
  #else
    simde_float64x2_private
      r_ = simde_float64x2_to_private(r),
      a_ = simde_float64x2_to_private(a),
      b_ = simde_float64x2_to_private(b);
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      /* a -> (im, im); b -> (-im, re); then r += b * a. */
      a_.v128 = wasm_i64x2_shuffle(a_.v128, a_.v128, 1, 1);
      b_.v128 = wasm_i64x2_shuffle(wasm_f64x2_neg(b_.v128), b_.v128, 1, 2);
      r_.v128 = wasm_f64x2_add(r_.v128, wasm_f64x2_mul(b_.v128, a_.v128));
    #elif defined(SIMDE_SHUFFLE_VECTOR_)
      a_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, a_.values, 1, 1);
      b_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, -b_.values, b_.values, 1, 2);
      r_.values += b_.values * a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / (2 * sizeof(r_.values[0]))) ; i++) {
        r_.values[2 * i] += -(b_.values[2 * i + 1]) * a_.values[2 * i + 1];
        r_.values[2 * i + 1] += b_.values[2 * i] * a_.values[2 * i + 1];
      }
    #endif
    return simde_float64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vcmlaq_rot90_f64
  #define vcmlaq_rot90_f64(r, a, b) simde_vcmlaq_rot90_f64(r, a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_CMLA_ROT90_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/rndp.h | .h | 4,400 | 148 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020-2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_RNDP_H)
#define SIMDE_ARM_NEON_RNDP_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* simde_vrndp_*: lane-wise round toward +infinity (ceiling), matching
 * the NEON FRINTP family.
 * NOTE(review): the native test below is A32V8 (FRINTP is ARMv8-only)
 * but the alias guard at the bottom tests the A32V7 macro; sibling f64
 * functions gate their aliases on A64V8 to match their native test --
 * confirm whether A32V8_ENABLE_NATIVE_ALIASES was intended here. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vrndp_f32(simde_float32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V8_NATIVE)
    return vrndp_f32(a);
  #else
    simde_float32x2_private
      r_,
      a_ = simde_float32x2_to_private(a);
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_math_ceilf(a_.values[i]);
    }
    return simde_float32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrndp_f32
  #define vrndp_f32(a) simde_vrndp_f32(a)
#endif

/* Lane-wise ceiling of a float64x1 vector (AArch64-only native). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vrndp_f64(simde_float64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vrndp_f64(a);
  #else
    simde_float64x1_private
      r_,
      a_ = simde_float64x1_to_private(a);
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_math_ceil(a_.values[i]);
    }
    return simde_float64x1_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vrndp_f64
  #define vrndp_f64(a) simde_vrndp_f64(a)
#endif

/* Lane-wise ceiling of a float32x4 vector. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vrndpq_f32(simde_float32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V8_NATIVE)
    return vrndpq_f32(a);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    return vec_ceil(a);
  #else
    simde_float32x4_private
      r_,
      a_ = simde_float32x4_to_private(a);
    #if defined(SIMDE_X86_SSE4_1_NATIVE)
      /* SSE4.1 rounding with an explicit +inf rounding mode. */
      r_.m128 = _mm_round_ps(a_.m128, _MM_FROUND_TO_POS_INF)
    #elif defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
      r_.m128 = _mm_ceil_ps(a_.m128);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = simde_math_ceilf(a_.values[i]);
      }
    #endif
    return simde_float32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrndpq_f32
  #define vrndpq_f32(a) simde_vrndpq_f32(a)
#endif

/* Lane-wise ceiling of a float64x2 vector (AArch64-only native). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vrndpq_f64(simde_float64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vrndpq_f64(a);
  #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
    return vec_ceil(a);
  #else
    simde_float64x2_private
      r_,
      a_ = simde_float64x2_to_private(a);
    #if defined(SIMDE_X86_SSE4_1_NATIVE)
      r_.m128d = _mm_round_pd(a_.m128d, _MM_FROUND_TO_POS_INF);
    #elif defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
      r_.m128d = _mm_ceil_pd(a_.m128d);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = simde_math_ceil(a_.values[i]);
      }
    #endif
    return simde_float64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vrndpq_f64
  #define vrndpq_f64(a) simde_vrndpq_f64(a)
#endif
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_RNDP_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/ld1_x3.h | .h | 10,726 | 288 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2021 Zhi An Ng <zhin@google.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_LD1_X3_H)
#define SIMDE_ARM_NEON_LD1_X3_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
#if HEDLEY_GCC_VERSION_CHECK(7,0,0)
SIMDE_DIAGNOSTIC_DISABLE_MAYBE_UNINITIAZILED_
#endif
SIMDE_BEGIN_DECLS_
#if !defined(SIMDE_BUG_INTEL_857088)
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2x3_t
simde_vld1_f32_x3(simde_float32 const ptr[HEDLEY_ARRAY_PARAM(6)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1_f32_x3(ptr);
#else
simde_float32x2_private a_[3];
for (size_t i = 0; i < 6; i++) {
a_[i / 2].values[i % 2] = ptr[i];
}
simde_float32x2x3_t s_ = { { simde_float32x2_from_private(a_[0]),
simde_float32x2_from_private(a_[1]),
simde_float32x2_from_private(a_[2]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_f32_x3
#define vld1_f32_x3(a) simde_vld1_f32_x3((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1x3_t
simde_vld1_f64_x3(simde_float64 const ptr[HEDLEY_ARRAY_PARAM(3)]) {
#if \
defined(SIMDE_ARM_NEON_A64V8_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,0,0)) && \
(!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0))
return vld1_f64_x3(ptr);
#else
simde_float64x1_private a_[3];
for (size_t i = 0; i < 3; i++) {
a_[i].values[0] = ptr[i];
}
simde_float64x1x3_t s_ = { { simde_float64x1_from_private(a_[0]),
simde_float64x1_from_private(a_[1]),
simde_float64x1_from_private(a_[2]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vld1_f64_x3
#define vld1_f64_x3(a) simde_vld1_f64_x3((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8x3_t
simde_vld1_s8_x3(int8_t const ptr[HEDLEY_ARRAY_PARAM(24)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(12,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1_s8_x3(ptr);
#else
simde_int8x8_private a_[3];
for (size_t i = 0; i < 24; i++) {
a_[i / 8].values[i % 8] = ptr[i];
}
simde_int8x8x3_t s_ = { { simde_int8x8_from_private(a_[0]),
simde_int8x8_from_private(a_[1]),
simde_int8x8_from_private(a_[2]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_s8_x3
#define vld1_s8_x3(a) simde_vld1_s8_x3((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4x3_t
simde_vld1_s16_x3(int16_t const ptr[HEDLEY_ARRAY_PARAM(12)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1_s16_x3(ptr);
#else
simde_int16x4_private a_[3];
for (size_t i = 0; i < 12; i++) {
a_[i / 4].values[i % 4] = ptr[i];
}
simde_int16x4x3_t s_ = { { simde_int16x4_from_private(a_[0]),
simde_int16x4_from_private(a_[1]),
simde_int16x4_from_private(a_[2]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_s16_x3
#define vld1_s16_x3(a) simde_vld1_s16_x3((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2x3_t
simde_vld1_s32_x3(int32_t const ptr[HEDLEY_ARRAY_PARAM(6)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(12,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1_s32_x3(ptr);
#else
simde_int32x2_private a_[3];
for (size_t i = 0; i < 6; i++) {
a_[i / 2].values[i % 2] = ptr[i];
}
simde_int32x2x3_t s_ = { { simde_int32x2_from_private(a_[0]),
simde_int32x2_from_private(a_[1]),
simde_int32x2_from_private(a_[2]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_s32_x3
#define vld1_s32_x3(a) simde_vld1_s32_x3((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1x3_t
simde_vld1_s64_x3(int64_t const ptr[HEDLEY_ARRAY_PARAM(3)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1_s64_x3(ptr);
#else
simde_int64x1_private a_[3];
for (size_t i = 0; i < 3; i++) {
a_[i].values[0] = ptr[i];
}
simde_int64x1x3_t s_ = { { simde_int64x1_from_private(a_[0]),
simde_int64x1_from_private(a_[1]),
simde_int64x1_from_private(a_[2]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_s64_x3
#define vld1_s64_x3(a) simde_vld1_s64_x3((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8x3_t
simde_vld1_u8_x3(uint8_t const ptr[HEDLEY_ARRAY_PARAM(24)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1_u8_x3(ptr);
#else
simde_uint8x8_private a_[3];
for (size_t i = 0; i < 24; i++) {
a_[i / 8].values[i % 8] = ptr[i];
}
simde_uint8x8x3_t s_ = { { simde_uint8x8_from_private(a_[0]),
simde_uint8x8_from_private(a_[1]),
simde_uint8x8_from_private(a_[2]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_u8_x3
#define vld1_u8_x3(a) simde_vld1_u8_x3((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4x3_t
simde_vld1_u16_x3(uint16_t const ptr[HEDLEY_ARRAY_PARAM(12)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1_u16_x3(ptr);
#else
simde_uint16x4_private a_[3];
for (size_t i = 0; i < 12; i++) {
a_[i / 4].values[i % 4] = ptr[i];
}
simde_uint16x4x3_t s_ = { { simde_uint16x4_from_private(a_[0]),
simde_uint16x4_from_private(a_[1]),
simde_uint16x4_from_private(a_[2]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_u16_x3
#define vld1_u16_x3(a) simde_vld1_u16_x3((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2x3_t
simde_vld1_u32_x3(uint32_t const ptr[HEDLEY_ARRAY_PARAM(6)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1_u32_x3(ptr);
#else
simde_uint32x2_private a_[3];
for (size_t i = 0; i < 6; i++) {
a_[i / 2].values[i % 2] = ptr[i];
}
simde_uint32x2x3_t s_ = { { simde_uint32x2_from_private(a_[0]),
simde_uint32x2_from_private(a_[1]),
simde_uint32x2_from_private(a_[2]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_u32_x3
#define vld1_u32_x3(a) simde_vld1_u32_x3((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1x3_t
simde_vld1_u64_x3(uint64_t const ptr[HEDLEY_ARRAY_PARAM(3)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1_u64_x3(ptr);
#else
simde_uint64x1_private a_[3];
for (size_t i = 0; i < 3; i++) {
a_[i].values[0] = ptr[i];
}
simde_uint64x1x3_t s_ = { { simde_uint64x1_from_private(a_[0]),
simde_uint64x1_from_private(a_[1]),
simde_uint64x1_from_private(a_[2]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_u64_x3
#define vld1_u64_x3(a) simde_vld1_u64_x3((a))
#endif
#endif /* !defined(SIMDE_BUG_INTEL_857088) */
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_LD1_X3_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/paddl.h | .h | 10,862 | 348 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_PADDL_H)
#define SIMDE_ARM_NEON_PADDL_H
#include "add.h"
#include "get_high.h"
#include "get_low.h"
#include "movl.h"
#include "movl_high.h"
#include "padd.h"
#include "reinterpret.h"
#include "shl_n.h"
#include "shr_n.h"
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vpaddl_s8(simde_int8x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpaddl_s8(a);
#else
simde_int16x8_t tmp = simde_vmovl_s8(a);
return simde_vpadd_s16(simde_vget_low_s16(tmp), simde_vget_high_s16(tmp));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpaddl_s8
#define vpaddl_s8(a) simde_vpaddl_s8((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vpaddl_s16(simde_int16x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpaddl_s16(a);
#else
simde_int32x4_t tmp = simde_vmovl_s16(a);
return simde_vpadd_s32(simde_vget_low_s32(tmp), simde_vget_high_s32(tmp));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpaddl_s16
#define vpaddl_s16(a) simde_vpaddl_s16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vpaddl_s32(simde_int32x2_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpaddl_s32(a);
#else
simde_int64x2_t tmp = simde_vmovl_s32(a);
return simde_vadd_s64(simde_vget_low_s64(tmp), simde_vget_high_s64(tmp));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpaddl_s32
#define vpaddl_s32(a) simde_vpaddl_s32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vpaddl_u8(simde_uint8x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpaddl_u8(a);
#else
simde_uint16x8_t tmp = simde_vmovl_u8(a);
return simde_vpadd_u16(simde_vget_low_u16(tmp), simde_vget_high_u16(tmp));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpaddl_u8
#define vpaddl_u8(a) simde_vpaddl_u8((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vpaddl_u16(simde_uint16x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpaddl_u16(a);
#else
simde_uint32x4_t tmp = simde_vmovl_u16(a);
return simde_vpadd_u32(simde_vget_low_u32(tmp), simde_vget_high_u32(tmp));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpaddl_u16
#define vpaddl_u16(a) simde_vpaddl_u16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vpaddl_u32(simde_uint32x2_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpaddl_u32(a);
#else
simde_uint64x2_t tmp = simde_vmovl_u32(a);
return simde_vadd_u64(simde_vget_low_u64(tmp), simde_vget_high_u64(tmp));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpaddl_u32
#define vpaddl_u32(a) simde_vpaddl_u32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vpaddlq_s8(simde_int8x16_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpaddlq_s8(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(signed char) one = vec_splat_s8(1);
return
vec_add(
vec_mule(a, one),
vec_mulo(a, one)
);
#elif \
defined(SIMDE_X86_XOP_NATIVE) || \
defined(SIMDE_X86_SSSE3_NATIVE) || \
defined(SIMDE_WASM_SIMD128_NATIVE)
simde_int8x16_private a_ = simde_int8x16_to_private(a);
simde_int16x8_private r_;
#if defined(SIMDE_X86_XOP_NATIVE)
r_.m128i = _mm_haddw_epi8(a_.m128i);
#elif defined(SIMDE_X86_SSSE3_NATIVE)
r_.m128i = _mm_maddubs_epi16(_mm_set1_epi8(INT8_C(1)), a_.m128i);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i16x8_extadd_pairwise_i8x16(a_.v128);
#endif
return simde_int16x8_from_private(r_);
#else
simde_int16x8_t lo = simde_vshrq_n_s16(simde_vshlq_n_s16(simde_vreinterpretq_s16_s8(a), 8), 8);
simde_int16x8_t hi = simde_vshrq_n_s16(simde_vreinterpretq_s16_s8(a), 8);
return simde_vaddq_s16(lo, hi);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpaddlq_s8
#define vpaddlq_s8(a) simde_vpaddlq_s8((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vpaddlq_s16(simde_int16x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpaddlq_s16(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(signed short) one = vec_splat_s16(1);
return
vec_add(
vec_mule(a, one),
vec_mulo(a, one)
);
#elif \
defined(SIMDE_X86_XOP_NATIVE) || \
defined(SIMDE_X86_SSE2_NATIVE)
simde_int16x8_private a_ = simde_int16x8_to_private(a);
simde_int32x4_private r_;
#if defined(SIMDE_X86_XOP_NATIVE)
r_.m128i = _mm_haddd_epi16(a_.m128i);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_madd_epi16(a_.m128i, _mm_set1_epi16(INT8_C(1)));
#endif
return simde_int32x4_from_private(r_);
#else
simde_int32x4_t lo = simde_vshrq_n_s32(simde_vshlq_n_s32(simde_vreinterpretq_s32_s16(a), 16), 16);
simde_int32x4_t hi = simde_vshrq_n_s32(simde_vreinterpretq_s32_s16(a), 16);
return simde_vaddq_s32(lo, hi);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpaddlq_s16
#define vpaddlq_s16(a) simde_vpaddlq_s16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vpaddlq_s32(simde_int32x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpaddlq_s32(a);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(int) one = vec_splat_s32(1);
return
vec_add(
vec_mule(a, one),
vec_mulo(a, one)
);
#else
simde_int64x2_t lo = simde_vshrq_n_s64(simde_vshlq_n_s64(simde_vreinterpretq_s64_s32(a), 32), 32);
simde_int64x2_t hi = simde_vshrq_n_s64(simde_vreinterpretq_s64_s32(a), 32);
return simde_vaddq_s64(lo, hi);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpaddlq_s32
#define vpaddlq_s32(a) simde_vpaddlq_s32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vpaddlq_u8(simde_uint8x16_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpaddlq_u8(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) one = vec_splat_u8(1);
return
vec_add(
vec_mule(a, one),
vec_mulo(a, one)
);
#elif \
defined(SIMDE_X86_XOP_NATIVE) || \
defined(SIMDE_X86_SSSE3_NATIVE)
simde_uint8x16_private a_ = simde_uint8x16_to_private(a);
simde_uint16x8_private r_;
#if defined(SIMDE_X86_XOP_NATIVE)
r_.m128i = _mm_haddw_epu8(a_.m128i);
#elif defined(SIMDE_X86_SSSE3_NATIVE)
r_.m128i = _mm_maddubs_epi16(a_.m128i, _mm_set1_epi8(INT8_C(1)));
#endif
return simde_uint16x8_from_private(r_);
#else
simde_uint16x8_t lo = simde_vshrq_n_u16(simde_vshlq_n_u16(simde_vreinterpretq_u16_u8(a), 8), 8);
simde_uint16x8_t hi = simde_vshrq_n_u16(simde_vreinterpretq_u16_u8(a), 8);
return simde_vaddq_u16(lo, hi);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpaddlq_u8
#define vpaddlq_u8(a) simde_vpaddlq_u8((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vpaddlq_u16(simde_uint16x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpaddlq_u16(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) one = vec_splat_u16(1);
return
vec_add(
vec_mule(a, one),
vec_mulo(a, one)
);
#elif \
defined(SIMDE_X86_XOP_NATIVE) || \
defined(SIMDE_X86_SSSE3_NATIVE)
simde_uint16x8_private a_ = simde_uint16x8_to_private(a);
simde_uint32x4_private r_;
#if defined(SIMDE_X86_XOP_NATIVE)
r_.sse_m128i = _mm_haddd_epu16(a_.sse_m128i);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i =
_mm_add_epi32(
_mm_srli_epi32(a_.m128i, 16),
_mm_and_si128(a_.m128i, _mm_set1_epi32(INT32_C(0x0000ffff)))
);
#endif
return simde_uint32x4_from_private(r_);
#else
simde_uint32x4_t lo = simde_vshrq_n_u32(simde_vshlq_n_u32(simde_vreinterpretq_u32_u16(a), 16), 16);
simde_uint32x4_t hi = simde_vshrq_n_u32(simde_vreinterpretq_u32_u16(a), 16);
return simde_vaddq_u32(lo, hi);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpaddlq_u16
#define vpaddlq_u16(a) simde_vpaddlq_u16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vpaddlq_u32(simde_uint32x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpaddlq_u32(a);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) one = vec_splat_u32(1);
return
vec_add(
vec_mule(a, one),
vec_mulo(a, one)
);
#elif defined(SIMDE_X86_SSE2_NATIVE)
simde_uint32x4_private a_ = simde_uint32x4_to_private(a);
simde_uint64x2_private r_;
r_.m128i =
_mm_add_epi64(
_mm_srli_epi64(a_.m128i, 32),
_mm_and_si128(a_.m128i, _mm_set1_epi64x(INT64_C(0x00000000ffffffff)))
);
return simde_uint64x2_from_private(r_);
#else
simde_uint64x2_t lo = simde_vshrq_n_u64(simde_vshlq_n_u64(simde_vreinterpretq_u64_u32(a), 32), 32);
simde_uint64x2_t hi = simde_vshrq_n_u64(simde_vreinterpretq_u64_u32(a), 32);
return simde_vaddq_u64(lo, hi);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpaddlq_u32
#define vpaddlq_u32(a) simde_vpaddlq_u32((a))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* SIMDE_ARM_NEON_PADDL_H */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/tbl.h | .h | 8,375 | 244 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Christopher Moore <moore@free.fr>
*/
#if !defined(SIMDE_ARM_NEON_TBL_H)
#define SIMDE_ARM_NEON_TBL_H
#include "reinterpret.h"
#include "combine.h"
#include "get_low.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vtbl1_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vtbl1_u8(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
simde_uint8x16_private
r_,
a_ = simde_uint8x16_to_private(simde_vcombine_u8(a, a)),
b_ = simde_uint8x16_to_private(simde_vcombine_u8(b, b));
r_.v128 = wasm_i8x16_swizzle(a_.v128, b_.v128);
r_.v128 = wasm_v128_and(r_.v128, wasm_u8x16_lt(b_.v128, wasm_i8x16_splat(8)));
return simde_vget_low_u8(simde_uint8x16_from_private(r_));
#else
simde_uint8x8_private
r_,
a_ = simde_uint8x8_to_private(a),
b_ = simde_uint8x8_to_private(b);
#if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_shuffle_pi8(a_.m64, _mm_or_si64(b_.m64, _mm_cmpgt_pi8(b_.m64, _mm_set1_pi8(7))));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (b_.values[i] < 8) ? a_.values[b_.values[i]] : 0;
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vtbl1_u8
#define vtbl1_u8(a, b) simde_vtbl1_u8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vtbl1_s8(simde_int8x8_t a, simde_int8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vtbl1_s8(a, b);
#else
return simde_vreinterpret_s8_u8(simde_vtbl1_u8(simde_vreinterpret_u8_s8(a), simde_vreinterpret_u8_s8(b)));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vtbl1_s8
#define vtbl1_s8(a, b) simde_vtbl1_s8((a), (b))
#endif
#if !defined(SIMDE_BUG_INTEL_857088)
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vtbl2_u8(simde_uint8x8x2_t a, simde_uint8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vtbl2_u8(a, b);
#else
simde_uint8x8_private
r_,
a_[2] = { simde_uint8x8_to_private(a.val[0]), simde_uint8x8_to_private(a.val[1]) },
b_ = simde_uint8x8_to_private(b);
#if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
__m128i a128 = _mm_set_epi64(a_[1].m64, a_[0].m64);
__m128i b128 = _mm_set1_epi64(b_.m64);
__m128i r128 = _mm_shuffle_epi8(a128, _mm_or_si128(b128, _mm_cmpgt_epi8(b128, _mm_set1_epi8(15))));
r_.m64 = _mm_movepi64_pi64(r128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (b_.values[i] < 16) ? a_[b_.values[i] / 8].values[b_.values[i] & 7] : 0;
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vtbl2_u8
#define vtbl2_u8(a, b) simde_vtbl2_u8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vtbl2_s8(simde_int8x8x2_t a, simde_int8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vtbl2_s8(a, b);
#else
simde_uint8x8x2_t a_;
simde_memcpy(&a_, &a, sizeof(a_));
return simde_vreinterpret_s8_u8(simde_vtbl2_u8(a_, simde_vreinterpret_u8_s8(b)));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vtbl2_s8
#define vtbl2_s8(a, b) simde_vtbl2_s8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vtbl3_u8(simde_uint8x8x3_t a, simde_uint8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vtbl3_u8(a, b);
#else
simde_uint8x8_private
r_,
a_[3] = { simde_uint8x8_to_private(a.val[0]), simde_uint8x8_to_private(a.val[1]), simde_uint8x8_to_private(a.val[2]) },
b_ = simde_uint8x8_to_private(b);
#if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
__m128i b128 = _mm_set1_epi64(b_.m64);
b128 = _mm_or_si128(b128, _mm_cmpgt_epi8(b128, _mm_set1_epi8(23)));
__m128i r128_01 = _mm_shuffle_epi8(_mm_set_epi64(a_[1].m64, a_[0].m64), b128);
__m128i r128_2 = _mm_shuffle_epi8(_mm_set1_epi64(a_[2].m64), b128);
__m128i r128 = _mm_blendv_epi8(r128_01, r128_2, _mm_slli_epi32(b128, 3));
r_.m64 = _mm_movepi64_pi64(r128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (b_.values[i] < 24) ? a_[b_.values[i] / 8].values[b_.values[i] & 7] : 0;
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vtbl3_u8
#define vtbl3_u8(a, b) simde_vtbl3_u8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vtbl3_s8(simde_int8x8x3_t a, simde_int8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vtbl3_s8(a, b);
#else
simde_uint8x8x3_t a_;
simde_memcpy(&a_, &a, sizeof(a_));
return simde_vreinterpret_s8_u8(simde_vtbl3_u8(a_, simde_vreinterpret_u8_s8(b)));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vtbl3_s8
#define vtbl3_s8(a, b) simde_vtbl3_s8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vtbl4_u8(simde_uint8x8x4_t a, simde_uint8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vtbl4_u8(a, b);
#else
simde_uint8x8_private
r_,
a_[4] = { simde_uint8x8_to_private(a.val[0]), simde_uint8x8_to_private(a.val[1]), simde_uint8x8_to_private(a.val[2]), simde_uint8x8_to_private(a.val[3]) },
b_ = simde_uint8x8_to_private(b);
#if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
__m128i b128 = _mm_set1_epi64(b_.m64);
b128 = _mm_or_si128(b128, _mm_cmpgt_epi8(b128, _mm_set1_epi8(31)));
__m128i r128_01 = _mm_shuffle_epi8(_mm_set_epi64(a_[1].m64, a_[0].m64), b128);
__m128i r128_23 = _mm_shuffle_epi8(_mm_set_epi64(a_[3].m64, a_[2].m64), b128);
__m128i r128 = _mm_blendv_epi8(r128_01, r128_23, _mm_slli_epi32(b128, 3));
r_.m64 = _mm_movepi64_pi64(r128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (b_.values[i] < 32) ? a_[b_.values[i] / 8].values[b_.values[i] & 7] : 0;
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vtbl4_u8
#define vtbl4_u8(a, b) simde_vtbl4_u8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vtbl4_s8(simde_int8x8x4_t a, simde_int8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vtbl4_s8(a, b);
#else
simde_uint8x8x4_t a_;
simde_memcpy(&a_, &a, sizeof(a_));
return simde_vreinterpret_s8_u8(simde_vtbl4_u8(a_, simde_vreinterpret_u8_s8(b)));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vtbl4_s8
#define vtbl4_s8(a, b) simde_vtbl4_s8((a), (b))
#endif
#endif /* !defined(SIMDE_BUG_INTEL_857088) */
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_TBL_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/mlal_n.h | .h | 4,149 | 129 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_MLAL_N_H)
#define SIMDE_ARM_NEON_MLAL_N_H
#include "movl.h"
#include "dup_n.h"
#include "mla.h"
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vmlal_n_s16(simde_int32x4_t a, simde_int16x4_t b, int16_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmlal_n_s16(a, b, c);
#else
return simde_vmlaq_s32(a, simde_vmovl_s16(b), simde_vdupq_n_s32(c));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlal_n_s16
#define vmlal_n_s16(a, b, c) simde_vmlal_n_s16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vmlal_n_s32(simde_int64x2_t a, simde_int32x2_t b, int32_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmlal_n_s32(a, b, c);
#else
simde_int64x2_private
r_,
a_ = simde_int64x2_to_private(a),
b_ = simde_int64x2_to_private(simde_vmovl_s32(b)),
c_ = simde_int64x2_to_private(simde_vdupq_n_s64(c));
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = (b_.values * c_.values) + a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i];
}
#endif
return simde_int64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlal_n_s32
#define vmlal_n_s32(a, b, c) simde_vmlal_n_s32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vmlal_n_u16(simde_uint32x4_t a, simde_uint16x4_t b, uint16_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmlal_n_u16(a, b, c);
#else
return simde_vmlaq_u32(a, simde_vmovl_u16(b), simde_vdupq_n_u32(c));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlal_n_u16
#define vmlal_n_u16(a, b, c) simde_vmlal_n_u16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vmlal_n_u32(simde_uint64x2_t a, simde_uint32x2_t b, uint32_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmlal_n_u32(a, b, c);
#else
simde_uint64x2_private
r_,
a_ = simde_uint64x2_to_private(a),
b_ = simde_uint64x2_to_private(simde_vmovl_u32(b)),
c_ = simde_uint64x2_to_private(simde_vdupq_n_u64(c));
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = (b_.values * c_.values) + a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i];
}
#endif
return simde_uint64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlal_n_u32
#define vmlal_n_u32(a, b, c) simde_vmlal_n_u32((a), (b), (c))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_MLAL_N_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/hadd.h | .h | 10,597 | 311 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
/* TODO: the 128-bit versions only require AVX-512 because of the final
* conversions from larger types down to smaller ones. We could get
* the same results from AVX/AVX2 instructions with some shuffling
* to extract the low half of each input element to the low half
* of a 256-bit vector, then cast that to a 128-bit vector. */
#if !defined(SIMDE_ARM_NEON_HADD_H)
#define SIMDE_ARM_NEON_HADD_H
#include "addl.h"
#include "shr_n.h"
#include "movn.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vhadd family, 64-bit vectors: element-wise halving add.
 * Each lane is (a + b) >> 1, computed in a widened element type so the
 * intermediate sum cannot overflow, then narrowed back down. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vhadd_s8(simde_int8x8_t a, simde_int8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhadd_s8(a, b);
#else
/* Widen to 16 bits, add, shift right by one, narrow to 8 bits. */
simde_int16x8_t wide_sum = simde_vaddl_s8(a, b);
return simde_vmovn_s16(simde_vshrq_n_s16(wide_sum, 1));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhadd_s8
#define vhadd_s8(a, b) simde_vhadd_s8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vhadd_s16(simde_int16x4_t a, simde_int16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhadd_s16(a, b);
#else
simde_int32x4_t wide_sum = simde_vaddl_s16(a, b);
return simde_vmovn_s32(simde_vshrq_n_s32(wide_sum, 1));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhadd_s16
#define vhadd_s16(a, b) simde_vhadd_s16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vhadd_s32(simde_int32x2_t a, simde_int32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhadd_s32(a, b);
#else
simde_int64x2_t wide_sum = simde_vaddl_s32(a, b);
return simde_vmovn_s64(simde_vshrq_n_s64(wide_sum, 1));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhadd_s32
#define vhadd_s32(a, b) simde_vhadd_s32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vhadd_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhadd_u8(a, b);
#else
simde_uint16x8_t wide_sum = simde_vaddl_u8(a, b);
return simde_vmovn_u16(simde_vshrq_n_u16(wide_sum, 1));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhadd_u8
#define vhadd_u8(a, b) simde_vhadd_u8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vhadd_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhadd_u16(a, b);
#else
simde_uint32x4_t wide_sum = simde_vaddl_u16(a, b);
return simde_vmovn_u32(simde_vshrq_n_u32(wide_sum, 1));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhadd_u16
#define vhadd_u16(a, b) simde_vhadd_u16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vhadd_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhadd_u32(a, b);
#else
simde_uint64x2_t wide_sum = simde_vaddl_u32(a, b);
return simde_vmovn_u64(simde_vshrq_n_u64(wide_sum, 1));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhadd_u32
#define vhadd_u32(a, b) simde_vhadd_u32((a), (b))
#endif
/* vhaddq, signed 128-bit vectors: element-wise halving add, (a + b) >> 1
 * computed in a widened type so the intermediate sum cannot overflow.
 * The AVX-512 paths widen both inputs into a 256-bit register, add,
 * arithmetic-shift right by one (preserving the sign of negative sums),
 * and narrow back to the original element width. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vhaddq_s8(simde_int8x16_t a, simde_int8x16_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhaddq_s8(a, b);
#else
simde_int8x16_private
r_,
a_ = simde_int8x16_to_private(a),
b_ = simde_int8x16_to_private(b);
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
/* 8->16-bit widen, add, arithmetic shift, 16->8-bit narrow. */
r_.m128i = _mm256_cvtepi16_epi8(_mm256_srai_epi16(_mm256_add_epi16(_mm256_cvtepi8_epi16(a_.m128i), _mm256_cvtepi8_epi16(b_.m128i)), 1));
#else
/* Portable fallback: widen each lane to int16_t before the add. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(int8_t, (HEDLEY_STATIC_CAST(int16_t, a_.values[i]) + HEDLEY_STATIC_CAST(int16_t, b_.values[i])) >> 1);
}
#endif
return simde_int8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhaddq_s8
#define vhaddq_s8(a, b) simde_vhaddq_s8((a), (b))
#endif
/* 16-bit lanes: widened to int32_t. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vhaddq_s16(simde_int16x8_t a, simde_int16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhaddq_s16(a, b);
#else
simde_int16x8_private
r_,
a_ = simde_int16x8_to_private(a),
b_ = simde_int16x8_to_private(b);
#if defined(SIMDE_X86_AVX512VL_NATIVE)
r_.m128i = _mm256_cvtepi32_epi16(_mm256_srai_epi32(_mm256_add_epi32(_mm256_cvtepi16_epi32(a_.m128i), _mm256_cvtepi16_epi32(b_.m128i)), 1));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(int16_t, (HEDLEY_STATIC_CAST(int32_t, a_.values[i]) + HEDLEY_STATIC_CAST(int32_t, b_.values[i])) >> 1);
}
#endif
return simde_int16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhaddq_s16
#define vhaddq_s16(a, b) simde_vhaddq_s16((a), (b))
#endif
/* 32-bit lanes: widened to int64_t. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vhaddq_s32(simde_int32x4_t a, simde_int32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhaddq_s32(a, b);
#else
simde_int32x4_private
r_,
a_ = simde_int32x4_to_private(a),
b_ = simde_int32x4_to_private(b);
#if defined(SIMDE_X86_AVX512VL_NATIVE)
r_.m128i = _mm256_cvtepi64_epi32(_mm256_srai_epi64(_mm256_add_epi64(_mm256_cvtepi32_epi64(a_.m128i), _mm256_cvtepi32_epi64(b_.m128i)), 1));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(int32_t, (HEDLEY_STATIC_CAST(int64_t, a_.values[i]) + HEDLEY_STATIC_CAST(int64_t, b_.values[i])) >> 1);
}
#endif
return simde_int32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhaddq_s32
#define vhaddq_s32(a, b) simde_vhaddq_s32((a), (b))
#endif
/* vhaddq, unsigned 128-bit vectors: element-wise halving add,
 * (a + b) >> 1 computed in a widened type. The unsigned variants use
 * logical shifts (_mm256_srli_*, wasm_u16x8_shr) rather than the
 * arithmetic shifts of the signed variants. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vhaddq_u8(simde_uint8x16_t a, simde_uint8x16_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhaddq_u8(a, b);
#else
simde_uint8x16_private
r_,
a_ = simde_uint8x16_to_private(a),
b_ = simde_uint8x16_to_private(b);
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
r_.m128i = _mm256_cvtepi16_epi8(_mm256_srli_epi16(_mm256_add_epi16(_mm256_cvtepu8_epi16(a_.m128i), _mm256_cvtepu8_epi16(b_.m128i)), 1));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
/* Widen low and high halves separately, halve each, then interleave
 * the even-numbered (low) bytes of both halves back into one vector. */
v128_t lo =
wasm_u16x8_shr(wasm_i16x8_add(wasm_u16x8_extend_low_u8x16(a_.v128),
wasm_u16x8_extend_low_u8x16(b_.v128)),
1);
v128_t hi =
wasm_u16x8_shr(wasm_i16x8_add(wasm_u16x8_extend_high_u8x16(a_.v128),
wasm_u16x8_extend_high_u8x16(b_.v128)),
1);
r_.v128 = wasm_i8x16_shuffle(lo, hi, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
22, 24, 26, 28, 30);
#else
/* Portable fallback: widen each lane to uint16_t before the add. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, (HEDLEY_STATIC_CAST(uint16_t, a_.values[i]) + HEDLEY_STATIC_CAST(uint16_t, b_.values[i])) >> 1);
}
#endif
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhaddq_u8
#define vhaddq_u8(a, b) simde_vhaddq_u8((a), (b))
#endif
/* 16-bit lanes: widened to uint32_t. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vhaddq_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhaddq_u16(a, b);
#else
simde_uint16x8_private
r_,
a_ = simde_uint16x8_to_private(a),
b_ = simde_uint16x8_to_private(b);
#if defined(SIMDE_X86_AVX512VL_NATIVE)
r_.m128i = _mm256_cvtepi32_epi16(_mm256_srli_epi32(_mm256_add_epi32(_mm256_cvtepu16_epi32(a_.m128i), _mm256_cvtepu16_epi32(b_.m128i)), 1));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, (HEDLEY_STATIC_CAST(uint32_t, a_.values[i]) + HEDLEY_STATIC_CAST(uint32_t, b_.values[i])) >> 1);
}
#endif
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhaddq_u16
#define vhaddq_u16(a, b) simde_vhaddq_u16((a), (b))
#endif
/* 32-bit lanes: widened to uint64_t. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vhaddq_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhaddq_u32(a, b);
#else
simde_uint32x4_private
r_,
a_ = simde_uint32x4_to_private(a),
b_ = simde_uint32x4_to_private(b);
#if defined(SIMDE_X86_AVX512VL_NATIVE)
r_.m128i = _mm256_cvtepi64_epi32(_mm256_srli_epi64(_mm256_add_epi64(_mm256_cvtepu32_epi64(a_.m128i), _mm256_cvtepu32_epi64(b_.m128i)), 1));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(uint32_t, (HEDLEY_STATIC_CAST(uint64_t, a_.values[i]) + HEDLEY_STATIC_CAST(uint64_t, b_.values[i])) >> 1);
}
#endif
return simde_uint32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhaddq_u32
#define vhaddq_u32(a, b) simde_vhaddq_u32((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_HADD_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/cage.h | .h | 5,902 | 190 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
* 2021 Atharva Nimbalkar <atharvakn@gmail.com>
*/
#if !defined(SIMDE_ARM_NEON_CAGE_H)
#define SIMDE_ARM_NEON_CAGE_H
#include "types.h"
#include "abs.h"
#include "cge.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Scalar absolute-compare greater-than-or-equal: all-ones when
 * |a| >= |b|, zero otherwise. f16 inputs are promoted to f32 before
 * the comparison. */
SIMDE_FUNCTION_ATTRIBUTES
uint16_t
simde_vcageh_f16(simde_float16_t a, simde_float16_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16)
return vcageh_f16(a, b);
#else
simde_float32_t abs_a = simde_math_fabsf(simde_float16_to_float32(a));
simde_float32_t abs_b = simde_math_fabsf(simde_float16_to_float32(b));
return (abs_a >= abs_b) ? UINT16_MAX : UINT16_C(0);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcageh_f16
#define vcageh_f16(a, b) simde_vcageh_f16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_vcages_f32(simde_float32_t a, simde_float32_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcages_f32(a, b);
#else
if (simde_math_fabsf(a) >= simde_math_fabsf(b)) {
return ~UINT32_C(0);
}
return UINT32_C(0);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcages_f32
#define vcages_f32(a, b) simde_vcages_f32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vcaged_f64(simde_float64_t a, simde_float64_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcaged_f64(a, b);
#else
if (simde_math_fabs(a) >= simde_math_fabs(b)) {
return ~UINT64_C(0);
}
return UINT64_C(0);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcaged_f64
#define vcaged_f64(a, b) simde_vcaged_f64((a), (b))
#endif
/* 64-bit vector absolute-compare GE: lane-wise |a| >= |b|, producing
 * all-ones / all-zeros masks. f32/f64 forms delegate to vabs + vcge;
 * the f16 form applies the scalar helper per lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vcage_f16(simde_float16x4_t a, simde_float16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16)
return vcage_f16(a, b);
#else
simde_uint16x4_private r_;
simde_float16x4_private
a_ = simde_float16x4_to_private(a),
b_ = simde_float16x4_to_private(b);
SIMDE_VECTORIZE
for(size_t lane = 0 ; lane < (sizeof(r_) / sizeof(r_.values[0])) ; lane++) {
r_.values[lane] = simde_vcageh_f16(a_.values[lane], b_.values[lane]);
}
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES)
#undef vcage_f16
#define vcage_f16(a, b) simde_vcage_f16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vcage_f32(simde_float32x2_t a, simde_float32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcage_f32(a, b);
#else
simde_float32x2_t abs_a = simde_vabs_f32(a);
simde_float32x2_t abs_b = simde_vabs_f32(b);
return simde_vcge_f32(abs_a, abs_b);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcage_f32
#define vcage_f32(a, b) simde_vcage_f32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vcage_f64(simde_float64x1_t a, simde_float64x1_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcage_f64(a, b);
#else
simde_float64x1_t abs_a = simde_vabs_f64(a);
simde_float64x1_t abs_b = simde_vabs_f64(b);
return simde_vcge_f64(abs_a, abs_b);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcage_f64
#define vcage_f64(a, b) simde_vcage_f64((a), (b))
#endif
/* 128-bit vector absolute-compare GE: lane-wise |a| >= |b|, producing
 * all-ones / all-zeros masks. Same structure as the 64-bit forms. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vcageq_f16(simde_float16x8_t a, simde_float16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16)
return vcageq_f16(a, b);
#else
simde_uint16x8_private r_;
simde_float16x8_private
a_ = simde_float16x8_to_private(a),
b_ = simde_float16x8_to_private(b);
SIMDE_VECTORIZE
for(size_t lane = 0 ; lane < (sizeof(r_) / sizeof(r_.values[0])) ; lane++) {
r_.values[lane] = simde_vcageh_f16(a_.values[lane], b_.values[lane]);
}
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES)
#undef vcageq_f16
#define vcageq_f16(a, b) simde_vcageq_f16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vcageq_f32(simde_float32x4_t a, simde_float32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcageq_f32(a, b);
#else
simde_float32x4_t abs_a = simde_vabsq_f32(a);
simde_float32x4_t abs_b = simde_vabsq_f32(b);
return simde_vcgeq_f32(abs_a, abs_b);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcageq_f32
#define vcageq_f32(a, b) simde_vcageq_f32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vcageq_f64(simde_float64x2_t a, simde_float64x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcageq_f64(a, b);
#else
simde_float64x2_t abs_a = simde_vabsq_f64(a);
simde_float64x2_t abs_b = simde_vabsq_f64(b);
return simde_vcgeq_f64(abs_a, abs_b);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcageq_f64
#define vcageq_f64(a, b) simde_vcageq_f64((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_CAGE_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/fma_n.h | .h | 3,660 | 98 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_FMA_N_H)
#define SIMDE_ARM_NEON_FMA_N_H
#include "types.h"
#include "dup_n.h"
#include "fma.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Fused multiply-add with a scalar multiplier: a + (b * {c, c}).
 * The native path is gated on actual FMA support and on compilers
 * known not to miscompile it (clang < 7, GCC bug 95399). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vfma_n_f32(simde_float32x2_t a, simde_float32x2_t b, simde_float32_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) && (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0)) && !defined(SIMDE_BUG_GCC_95399)
return vfma_n_f32(a, b, c);
#else
simde_float32x2_t c_splat = simde_vdup_n_f32(c);
return simde_vfma_f32(a, b, c_splat);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vfma_n_f32
#define vfma_n_f32(a, b, c) simde_vfma_n_f32(a, b, c)
#endif
/* Fused multiply-add with a scalar multiplier: a + (b * {c}).
 * vfma_n_f64 is an AArch64 (A64V8) intrinsic, so its native alias must
 * be gated on SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES — consistent
 * with every other *_f64 function in this library. The previous A32V7
 * gate left vfma_n_f64 unaliased on ARMv7-native builds that request
 * native aliases, where no native definition exists either. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vfma_n_f64(simde_float64x1_t a, simde_float64x1_t b, simde_float64_t c) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) && (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0))
return vfma_n_f64(a, b, c);
#else
return simde_vfma_f64(a, b, simde_vdup_n_f64(c));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vfma_n_f64
#define vfma_n_f64(a, b, c) simde_vfma_n_f64(a, b, c)
#endif
/* 128-bit fused multiply-add with a scalar multiplier:
 * a + (b * {c, c, c, c}). Same gating as simde_vfma_n_f32. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vfmaq_n_f32(simde_float32x4_t a, simde_float32x4_t b, simde_float32_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) && (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0)) && !defined(SIMDE_BUG_GCC_95399)
return vfmaq_n_f32(a, b, c);
#else
simde_float32x4_t c_splat = simde_vdupq_n_f32(c);
return simde_vfmaq_f32(a, b, c_splat);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vfmaq_n_f32
#define vfmaq_n_f32(a, b, c) simde_vfmaq_n_f32(a, b, c)
#endif
/* 128-bit fused multiply-add with a scalar multiplier: a + (b * {c, c}).
 * vfmaq_n_f64 is an AArch64 (A64V8) intrinsic, so its native alias must
 * be gated on SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES — consistent
 * with the other *_f64 functions in this library. The previous A32V7
 * gate left vfmaq_n_f64 unaliased on ARMv7-native builds that request
 * native aliases, where no native definition exists either. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vfmaq_n_f64(simde_float64x2_t a, simde_float64x2_t b, simde_float64_t c) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) && (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0))
return vfmaq_n_f64(a, b, c);
#else
return simde_vfmaq_f64(a, b, simde_vdupq_n_f64(c))
;
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vfmaq_n_f64
#define vfmaq_n_f64(a, b, c) simde_vfmaq_n_f64(a, b, c)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_FMA_N_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/addhn.h | .h | 7,275 | 212 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_ADDHN_H)
#define SIMDE_ARM_NEON_ADDHN_H
#include "add.h"
#include "shr_n.h"
#include "movn.h"
#include "reinterpret.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vaddhn, signed: add and return the high (most-significant) half of
 * each widened lane — e.g. for s16 inputs, each result byte is bits
 * [15:8] of a + b. The shufflevector path reinterprets the full-width
 * sum as narrower lanes and picks the high-half lanes directly (odd
 * indices on little-endian, even on big-endian); the generic fallback
 * shifts right by half the element width and narrows. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vaddhn_s16(simde_int16x8_t a, simde_int16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vaddhn_s16(a, b);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
simde_int8x8_private r_;
simde_int8x16_private tmp_ =
simde_int8x16_to_private(
simde_vreinterpretq_s8_s16(
simde_vaddq_s16(a, b)
)
);
#if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE
r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3, 5, 7, 9, 11, 13, 15);
#else
r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2, 4, 6, 8, 10, 12, 14);
#endif
return simde_int8x8_from_private(r_);
#else
return simde_vmovn_s16(simde_vshrq_n_s16(simde_vaddq_s16(a, b), 8));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vaddhn_s16
#define vaddhn_s16(a, b) simde_vaddhn_s16((a), (b))
#endif
/* s32 inputs: result is the high 16 bits of each 32-bit sum. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vaddhn_s32(simde_int32x4_t a, simde_int32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vaddhn_s32(a, b);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
simde_int16x4_private r_;
simde_int16x8_private tmp_ =
simde_int16x8_to_private(
simde_vreinterpretq_s16_s32(
simde_vaddq_s32(a, b)
)
);
#if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE
r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3, 5, 7);
#else
r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2, 4, 6);
#endif
return simde_int16x4_from_private(r_);
#else
return simde_vmovn_s32(simde_vshrq_n_s32(simde_vaddq_s32(a, b), 16));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vaddhn_s32
#define vaddhn_s32(a, b) simde_vaddhn_s32((a), (b))
#endif
/* s64 inputs: result is the high 32 bits of each 64-bit sum. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vaddhn_s64(simde_int64x2_t a, simde_int64x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vaddhn_s64(a, b);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
simde_int32x2_private r_;
simde_int32x4_private tmp_ =
simde_int32x4_to_private(
simde_vreinterpretq_s32_s64(
simde_vaddq_s64(a, b)
)
);
#if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE
r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3);
#else
r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2);
#endif
return simde_int32x2_from_private(r_);
#else
return simde_vmovn_s64(simde_vshrq_n_s64(simde_vaddq_s64(a, b), 32));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vaddhn_s64
#define vaddhn_s64(a, b) simde_vaddhn_s64((a), (b))
#endif
/* vaddhn, unsigned: add and return the high (most-significant) half of
 * each widened lane. Structure mirrors the signed variants above:
 * shufflevector picks high-half lanes (odd indices on little-endian,
 * even on big-endian); the fallback shifts right by half the element
 * width and narrows. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vaddhn_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vaddhn_u16(a, b);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
simde_uint8x8_private r_;
simde_uint8x16_private tmp_ =
simde_uint8x16_to_private(
simde_vreinterpretq_u8_u16(
simde_vaddq_u16(a, b)
)
);
#if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE
r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3, 5, 7, 9, 11, 13, 15);
#else
r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2, 4, 6, 8, 10, 12, 14);
#endif
return simde_uint8x8_from_private(r_);
#else
return simde_vmovn_u16(simde_vshrq_n_u16(simde_vaddq_u16(a, b), 8));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vaddhn_u16
#define vaddhn_u16(a, b) simde_vaddhn_u16((a), (b))
#endif
/* u32 inputs: result is the high 16 bits of each 32-bit sum. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vaddhn_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vaddhn_u32(a, b);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
simde_uint16x4_private r_;
simde_uint16x8_private tmp_ =
simde_uint16x8_to_private(
simde_vreinterpretq_u16_u32(
simde_vaddq_u32(a, b)
)
);
#if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE
r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3, 5, 7);
#else
r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2, 4, 6);
#endif
return simde_uint16x4_from_private(r_);
#else
return simde_vmovn_u32(simde_vshrq_n_u32(simde_vaddq_u32(a, b), 16));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vaddhn_u32
#define vaddhn_u32(a, b) simde_vaddhn_u32((a), (b))
#endif
/* u64 inputs: result is the high 32 bits of each 64-bit sum. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vaddhn_u64(simde_uint64x2_t a, simde_uint64x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vaddhn_u64(a, b);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
simde_uint32x2_private r_;
simde_uint32x4_private tmp_ =
simde_uint32x4_to_private(
simde_vreinterpretq_u32_u64(
simde_vaddq_u64(a, b)
)
);
#if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE
r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3);
#else
r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2);
#endif
return simde_uint32x2_from_private(r_);
#else
return simde_vmovn_u64(simde_vshrq_n_u64(simde_vaddq_u64(a, b), 32));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vaddhn_u64
#define vaddhn_u64(a, b) simde_vaddhn_u64((a), (b))
#endif
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/rsra_n.h | .h | 8,089 | 230 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Christopher Moore <moore@free.fr>
*/
#if !defined(SIMDE_ARM_NEON_RSRA_N_H)
#define SIMDE_ARM_NEON_RSRA_N_H
#include "add.h"
#include "combine.h"
#include "get_low.h"
#include "rshr_n.h"
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Remark: For these instructions
* 1 <= n <= data element size in bits
* so 0 <= n - 1 < data element size in bits
*/
/* vrsra_n family: rounding shift right by n, then accumulate into a.
 * Every form is expressed as add(a, rshr_n(b, n)); native intrinsics
 * are used when available. Scalar 64-bit forms first: */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vrsrad_n_s64(a, b, n) vrsrad_n_s64(a, b, n)
#else
#define simde_vrsrad_n_s64(a, b, n) simde_vaddd_s64((a), simde_vrshrd_n_s64((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vrsrad_n_s64
#define vrsrad_n_s64(a, b, n) simde_vrsrad_n_s64((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vrsrad_n_u64(a, b, n) vrsrad_n_u64(a, b, n)
#else
#define simde_vrsrad_n_u64(a, b, n) simde_vaddd_u64((a), simde_vrshrd_n_u64((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vrsrad_n_u64
#define vrsrad_n_u64(a, b, n) simde_vrsrad_n_u64((a), (b), (n))
#endif
/* 128-bit (q) vector forms: */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrsraq_n_s8(a, b, n) vrsraq_n_s8((a), (b), (n))
#else
#define simde_vrsraq_n_s8(a, b, n) simde_vaddq_s8((a), simde_vrshrq_n_s8((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrsraq_n_s8
#define vrsraq_n_s8(a, b, n) simde_vrsraq_n_s8((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrsraq_n_s16(a, b, n) vrsraq_n_s16((a), (b), (n))
#else
#define simde_vrsraq_n_s16(a, b, n) simde_vaddq_s16((a), simde_vrshrq_n_s16((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrsraq_n_s16
#define vrsraq_n_s16(a, b, n) simde_vrsraq_n_s16((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrsraq_n_s32(a, b, n) vrsraq_n_s32((a), (b), (n))
#else
#define simde_vrsraq_n_s32(a, b, n) simde_vaddq_s32((a), simde_vrshrq_n_s32((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrsraq_n_s32
#define vrsraq_n_s32(a, b, n) simde_vrsraq_n_s32((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrsraq_n_s64(a, b, n) vrsraq_n_s64((a), (b), (n))
#else
#define simde_vrsraq_n_s64(a, b, n) simde_vaddq_s64((a), simde_vrshrq_n_s64((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrsraq_n_s64
#define vrsraq_n_s64(a, b, n) simde_vrsraq_n_s64((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrsraq_n_u8(a, b, n) vrsraq_n_u8((a), (b), (n))
#else
#define simde_vrsraq_n_u8(a, b, n) simde_vaddq_u8((a), simde_vrshrq_n_u8((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrsraq_n_u8
#define vrsraq_n_u8(a, b, n) simde_vrsraq_n_u8((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrsraq_n_u16(a, b, n) vrsraq_n_u16((a), (b), (n))
#else
#define simde_vrsraq_n_u16(a, b, n) simde_vaddq_u16((a), simde_vrshrq_n_u16((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrsraq_n_u16
#define vrsraq_n_u16(a, b, n) simde_vrsraq_n_u16((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrsraq_n_u32(a, b, n) vrsraq_n_u32((a), (b), (n))
#else
#define simde_vrsraq_n_u32(a, b, n) simde_vaddq_u32((a), simde_vrshrq_n_u32((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrsraq_n_u32
#define vrsraq_n_u32(a, b, n) simde_vrsraq_n_u32((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrsraq_n_u64(a, b, n) vrsraq_n_u64((a), (b), (n))
#else
#define simde_vrsraq_n_u64(a, b, n) simde_vaddq_u64((a), simde_vrshrq_n_u64((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrsraq_n_u64
#define vrsraq_n_u64(a, b, n) simde_vrsraq_n_u64((a), (b), (n))
#endif
/* 64-bit vector forms: */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrsra_n_s8(a, b, n) vrsra_n_s8((a), (b), (n))
#else
#define simde_vrsra_n_s8(a, b, n) simde_vadd_s8((a), simde_vrshr_n_s8((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrsra_n_s8
#define vrsra_n_s8(a, b, n) simde_vrsra_n_s8((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrsra_n_s16(a, b, n) vrsra_n_s16((a), (b), (n))
#else
#define simde_vrsra_n_s16(a, b, n) simde_vadd_s16((a), simde_vrshr_n_s16((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrsra_n_s16
#define vrsra_n_s16(a, b, n) simde_vrsra_n_s16((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrsra_n_s32(a, b, n) vrsra_n_s32((a), (b), (n))
#else
#define simde_vrsra_n_s32(a, b, n) simde_vadd_s32((a), simde_vrshr_n_s32((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrsra_n_s32
#define vrsra_n_s32(a, b, n) simde_vrsra_n_s32((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrsra_n_s64(a, b, n) vrsra_n_s64((a), (b), (n))
#else
#define simde_vrsra_n_s64(a, b, n) simde_vadd_s64((a), simde_vrshr_n_s64((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrsra_n_s64
#define vrsra_n_s64(a, b, n) simde_vrsra_n_s64((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrsra_n_u8(a, b, n) vrsra_n_u8((a), (b), (n))
#else
#define simde_vrsra_n_u8(a, b, n) simde_vadd_u8((a), simde_vrshr_n_u8((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrsra_n_u8
#define vrsra_n_u8(a, b, n) simde_vrsra_n_u8((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrsra_n_u16(a, b, n) vrsra_n_u16((a), (b), (n))
#else
#define simde_vrsra_n_u16(a, b, n) simde_vadd_u16((a), simde_vrshr_n_u16((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrsra_n_u16
#define vrsra_n_u16(a, b, n) simde_vrsra_n_u16((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrsra_n_u32(a, b, n) vrsra_n_u32((a), (b), (n))
#else
#define simde_vrsra_n_u32(a, b, n) simde_vadd_u32((a), simde_vrshr_n_u32((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrsra_n_u32
#define vrsra_n_u32(a, b, n) simde_vrsra_n_u32((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vrsra_n_u64(a, b, n) vrsra_n_u64((a), (b), (n))
#else
#define simde_vrsra_n_u64(a, b, n) simde_vadd_u64((a), simde_vrshr_n_u64((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrsra_n_u64
#define vrsra_n_u64(a, b, n) simde_vrsra_n_u64((a), (b), (n))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_RSRA_N_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/qmovun.h | .h | 5,196 | 160 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_QMOVUN_H)
#define SIMDE_ARM_NEON_QMOVUN_H
#include "types.h"
#include "dup_n.h"
#include "min.h"
#include "max.h"
#include "movn.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Scalar saturating "move unsigned narrow": clamp a signed 16-bit value
 * into the uint8_t range [0, UINT8_MAX] and return it as uint8_t. */
SIMDE_FUNCTION_ATTRIBUTES
uint8_t
simde_vqmovunh_s16(int16_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return HEDLEY_STATIC_CAST(uint8_t, vqmovunh_s16(a));
  #else
    /* Saturate below, then above, then narrow. */
    if (a < 0) {
      return 0;
    }
    if (a > UINT8_MAX) {
      return UINT8_MAX;
    }
    return HEDLEY_STATIC_CAST(uint8_t, a);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vqmovunh_s16
  #define vqmovunh_s16(a) simde_vqmovunh_s16((a))
#endif
/* Scalar saturating "move unsigned narrow": clamp a signed 32-bit value
 * into the uint16_t range [0, UINT16_MAX] and return it as uint16_t. */
SIMDE_FUNCTION_ATTRIBUTES
uint16_t
simde_vqmovuns_s32(int32_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return HEDLEY_STATIC_CAST(uint16_t, vqmovuns_s32(a));
  #else
    /* Saturate below, then above, then narrow. */
    if (a < 0) {
      return 0;
    }
    if (a > UINT16_MAX) {
      return UINT16_MAX;
    }
    return HEDLEY_STATIC_CAST(uint16_t, a);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vqmovuns_s32
  #define vqmovuns_s32(a) simde_vqmovuns_s32((a))
#endif
/* Scalar saturating "move unsigned narrow": clamp a signed 64-bit value
 * into the uint32_t range [0, UINT32_MAX] and return it as uint32_t. */
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_vqmovund_s64(int64_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return HEDLEY_STATIC_CAST(uint32_t, vqmovund_s64(a));
  #else
    /* Saturate below, then above, then narrow. */
    if (a < 0) {
      return 0;
    }
    if (a > UINT32_MAX) {
      return UINT32_MAX;
    }
    return HEDLEY_STATIC_CAST(uint32_t, a);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vqmovund_s64
  #define vqmovund_s64(a) simde_vqmovund_s64((a))
#endif
/* Vector saturating "move unsigned narrow" (VQMOVUN): each signed lane is
 * clamped to the unsigned range of the half-width type and narrowed.
 * Three strategies per function: the native NEON intrinsic; a clamp while
 * still wide (max(0, min(LIMIT, a))) followed by a plain narrowing move;
 * or a scalar fallback loop over the private representation. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vqmovun_s16(simde_int16x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vqmovun_s16(a);
  #elif SIMDE_NATURAL_VECTOR_SIZE > 0
    /* clamp to [0, UINT8_MAX] at full width, then narrow */
    return simde_vmovn_u16(simde_vreinterpretq_u16_s16(simde_vmaxq_s16(simde_vdupq_n_s16(0), simde_vminq_s16(simde_vdupq_n_s16(UINT8_MAX), a))))<
  #else
    simde_uint8x8_private r_;
    simde_int16x8_private a_ = simde_int16x8_to_private(a);
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vqmovunh_s16(a_.values[i]);
    }
    return simde_uint8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqmovun_s16
  #define vqmovun_s16(a) simde_vqmovun_s16((a))
#endif
/* int32x4 -> uint16x4 */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vqmovun_s32(simde_int32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vqmovun_s32(a);
  #elif SIMDE_NATURAL_VECTOR_SIZE > 0
    /* clamp to [0, UINT16_MAX] at full width, then narrow */
    return simde_vmovn_u32(simde_vreinterpretq_u32_s32(simde_vmaxq_s32(simde_vdupq_n_s32(0), simde_vminq_s32(simde_vdupq_n_s32(UINT16_MAX), a))));
  #else
    simde_uint16x4_private r_;
    simde_int32x4_private a_ = simde_int32x4_to_private(a);
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vqmovuns_s32(a_.values[i]);
    }
    return simde_uint16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqmovun_s32
  #define vqmovun_s32(a) simde_vqmovun_s32((a))
#endif
/* int64x2 -> uint32x2; min/max on 64-bit lanes have no NEON name, so the
 * simde_x_* internal helpers are used. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vqmovun_s64(simde_int64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vqmovun_s64(a);
  #elif SIMDE_NATURAL_VECTOR_SIZE > 0
    /* clamp to [0, UINT32_MAX] at full width, then narrow */
    return simde_vmovn_u64(simde_vreinterpretq_u64_s64(simde_x_vmaxq_s64(simde_vdupq_n_s64(0), simde_x_vminq_s64(simde_vdupq_n_s64(UINT32_MAX), a))));
  #else
    simde_uint32x2_private r_;
    simde_int64x2_private a_ = simde_int64x2_to_private(a);
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = simde_vqmovund_s64(a_.values[i]);
    }
    return simde_uint32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqmovun_s64
  #define vqmovun_s64(a) simde_vqmovun_s64((a))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_QMOVUN_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/mlal.h | .h | 5,025 | 157 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_MLAL_H)
#define SIMDE_ARM_NEON_MLAL_H
#include "movl.h"
#include "mla.h"
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Widening multiply-accumulate (VMLAL): a + (widen(b) * widen(c)) per
 * lane, narrow inputs, double-width accumulator.  Most variants widen
 * both factors with simde_vmovl_* and reuse the full-width vmla; the
 * 32->64-bit variants multiply directly on the private representation
 * (vector-extension path or scalar loop). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vmlal_s8(simde_int16x8_t a, simde_int8x8_t b, simde_int8x8_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmlal_s8(a, b, c);
  #else
    return simde_vmlaq_s16(a, simde_vmovl_s8(b), simde_vmovl_s8(c));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmlal_s8
  #define vmlal_s8(a, b, c) simde_vmlal_s8((a), (b), (c))
#endif
/* int16x4 -> int32x4 accumulate */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vmlal_s16(simde_int32x4_t a, simde_int16x4_t b, simde_int16x4_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmlal_s16(a, b, c);
  #else
    return simde_vmlaq_s32(a, simde_vmovl_s16(b), simde_vmovl_s16(c))<
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmlal_s16
  #define vmlal_s16(a, b, c) simde_vmlal_s16((a), (b), (c))
#endif
/* int32x2 -> int64x2 accumulate: no 64-bit vmla, so multiply-add on the
 * private values (widened first, so the product cannot overflow s64 for
 * any pair of s32 inputs). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vmlal_s32(simde_int64x2_t a, simde_int32x2_t b, simde_int32x2_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmlal_s32(a, b, c);
  #else
    simde_int64x2_private
      r_,
      a_ = simde_int64x2_to_private(a),
      b_ = simde_int64x2_to_private(simde_vmovl_s32(b)),
      c_ = simde_int64x2_to_private(simde_vmovl_s32(c));
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = (b_.values * c_.values) + a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i];
      }
    #endif
    return simde_int64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmlal_s32
  #define vmlal_s32(a, b, c) simde_vmlal_s32((a), (b), (c))
#endif
/* uint8x8 -> uint16x8 accumulate */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vmlal_u8(simde_uint16x8_t a, simde_uint8x8_t b, simde_uint8x8_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmlal_u8(a, b, c);
  #else
    return simde_vmlaq_u16(a, simde_vmovl_u8(b), simde_vmovl_u8(c));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmlal_u8
  #define vmlal_u8(a, b, c) simde_vmlal_u8((a), (b), (c))
#endif
/* uint16x4 -> uint32x4 accumulate */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vmlal_u16(simde_uint32x4_t a, simde_uint16x4_t b, simde_uint16x4_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmlal_u16(a, b, c);
  #else
    return simde_vmlaq_u32(a, simde_vmovl_u16(b), simde_vmovl_u16(c));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmlal_u16
  #define vmlal_u16(a, b, c) simde_vmlal_u16((a), (b), (c))
#endif
/* uint32x2 -> uint64x2 accumulate (unsigned arithmetic wraps, no UB) */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vmlal_u32(simde_uint64x2_t a, simde_uint32x2_t b, simde_uint32x2_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmlal_u32(a, b, c);
  #else
    simde_uint64x2_private
      r_,
      a_ = simde_uint64x2_to_private(a),
      b_ = simde_uint64x2_to_private(simde_vmovl_u32(b)),
      c_ = simde_uint64x2_to_private(simde_vmovl_u32(c));
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = (b_.values * c_.values) + a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i];
      }
    #endif
    return simde_uint64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmlal_u32
  #define vmlal_u32(a, b, c) simde_vmlal_u32((a), (b), (c))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_MLAL_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/sri_n.h | .h | 9,661 | 273 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Zhi An Ng <zhin@google.com> (Copyright owned by Google, LLC)
* 2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_SRI_N_H)
#define SIMDE_ARM_NEON_SRI_N_H
#include "types.h"
#include "shr_n.h"
#include "dup_n.h"
#include "and.h"
#include "orr.h"
#include "reinterpret.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Shift-right-and-insert, 64-bit scalars (AArch64 VSRI): keep the top n
 * bits of a, fill the low (64 - n) bits with b >> n, for n in [1, 64].
 * Fix: the portable expansions previously used the macro parameters a
 * and n unparenthesized ("a & mask", "64 - n"), so arguments containing
 * lower-precedence operators (|, ^, +) expanded incorrectly and could
 * even produce an out-of-range shift.  Every parameter use is now
 * parenthesized (CERT PRE01-C); expansion is unchanged for simple
 * arguments. */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vsrid_n_s64(a, b, n) vsrid_n_s64(a, b, n)
#else
  /* signed variant is the unsigned one behind casts */
  #define simde_vsrid_n_s64(a, b, n) \
    HEDLEY_STATIC_CAST(int64_t, \
      simde_vsrid_n_u64(HEDLEY_STATIC_CAST(uint64_t, (a)), HEDLEY_STATIC_CAST(uint64_t, (b)), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vsrid_n_s64
  #define vsrid_n_s64(a, b, n) simde_vsrid_n_s64((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vsrid_n_u64(a, b, n) vsrid_n_u64(a, b, n)
#else
  /* mask selects the top n bits of a; the rest comes from b >> n */
  #define simde_vsrid_n_u64(a, b, n) \
    ((((a) & (UINT64_C(0xffffffffffffffff) >> (64 - (n)) << (64 - (n)))) | simde_vshrd_n_u64((b), (n))))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vsrid_n_u64
  #define vsrid_n_u64(a, b, n) simde_vsrid_n_u64((a), (b), (n))
#endif
/* Shift-right-and-insert, 64-bit vectors: per lane, keep the top n bits
 * of a and insert b >> n into the low bits.  Signed variants reinterpret
 * to unsigned, reuse the unsigned implementation, and reinterpret back.
 * Fix: the shift-count parameter n was used unparenthesized inside the
 * mask arithmetic ("W - n"), so a compound argument such as (2 + 2)
 * expanded to "W - 2 + 2" — wrong mask, possibly an out-of-range shift.
 * All uses are now parenthesized (CERT PRE01-C). */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vsri_n_s8(a, b, n) vsri_n_s8((a), (b), (n))
#else
  #define simde_vsri_n_s8(a, b, n) \
    simde_vreinterpret_s8_u8(simde_vsri_n_u8( \
        simde_vreinterpret_u8_s8((a)), simde_vreinterpret_u8_s8((b)), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vsri_n_s8
  #define vsri_n_s8(a, b, n) simde_vsri_n_s8((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vsri_n_u8(a, b, n) vsri_n_u8((a), (b), (n))
#else
  /* (0xff >> (8-n) << (8-n)) == top n bits set */
  #define simde_vsri_n_u8(a, b, n) \
    simde_vorr_u8( \
        simde_vand_u8((a), simde_vdup_n_u8((UINT8_C(0xff) >> (8 - (n)) << (8 - (n))))), \
        simde_vshr_n_u8((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vsri_n_u8
  #define vsri_n_u8(a, b, n) simde_vsri_n_u8((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vsri_n_s16(a, b, n) vsri_n_s16((a), (b), (n))
#else
  #define simde_vsri_n_s16(a, b, n) \
    simde_vreinterpret_s16_u16(simde_vsri_n_u16( \
        simde_vreinterpret_u16_s16((a)), simde_vreinterpret_u16_s16((b)), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vsri_n_s16
  #define vsri_n_s16(a, b, n) simde_vsri_n_s16((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vsri_n_u16(a, b, n) vsri_n_u16((a), (b), (n))
#else
  #define simde_vsri_n_u16(a, b, n) \
    simde_vorr_u16( \
        simde_vand_u16((a), simde_vdup_n_u16((UINT16_C(0xffff) >> (16 - (n)) << (16 - (n))))), \
        simde_vshr_n_u16((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vsri_n_u16
  #define vsri_n_u16(a, b, n) simde_vsri_n_u16((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vsri_n_s32(a, b, n) vsri_n_s32((a), (b), (n))
#else
  #define simde_vsri_n_s32(a, b, n) \
    simde_vreinterpret_s32_u32(simde_vsri_n_u32( \
        simde_vreinterpret_u32_s32((a)), simde_vreinterpret_u32_s32((b)), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vsri_n_s32
  #define vsri_n_s32(a, b, n) simde_vsri_n_s32((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vsri_n_u32(a, b, n) vsri_n_u32((a), (b), (n))
#else
  #define simde_vsri_n_u32(a, b, n) \
    simde_vorr_u32( \
        simde_vand_u32((a), \
            simde_vdup_n_u32((UINT32_C(0xffffffff) >> (32 - (n)) << (32 - (n))))), \
        simde_vshr_n_u32((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vsri_n_u32
  #define vsri_n_u32(a, b, n) simde_vsri_n_u32((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vsri_n_s64(a, b, n) vsri_n_s64((a), (b), (n))
#else
  #define simde_vsri_n_s64(a, b, n) \
    simde_vreinterpret_s64_u64(simde_vsri_n_u64( \
        simde_vreinterpret_u64_s64((a)), simde_vreinterpret_u64_s64((b)), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vsri_n_s64
  #define vsri_n_s64(a, b, n) simde_vsri_n_s64((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vsri_n_u64(a, b, n) vsri_n_u64((a), (b), (n))
#else
  #define simde_vsri_n_u64(a, b, n) \
    simde_vorr_u64( \
        simde_vand_u64((a), simde_vdup_n_u64( \
            (UINT64_C(0xffffffffffffffff) >> (64 - (n)) << (64 - (n))))), \
        simde_vshr_n_u64((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vsri_n_u64
  #define vsri_n_u64(a, b, n) simde_vsri_n_u64((a), (b), (n))
#endif
/* Shift-right-and-insert, 128-bit vectors (q forms).  Same structure as
 * the 64-bit variants above: signed types go through unsigned
 * reinterprets; unsigned types mask the top n bits of a and or in
 * b >> n.  Fix: parenthesize the macro parameter n inside the mask
 * arithmetic ("W - (n)") so compound shift-count expressions expand
 * correctly (CERT PRE01-C); expansion is unchanged for literal n. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vsriq_n_s8(a, b, n) vsriq_n_s8((a), (b), (n))
#else
  #define simde_vsriq_n_s8(a, b, n) \
    simde_vreinterpretq_s8_u8(simde_vsriq_n_u8( \
        simde_vreinterpretq_u8_s8((a)), simde_vreinterpretq_u8_s8((b)), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vsriq_n_s8
  #define vsriq_n_s8(a, b, n) simde_vsriq_n_s8((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vsriq_n_u8(a, b, n) vsriq_n_u8((a), (b), (n))
#else
  #define simde_vsriq_n_u8(a, b, n) \
    simde_vorrq_u8( \
        simde_vandq_u8((a), simde_vdupq_n_u8((UINT8_C(0xff) >> (8 - (n)) << (8 - (n))))), \
        simde_vshrq_n_u8((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vsriq_n_u8
  #define vsriq_n_u8(a, b, n) simde_vsriq_n_u8((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vsriq_n_s16(a, b, n) vsriq_n_s16((a), (b), (n))
#else
  #define simde_vsriq_n_s16(a, b, n) \
    simde_vreinterpretq_s16_u16(simde_vsriq_n_u16( \
        simde_vreinterpretq_u16_s16((a)), simde_vreinterpretq_u16_s16((b)), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vsriq_n_s16
  #define vsriq_n_s16(a, b, n) simde_vsriq_n_s16((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vsriq_n_u16(a, b, n) vsriq_n_u16((a), (b), (n))
#else
  #define simde_vsriq_n_u16(a, b, n) \
    simde_vorrq_u16( \
        simde_vandq_u16((a), simde_vdupq_n_u16((UINT16_C(0xffff) >> (16 - (n)) << (16 - (n))))), \
        simde_vshrq_n_u16((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vsriq_n_u16
  #define vsriq_n_u16(a, b, n) simde_vsriq_n_u16((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vsriq_n_s32(a, b, n) vsriq_n_s32((a), (b), (n))
#else
  #define simde_vsriq_n_s32(a, b, n) \
    simde_vreinterpretq_s32_u32(simde_vsriq_n_u32( \
        simde_vreinterpretq_u32_s32((a)), simde_vreinterpretq_u32_s32((b)), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vsriq_n_s32
  #define vsriq_n_s32(a, b, n) simde_vsriq_n_s32((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vsriq_n_u32(a, b, n) vsriq_n_u32((a), (b), (n))
#else
  #define simde_vsriq_n_u32(a, b, n) \
    simde_vorrq_u32( \
        simde_vandq_u32((a), \
            simde_vdupq_n_u32((UINT32_C(0xffffffff) >> (32 - (n)) << (32 - (n))))), \
        simde_vshrq_n_u32((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vsriq_n_u32
  #define vsriq_n_u32(a, b, n) simde_vsriq_n_u32((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vsriq_n_s64(a, b, n) vsriq_n_s64((a), (b), (n))
#else
  #define simde_vsriq_n_s64(a, b, n) \
    simde_vreinterpretq_s64_u64(simde_vsriq_n_u64( \
        simde_vreinterpretq_u64_s64((a)), simde_vreinterpretq_u64_s64((b)), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vsriq_n_s64
  #define vsriq_n_s64(a, b, n) simde_vsriq_n_s64((a), (b), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vsriq_n_u64(a, b, n) vsriq_n_u64((a), (b), (n))
#else
  #define simde_vsriq_n_u64(a, b, n) \
    simde_vorrq_u64( \
        simde_vandq_u64((a), simde_vdupq_n_u64( \
            (UINT64_C(0xffffffffffffffff) >> (64 - (n)) << (64 - (n))))), \
        simde_vshrq_n_u64((b), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vsriq_n_u64
  #define vsriq_n_u64(a, b, n) simde_vsriq_n_u64((a), (b), (n))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_SRI_N_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/qrshrun_n.h | .h | 3,259 | 92 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Zhi An Ng <zhin@google.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_QRSHRUN_N_H)
#define SIMDE_ARM_NEON_QRSHRUN_N_H
#include "types.h"
#include "rshr_n.h"
#include "qmovun.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Saturating rounding shift right, unsigned narrow (VQRSHRUN): rounding
 * shift right by n, then saturate the signed result to the unsigned
 * half-width type.  Portable forms compose the rshr_n and qmovun
 * implementations pulled in above. */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vqrshruns_n_s32(a, n) vqrshruns_n_s32(a, n)
#else
  /* scalar int32 -> uint16 */
  #define simde_vqrshruns_n_s32(a, n) simde_vqmovuns_s32(simde_x_vrshrs_n_s32(a, n))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vqrshruns_n_s32
  #define vqrshruns_n_s32(a, n) simde_vqrshruns_n_s32((a), (n))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vqrshrund_n_s64(a, n) vqrshrund_n_s64(a, n)
#else
  /* scalar int64 -> uint32 */
  #define simde_vqrshrund_n_s64(a, n) simde_vqmovund_s64(simde_vrshrd_n_s64(a, n))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vqrshrund_n_s64
  #define vqrshrund_n_s64(a, n) simde_vqrshrund_n_s64((a), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vqrshrun_n_s16(a, n) vqrshrun_n_s16((a), (n))
#else
  /* int16x8 -> uint8x8 */
  #define simde_vqrshrun_n_s16(a, n) simde_vqmovun_s16(simde_vrshrq_n_s16(a, n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqrshrun_n_s16
  #define vqrshrun_n_s16(a, n) simde_vqrshrun_n_s16((a), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vqrshrun_n_s32(a, n) vqrshrun_n_s32((a), (n))
#else
  /* int32x4 -> uint16x4 */
  #define simde_vqrshrun_n_s32(a, n) simde_vqmovun_s32(simde_vrshrq_n_s32(a, n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqrshrun_n_s32
  #define vqrshrun_n_s32(a, n) simde_vqrshrun_n_s32((a), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vqrshrun_n_s64(a, n) vqrshrun_n_s64((a), (n))
#else
  /* int64x2 -> uint32x2 */
  #define simde_vqrshrun_n_s64(a, n) simde_vqmovun_s64(simde_vrshrq_n_s64(a, n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqrshrun_n_s64
  #define vqrshrun_n_s64(a, n) simde_vqrshrun_n_s64((a), (n))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_QRSHRUN_N_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/uzp1.h | .h | 21,179 | 644 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_UZP1_H)
#define SIMDE_ARM_NEON_UZP1_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* UZP1 ("unzip, even elements"), 64-bit vectors: the result's low half is
 * the even-indexed lanes of a, the high half the even-indexed lanes of b.
 * Each function tries, in order: the AArch64 vuzp1_* intrinsic; the ARMv7
 * vuzp_* pair (val[0] holds the even lanes); a compiler shuffle-vector;
 * and finally a portable loop copying lanes at indices 0, 2, 4, ... */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vuzp1_f32(simde_float32x2_t a, simde_float32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vuzp1_f32(a, b);
  #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    float32x2x2_t t = vuzp_f32(a, b);
    return t.val[0];
  #else
    simde_float32x2_private
      r_,
      a_ = simde_float32x2_to_private(a),
      b_ = simde_float32x2_to_private(b);
    #if defined(SIMDE_SHUFFLE_VECTOR_)
      r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, b_.values, 0, 2);
    #else
      const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < halfway_point ; i++) {
        const size_t idx = i << 1; /* even source lane */
        r_.values[ i ] = a_.values[idx];
        r_.values[i + halfway_point] = b_.values[idx];
      }
    #endif
    return simde_float32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vuzp1_f32
  #define vuzp1_f32(a, b) simde_vuzp1_f32((a), (b))
#endif
/* int8x8 variant */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vuzp1_s8(simde_int8x8_t a, simde_int8x8_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vuzp1_s8(a, b);
  #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    int8x8x2_t t = vuzp_s8(a, b);
    return t.val[0];
  #else
    simde_int8x8_private
      r_,
      a_ = simde_int8x8_to_private(a),
      b_ = simde_int8x8_to_private(b);
    #if defined(SIMDE_SHUFFLE_VECTOR_)
      r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, a_.values, b_.values, 0, 2, 4, 6, 8, 10, 12, 14);
    #else
      const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < halfway_point ; i++) {
        const size_t idx = i << 1; /* even source lane */
        r_.values[ i ] = a_.values[idx];
        r_.values[i + halfway_point] = b_.values[idx];
      }
    #endif
    return simde_int8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vuzp1_s8
  #define vuzp1_s8(a, b) simde_vuzp1_s8((a), (b))
#endif
/* int16x4 variant */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vuzp1_s16(simde_int16x4_t a, simde_int16x4_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vuzp1_s16(a, b);
  #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    int16x4x2_t t = vuzp_s16(a, b);
    return t.val[0];
  #else
    simde_int16x4_private
      r_,
      a_ = simde_int16x4_to_private(a),
      b_ = simde_int16x4_to_private(b);
    #if defined(SIMDE_SHUFFLE_VECTOR_)
      r_.values = SIMDE_SHUFFLE_VECTOR_(16, 8, a_.values, b_.values, 0, 2, 4, 6);
    #else
      const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < halfway_point ; i++) {
        const size_t idx = i << 1; /* even source lane */
        r_.values[ i ] = a_.values[idx];
        r_.values[i + halfway_point] = b_.values[idx];
      }
    #endif
    return simde_int16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vuzp1_s16
  #define vuzp1_s16(a, b) simde_vuzp1_s16((a), (b))
#endif
/* int32x2 variant */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vuzp1_s32(simde_int32x2_t a, simde_int32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vuzp1_s32(a, b);
  #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    int32x2x2_t t = vuzp_s32(a, b);
    return t.val[0];
  #else
    simde_int32x2_private
      r_,
      a_ = simde_int32x2_to_private(a),
      b_ = simde_int32x2_to_private(b);
    #if defined(SIMDE_SHUFFLE_VECTOR_)
      r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, b_.values, 0, 2);
    #else
      const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < halfway_point ; i++) {
        const size_t idx = i << 1; /* even source lane */
        r_.values[ i ] = a_.values[idx];
        r_.values[i + halfway_point] = b_.values[idx];
      }
    #endif
    return simde_int32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vuzp1_s32
  #define vuzp1_s32(a, b) simde_vuzp1_s32((a), (b))
#endif
/* uint8x8 variant */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vuzp1_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vuzp1_u8(a, b);
  #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    uint8x8x2_t t = vuzp_u8(a, b);
    return t.val[0];
  #else
    simde_uint8x8_private
      r_,
      a_ = simde_uint8x8_to_private(a),
      b_ = simde_uint8x8_to_private(b);
    #if defined(SIMDE_SHUFFLE_VECTOR_)
      r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, a_.values, b_.values, 0, 2, 4, 6, 8, 10, 12, 14);
    #else
      const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < halfway_point ; i++) {
        const size_t idx = i << 1; /* even source lane */
        r_.values[ i ] = a_.values[idx];
        r_.values[i + halfway_point] = b_.values[idx];
      }
    #endif
    return simde_uint8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vuzp1_u8
  #define vuzp1_u8(a, b) simde_vuzp1_u8((a), (b))
#endif
/* uint16x4 variant */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vuzp1_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vuzp1_u16(a, b);
  #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    uint16x4x2_t t = vuzp_u16(a, b);
    return t.val[0];
  #else
    simde_uint16x4_private
      r_,
      a_ = simde_uint16x4_to_private(a),
      b_ = simde_uint16x4_to_private(b);
    #if defined(SIMDE_SHUFFLE_VECTOR_)
      r_.values = SIMDE_SHUFFLE_VECTOR_(16, 8, a_.values, b_.values, 0, 2, 4, 6);
    #else
      const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < halfway_point ; i++) {
        const size_t idx = i << 1; /* even source lane */
        r_.values[ i ] = a_.values[idx];
        r_.values[i + halfway_point] = b_.values[idx];
      }
    #endif
    return simde_uint16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vuzp1_u16
  #define vuzp1_u16(a, b) simde_vuzp1_u16((a), (b))
#endif
/* uint32x2 variant */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vuzp1_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vuzp1_u32(a, b);
  #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    uint32x2x2_t t = vuzp_u32(a, b);
    return t.val[0];
  #else
    simde_uint32x2_private
      r_,
      a_ = simde_uint32x2_to_private(a),
      b_ = simde_uint32x2_to_private(b);
    #if defined(SIMDE_SHUFFLE_VECTOR_)
      r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, b_.values, 0, 2);
    #else
      const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < halfway_point ; i++) {
        const size_t idx = i << 1; /* even source lane */
        r_.values[ i ] = a_.values[idx];
        r_.values[i + halfway_point] = b_.values[idx];
      }
    #endif
    return simde_uint32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vuzp1_u32
  #define vuzp1_u32(a, b) simde_vuzp1_u32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vuzp1q_f32(simde_float32x4_t a, simde_float32x4_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vuzp1q_f32(a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4x2_t t = vuzpq_f32(a, b);
return t.val[0];
#else
simde_float32x4_private
r_,
a_ = simde_float32x4_to_private(a),
b_ = simde_float32x4_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i32x4_shuffle(a_.v128, b_.v128, 0, 2, 4, 6);
#elif defined(SIMDE_X86_SSE_NATIVE)
r_.m128 = _mm_shuffle_ps(a_.m128, b_.m128, 0x88);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 0, 2, 4, 6);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
const size_t idx = i << 1;
r_.values[ i ] = a_.values[idx];
r_.values[i + halfway_point] = b_.values[idx];
}
#endif
return simde_float32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vuzp1q_f32
#define vuzp1q_f32(a, b) simde_vuzp1q_f32((a), (b))
#endif
/* vuzp1q_f64: UZP1 of two float64x2 vectors -- result is {a[0], b[0]}. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vuzp1q_f64(simde_float64x2_t a, simde_float64x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vuzp1q_f64(a, b);
#else
simde_float64x2_private
r_,
a_ = simde_float64x2_to_private(a),
b_ = simde_float64x2_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i64x2_shuffle(a_.v128, b_.v128, 0, 2);
#elif defined(SIMDE_X86_SSE2_NATIVE)
/* movelh: low 64 bits of a in the low half, low 64 bits of b in the high half. */
r_.m128d = _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(a_.m128d), _mm_castpd_ps(b_.m128d)));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 0, 2);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
const size_t idx = i << 1;
r_.values[ i ] = a_.values[idx];
r_.values[i + halfway_point] = b_.values[idx];
}
#endif
return simde_float64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vuzp1q_f64
#define vuzp1q_f64(a, b) simde_vuzp1q_f64((a), (b))
#endif
/* vuzp1q_s8: UZP1 -- even-indexed int8 lanes of a, then even-indexed lanes of b. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vuzp1q_s8(simde_int8x16_t a, simde_int8x16_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vuzp1q_s8(a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* vuzpq returns both unzipped halves; val[0] is the even-lane (UZP1) half. */
int8x16x2_t t = vuzpq_s8(a, b);
return t.val[0];
#else
simde_int8x16_private
r_,
a_ = simde_int8x16_to_private(a),
b_ = simde_int8x16_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i8x16_shuffle(a_.v128, b_.v128, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, b_.values, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
const size_t idx = i << 1;
r_.values[ i ] = a_.values[idx];
r_.values[i + halfway_point] = b_.values[idx];
}
#endif
return simde_int8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vuzp1q_s8
#define vuzp1q_s8(a, b) simde_vuzp1q_s8((a), (b))
#endif
/* vuzp1q_s16: UZP1 -- even-indexed int16 lanes of a, then even-indexed lanes of b. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vuzp1q_s16(simde_int16x8_t a, simde_int16x8_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vuzp1q_s16(a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int16x8x2_t t = vuzpq_s16(a, b);
return t.val[0];
#else
simde_int16x8_private
r_,
a_ = simde_int16x8_to_private(a),
b_ = simde_int16x8_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i16x8_shuffle(a_.v128, b_.v128, 0, 2, 4, 6, 8, 10, 12, 14);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.values, b_.values, 0, 2, 4, 6, 8, 10, 12, 14);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
const size_t idx = i << 1;
r_.values[ i ] = a_.values[idx];
r_.values[i + halfway_point] = b_.values[idx];
}
#endif
return simde_int16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vuzp1q_s16
#define vuzp1q_s16(a, b) simde_vuzp1q_s16((a), (b))
#endif
/* vuzp1q_s32: UZP1 -- even-indexed int32 lanes of a, then even-indexed lanes of b. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vuzp1q_s32(simde_int32x4_t a, simde_int32x4_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vuzp1q_s32(a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int32x4x2_t t = vuzpq_s32(a, b);
return t.val[0];
#else
simde_int32x4_private
r_,
a_ = simde_int32x4_to_private(a),
b_ = simde_int32x4_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i32x4_shuffle(a_.v128, b_.v128, 0, 2, 4, 6);
#elif defined(SIMDE_X86_SSE2_NATIVE)
/* Integer data routed through the float shuffle unit; 0x88 = lanes {0,2},{0,2}. */
r_.m128i = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a_.m128i), _mm_castsi128_ps(b_.m128i), 0x88));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 0, 2, 4, 6);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
const size_t idx = i << 1;
r_.values[ i ] = a_.values[idx];
r_.values[i + halfway_point] = b_.values[idx];
}
#endif
return simde_int32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vuzp1q_s32
#define vuzp1q_s32(a, b) simde_vuzp1q_s32((a), (b))
#endif
/* vuzp1q_s64: UZP1 of two int64x2 vectors -- result is {a[0], b[0]}. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vuzp1q_s64(simde_int64x2_t a, simde_int64x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vuzp1q_s64(a, b);
#else
simde_int64x2_private
r_,
a_ = simde_int64x2_to_private(a),
b_ = simde_int64x2_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i64x2_shuffle(a_.v128, b_.v128, 0, 2);
#elif defined(SIMDE_X86_SSE2_NATIVE)
/* movelh pairs the low 64-bit halves of a and b. */
r_.m128i = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(a_.m128i), _mm_castsi128_ps(b_.m128i)));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 0, 2);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
const size_t idx = i << 1;
r_.values[ i ] = a_.values[idx];
r_.values[i + halfway_point] = b_.values[idx];
}
#endif
return simde_int64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vuzp1q_s64
#define vuzp1q_s64(a, b) simde_vuzp1q_s64((a), (b))
#endif
/* vuzp1q_u8: UZP1 -- even-indexed uint8 lanes of a, then even-indexed lanes of b. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vuzp1q_u8(simde_uint8x16_t a, simde_uint8x16_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vuzp1q_u8(a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint8x16x2_t t = vuzpq_u8(a, b);
return t.val[0];
#else
simde_uint8x16_private
r_,
a_ = simde_uint8x16_to_private(a),
b_ = simde_uint8x16_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i8x16_shuffle(a_.v128, b_.v128, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, b_.values, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
const size_t idx = i << 1;
r_.values[ i ] = a_.values[idx];
r_.values[i + halfway_point] = b_.values[idx];
}
#endif
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vuzp1q_u8
#define vuzp1q_u8(a, b) simde_vuzp1q_u8((a), (b))
#endif
/* vuzp1q_u16: UZP1 -- even-indexed uint16 lanes of a, then even-indexed lanes of b. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vuzp1q_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vuzp1q_u16(a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint16x8x2_t t = vuzpq_u16(a, b);
return t.val[0];
#else
simde_uint16x8_private
r_,
a_ = simde_uint16x8_to_private(a),
b_ = simde_uint16x8_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i16x8_shuffle(a_.v128, b_.v128, 0, 2, 4, 6, 8, 10, 12, 14);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.values, b_.values, 0, 2, 4, 6, 8, 10, 12, 14);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
const size_t idx = i << 1;
r_.values[ i ] = a_.values[idx];
r_.values[i + halfway_point] = b_.values[idx];
}
#endif
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vuzp1q_u16
#define vuzp1q_u16(a, b) simde_vuzp1q_u16((a), (b))
#endif
/* vuzp1q_u32: UZP1 -- even-indexed uint32 lanes of a, then even-indexed lanes of b. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vuzp1q_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vuzp1q_u32(a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4x2_t t = vuzpq_u32(a, b);
return t.val[0];
#else
simde_uint32x4_private
r_,
a_ = simde_uint32x4_to_private(a),
b_ = simde_uint32x4_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i32x4_shuffle(a_.v128, b_.v128, 0, 2, 4, 6);
#elif defined(SIMDE_X86_SSE2_NATIVE)
/* Integer data routed through the float shuffle unit; 0x88 = lanes {0,2},{0,2}. */
r_.m128i = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a_.m128i), _mm_castsi128_ps(b_.m128i), 0x88));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 0, 2, 4, 6);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
const size_t idx = i << 1;
r_.values[ i ] = a_.values[idx];
r_.values[i + halfway_point] = b_.values[idx];
}
#endif
return simde_uint32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vuzp1q_u32
#define vuzp1q_u32(a, b) simde_vuzp1q_u32((a), (b))
#endif
/* vuzp1q_u64: UZP1 of two uint64x2 vectors -- result is {a[0], b[0]}. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vuzp1q_u64(simde_uint64x2_t a, simde_uint64x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vuzp1q_u64(a, b);
#else
simde_uint64x2_private
r_,
a_ = simde_uint64x2_to_private(a),
b_ = simde_uint64x2_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i64x2_shuffle(a_.v128, b_.v128, 0, 2);
#elif defined(SIMDE_X86_SSE2_NATIVE)
/* _mm_movelh_ps?!?! SSE is weird. */
r_.m128i = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(a_.m128i), _mm_castsi128_ps(b_.m128i)));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 0, 2);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
const size_t idx = i << 1;
r_.values[ i ] = a_.values[idx];
r_.values[i + halfway_point] = b_.values[idx];
}
#endif
return simde_uint64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vuzp1q_u64
#define vuzp1q_u64(a, b) simde_vuzp1q_u64((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_UZP1_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_SUBL_H)
#define SIMDE_ARM_NEON_SUBL_H
#include "sub.h"
#include "movl.h"
#include "movl_high.h"
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vsubl_s8: widening subtract -- sign-extend each int8 lane of a and b to
 * int16, then subtract lane-wise (the widened result cannot overflow). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vsubl_s8(simde_int8x8_t a, simde_int8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsubl_s8(a, b);
#else
simde_int16x8_t a_wide = simde_vmovl_s8(a);
simde_int16x8_t b_wide = simde_vmovl_s8(b);
return simde_vsubq_s16(a_wide, b_wide);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsubl_s8
#define vsubl_s8(a, b) simde_vsubl_s8((a), (b))
#endif
/* vsubl_s16: widening subtract -- sign-extend each int16 lane to int32,
 * then subtract lane-wise. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vsubl_s16(simde_int16x4_t a, simde_int16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsubl_s16(a, b);
#else
simde_int32x4_t a_wide = simde_vmovl_s16(a);
simde_int32x4_t b_wide = simde_vmovl_s16(b);
return simde_vsubq_s32(a_wide, b_wide);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsubl_s16
#define vsubl_s16(a, b) simde_vsubl_s16((a), (b))
#endif
/* vsubl_s32: widening subtract -- sign-extend each int32 lane to int64,
 * then subtract lane-wise. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vsubl_s32(simde_int32x2_t a, simde_int32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsubl_s32(a, b);
#else
simde_int64x2_t a_wide = simde_vmovl_s32(a);
simde_int64x2_t b_wide = simde_vmovl_s32(b);
return simde_vsubq_s64(a_wide, b_wide);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsubl_s32
#define vsubl_s32(a, b) simde_vsubl_s32((a), (b))
#endif
/* vsubl_u8: widening subtract -- zero-extend each uint8 lane to uint16,
 * then subtract lane-wise (result wraps modulo 2^16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vsubl_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsubl_u8(a, b);
#else
simde_uint16x8_t a_wide = simde_vmovl_u8(a);
simde_uint16x8_t b_wide = simde_vmovl_u8(b);
return simde_vsubq_u16(a_wide, b_wide);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsubl_u8
#define vsubl_u8(a, b) simde_vsubl_u8((a), (b))
#endif
/* vsubl_u16: widening subtract -- zero-extend each uint16 lane to uint32,
 * then subtract lane-wise. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vsubl_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsubl_u16(a, b);
#else
simde_uint32x4_t a_wide = simde_vmovl_u16(a);
simde_uint32x4_t b_wide = simde_vmovl_u16(b);
return simde_vsubq_u32(a_wide, b_wide);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsubl_u16
#define vsubl_u16(a, b) simde_vsubl_u16((a), (b))
#endif
/* vsubl_u32: widening subtract -- zero-extend each uint32 lane to uint64,
 * then subtract lane-wise. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vsubl_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsubl_u32(a, b);
#else
simde_uint64x2_t a_wide = simde_vmovl_u32(a);
simde_uint64x2_t b_wide = simde_vmovl_u32(b);
return simde_vsubq_u64(a_wide, b_wide);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsubl_u32
#define vsubl_u32(a, b) simde_vsubl_u32((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_SUBL_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Zhi An Ng <zhin@google.com> (Copyright owned by Google, LLC)
* 2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_LD1_DUP_H)
#define SIMDE_ARM_NEON_LD1_DUP_H
#include "dup_n.h"
#include "reinterpret.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vld1_dup_f32: load one float32 from ptr and broadcast it to both lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vld1_dup_f32(simde_float32 const * ptr) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1_dup_f32(ptr);
#else
simde_float32 value = ptr[0];
return simde_vdup_n_f32(value);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_dup_f32
#define vld1_dup_f32(a) simde_vld1_dup_f32((a))
#endif
/* vld1_dup_f64: load one float64 from ptr and broadcast it to the single
 * lane of a float64x1.  On ARMv7 (no f64 vectors) the load is done through
 * the s64 intrinsic and the bits are reinterpreted. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vld1_dup_f64(simde_float64 const * ptr) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vld1_dup_f64(ptr);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return simde_vreinterpret_f64_s64(vld1_dup_s64(HEDLEY_REINTERPRET_CAST(int64_t const*, ptr)));
#else
simde_float64 value = ptr[0];
return simde_vdup_n_f64(value);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vld1_dup_f64
#define vld1_dup_f64(a) simde_vld1_dup_f64((a))
#endif
/* vld1_dup_s8: load one int8 from ptr and broadcast it to all eight lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vld1_dup_s8(int8_t const * ptr) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1_dup_s8(ptr);
#else
int8_t value = ptr[0];
return simde_vdup_n_s8(value);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_dup_s8
#define vld1_dup_s8(a) simde_vld1_dup_s8((a))
#endif
/* vld1_dup_s16: load one int16 from ptr and broadcast it to all four lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vld1_dup_s16(int16_t const * ptr) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1_dup_s16(ptr);
#else
int16_t value = ptr[0];
return simde_vdup_n_s16(value);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_dup_s16
#define vld1_dup_s16(a) simde_vld1_dup_s16((a))
#endif
/* vld1_dup_s32: load one int32 from ptr and broadcast it to both lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vld1_dup_s32(int32_t const * ptr) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1_dup_s32(ptr);
#else
int32_t value = ptr[0];
return simde_vdup_n_s32(value);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_dup_s32
#define vld1_dup_s32(a) simde_vld1_dup_s32((a))
#endif
/* vld1_dup_s64: load one int64 from ptr into the single lane of an int64x1. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vld1_dup_s64(int64_t const * ptr) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1_dup_s64(ptr);
#else
int64_t value = ptr[0];
return simde_vdup_n_s64(value);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_dup_s64
#define vld1_dup_s64(a) simde_vld1_dup_s64((a))
#endif
/* vld1_dup_u8: load one uint8 from ptr and broadcast it to all eight lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vld1_dup_u8(uint8_t const * ptr) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1_dup_u8(ptr);
#else
uint8_t value = ptr[0];
return simde_vdup_n_u8(value);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_dup_u8
#define vld1_dup_u8(a) simde_vld1_dup_u8((a))
#endif
/* vld1_dup_u16: load one uint16 from ptr and broadcast it to all four lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vld1_dup_u16(uint16_t const * ptr) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1_dup_u16(ptr);
#else
uint16_t value = ptr[0];
return simde_vdup_n_u16(value);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_dup_u16
#define vld1_dup_u16(a) simde_vld1_dup_u16((a))
#endif
/* vld1_dup_u32: load one uint32 from ptr and broadcast it to both lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vld1_dup_u32(uint32_t const * ptr) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1_dup_u32(ptr);
#else
uint32_t value = ptr[0];
return simde_vdup_n_u32(value);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_dup_u32
#define vld1_dup_u32(a) simde_vld1_dup_u32((a))
#endif
/* vld1_dup_u64: load one uint64 from ptr into the single lane of a uint64x1. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vld1_dup_u64(uint64_t const * ptr) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1_dup_u64(ptr);
#else
uint64_t value = ptr[0];
return simde_vdup_n_u64(value);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_dup_u64
#define vld1_dup_u64(a) simde_vld1_dup_u64((a))
#endif
/* vld1q_dup_f32: load one float32 from ptr and broadcast it to all four
 * lanes, using the SSE/WASM splat load when available. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vld1q_dup_f32(simde_float32 const * ptr) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1q_dup_f32(ptr);
#elif \
defined(SIMDE_X86_SSE_NATIVE) || \
defined(SIMDE_WASM_SIMD128_NATIVE)
simde_float32x4_private r_;
#if defined(SIMDE_X86_SSE_NATIVE)
r_.m128 = _mm_load_ps1(ptr);
#else
r_.v128 = wasm_v128_load32_splat(ptr);
#endif
return simde_float32x4_from_private(r_);
#else
return simde_vdupq_n_f32(*ptr);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_dup_f32
#define vld1q_dup_f32(a) simde_vld1q_dup_f32((a))
#endif
/* vld1q_dup_f64: load one float64 from ptr and broadcast it to both lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vld1q_dup_f64(simde_float64 const * ptr) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vld1q_dup_f64(ptr);
#else
return simde_vdupq_n_f64(*ptr);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vld1q_dup_f64
#define vld1q_dup_f64(a) simde_vld1q_dup_f64((a))
#endif
/* vld1q_dup_s8: load one int8 from ptr and broadcast it to all sixteen lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vld1q_dup_s8(int8_t const * ptr) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1q_dup_s8(ptr);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
simde_int8x16_private r_;
r_.v128 = wasm_v128_load8_splat(ptr);
return simde_int8x16_from_private(r_);
#else
return simde_vdupq_n_s8(*ptr);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_dup_s8
#define vld1q_dup_s8(a) simde_vld1q_dup_s8((a))
#endif
/* vld1q_dup_s16: load one int16 from ptr and broadcast it to all eight lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vld1q_dup_s16(int16_t const * ptr) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1q_dup_s16(ptr);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
simde_int16x8_private r_;
r_.v128 = wasm_v128_load16_splat(ptr);
return simde_int16x8_from_private(r_);
#else
return simde_vdupq_n_s16(*ptr);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_dup_s16
#define vld1q_dup_s16(a) simde_vld1q_dup_s16((a))
#endif
/* vld1q_dup_s32: load one int32 from ptr and broadcast it to all four lanes.
 * The SSE path reuses the float splat load and keeps the raw bits via casts. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vld1q_dup_s32(int32_t const * ptr) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1q_dup_s32(ptr);
#elif \
defined(SIMDE_X86_SSE2_NATIVE) || \
defined(SIMDE_WASM_SIMD128_NATIVE)
simde_int32x4_private r_;
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_castps_si128(_mm_load_ps1(HEDLEY_REINTERPRET_CAST(float const *, ptr)));
#else
r_.v128 = wasm_v128_load32_splat(ptr);
#endif
return simde_int32x4_from_private(r_);
#else
return simde_vdupq_n_s32(*ptr);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_dup_s32
#define vld1q_dup_s32(a) simde_vld1q_dup_s32((a))
#endif
/* vld1q_dup_s64: load one int64 from ptr and broadcast it to both lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vld1q_dup_s64(int64_t const * ptr) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1q_dup_s64(ptr);
#elif \
defined(SIMDE_X86_SSE2_NATIVE) || \
defined(SIMDE_WASM_SIMD128_NATIVE)
simde_int64x2_private r_;
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_set1_epi64x(*ptr);
#else
r_.v128 = wasm_v128_load64_splat(ptr);
#endif
return simde_int64x2_from_private(r_);
#else
return simde_vdupq_n_s64(*ptr);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_dup_s64
#define vld1q_dup_s64(a) simde_vld1q_dup_s64((a))
#endif
/* vld1q_dup_u8: load one uint8 from ptr and broadcast it to all sixteen lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vld1q_dup_u8(uint8_t const * ptr) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1q_dup_u8(ptr);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
simde_uint8x16_private r_;
r_.v128 = wasm_v128_load8_splat(ptr);
return simde_uint8x16_from_private(r_);
#else
return simde_vdupq_n_u8(*ptr);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_dup_u8
#define vld1q_dup_u8(a) simde_vld1q_dup_u8((a))
#endif
/* vld1q_dup_u16: load one uint16 from ptr and broadcast it to all eight lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vld1q_dup_u16(uint16_t const * ptr) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1q_dup_u16(ptr);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
simde_uint16x8_private r_;
r_.v128 = wasm_v128_load16_splat(ptr);
return simde_uint16x8_from_private(r_);
#else
return simde_vdupq_n_u16(*ptr);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_dup_u16
#define vld1q_dup_u16(a) simde_vld1q_dup_u16((a))
#endif
/* vld1q_dup_u32: load one uint32 from ptr and broadcast it to all four lanes.
 * The SSE path reuses the float splat load and keeps the raw bits via casts. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vld1q_dup_u32(uint32_t const * ptr) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1q_dup_u32(ptr);
#elif \
defined(SIMDE_X86_SSE2_NATIVE) || \
defined(SIMDE_WASM_SIMD128_NATIVE)
simde_uint32x4_private r_;
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_castps_si128(_mm_load_ps1(HEDLEY_REINTERPRET_CAST(float const *, ptr)));
#else
r_.v128 = wasm_v128_load32_splat(ptr);
#endif
return simde_uint32x4_from_private(r_);
#else
return simde_vdupq_n_u32(*ptr);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_dup_u32
#define vld1q_dup_u32(a) simde_vld1q_dup_u32((a))
#endif
/* vld1q_dup_u64: load one uint64 from ptr and broadcast it to both lanes.
 * The SSE path reads through an int64 cast because _mm_set1_epi64x is signed. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vld1q_dup_u64(uint64_t const * ptr) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1q_dup_u64(ptr);
#elif \
defined(SIMDE_X86_SSE2_NATIVE) || \
defined(SIMDE_WASM_SIMD128_NATIVE)
simde_uint64x2_private r_;
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_set1_epi64x(*HEDLEY_REINTERPRET_CAST(int64_t const *, ptr));
#else
r_.v128 = wasm_v128_load64_splat(ptr);
#endif
return simde_uint64x2_from_private(r_);
#else
return simde_vdupq_n_u64(*ptr);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_dup_u64
#define vld1q_dup_u64(a) simde_vld1q_dup_u64((a))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_LD1_DUP_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Christopher Moore <moore@free.fr>
*/
#if !defined(SIMDE_ARM_NEON_CGTZ_H)
#define SIMDE_ARM_NEON_CGTZ_H
#include "cgt.h"
#include "combine.h"
#include "dup_n.h"
#include "get_low.h"
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vcgtzd_s64: scalar compare-greater-than-zero -- all ones when a > 0,
 * otherwise zero. */
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vcgtzd_s64(int64_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return HEDLEY_STATIC_CAST(uint64_t, vcgtzd_s64(a));
#else
if (a > 0) {
return UINT64_MAX;
}
return 0;
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcgtzd_s64
#define vcgtzd_s64(a) simde_vcgtzd_s64(a)
#endif
/* vcgtzd_f64: scalar compare-greater-than-zero for float64 -- all ones
 * when a > 0.0, otherwise zero (false for NaN, -0.0 and +0.0). */
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vcgtzd_f64(simde_float64_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return HEDLEY_STATIC_CAST(uint64_t, vcgtzd_f64(a));
#else
if (a > SIMDE_FLOAT64_C(0.0)) {
return UINT64_MAX;
}
return 0;
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcgtzd_f64
#define vcgtzd_f64(a) simde_vcgtzd_f64(a)
#endif
/* vcgtzs_f32: scalar compare-greater-than-zero for float32 -- all ones
 * when a > 0.0f, otherwise zero (false for NaN, -0.0 and +0.0). */
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_vcgtzs_f32(simde_float32_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return HEDLEY_STATIC_CAST(uint32_t, vcgtzs_f32(a));
#else
if (a > SIMDE_FLOAT32_C(0.0)) {
return UINT32_MAX;
}
return 0;
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcgtzs_f32
#define vcgtzs_f32(a) simde_vcgtzs_f32(a)
#endif
/* vcgtzq_f32: per-lane compare a > 0.0f; each result lane is all ones when
 * true, zero otherwise.  Falls back to vcgtq against a zero vector, a GNU
 * vector comparison, or the scalar helper. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vcgtzq_f32(simde_float32x4_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcgtzq_f32(a);
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
return simde_vcgtq_f32(a, simde_vdupq_n_f32(SIMDE_FLOAT32_C(0.0)))
#else
simde_float32x4_private a_ = simde_float32x4_to_private(a);
simde_uint32x4_private r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > SIMDE_FLOAT32_C(0.0));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vcgtzs_f32(a_.values[i]);
}
#endif
return simde_uint32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcgtzq_f32
#define vcgtzq_f32(a) simde_vcgtzq_f32(a)
#endif
/* vcgtzq_f64: per-lane compare a > 0.0; each result lane is all ones when
 * true, zero otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vcgtzq_f64(simde_float64x2_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcgtzq_f64(a);
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
return simde_vcgtq_f64(a, simde_vdupq_n_f64(SIMDE_FLOAT64_C(0.0)));
#else
simde_float64x2_private a_ = simde_float64x2_to_private(a);
simde_uint64x2_private r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > SIMDE_FLOAT64_C(0.0));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vcgtzd_f64(a_.values[i]);
}
#endif
return simde_uint64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcgtzq_f64
#define vcgtzq_f64(a) simde_vcgtzq_f64(a)
#endif
/* vcgtzq_s8: per-lane compare a > 0; each int8 result lane is 0xFF when
 * true, zero otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vcgtzq_s8(simde_int8x16_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcgtzq_s8(a);
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
return simde_vcgtq_s8(a, simde_vdupq_n_s8(0));
#else
simde_int8x16_private a_ = simde_int8x16_to_private(a);
simde_uint8x16_private r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > 0);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] > 0) ? UINT8_MAX : 0;
}
#endif
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcgtzq_s8
#define vcgtzq_s8(a) simde_vcgtzq_s8(a)
#endif
/* vcgtzq_s16: per-lane compare a > 0; each int16 result lane is 0xFFFF when
 * true, zero otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vcgtzq_s16(simde_int16x8_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcgtzq_s16(a);
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
return simde_vcgtq_s16(a, simde_vdupq_n_s16(0));
#else
simde_int16x8_private a_ = simde_int16x8_to_private(a);
simde_uint16x8_private r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > 0);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] > 0) ? UINT16_MAX : 0;
}
#endif
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcgtzq_s16
#define vcgtzq_s16(a) simde_vcgtzq_s16(a)
#endif
/* vcgtzq_s32: per-lane compare a > 0; each int32 result lane is all ones
 * when true, zero otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vcgtzq_s32(simde_int32x4_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcgtzq_s32(a);
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
return simde_vcgtq_s32(a, simde_vdupq_n_s32(0));
#else
simde_int32x4_private a_ = simde_int32x4_to_private(a);
simde_uint32x4_private r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > 0);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] > 0) ? UINT32_MAX : 0;
}
#endif
return simde_uint32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcgtzq_s32
#define vcgtzq_s32(a) simde_vcgtzq_s32(a)
#endif
/* vcgtzq_s64: per-lane compare a > 0; each int64 result lane is all ones
 * when true, zero otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vcgtzq_s64(simde_int64x2_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcgtzq_s64(a);
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
return simde_vcgtq_s64(a, simde_vdupq_n_s64(0));
#else
simde_int64x2_private a_ = simde_int64x2_to_private(a);
simde_uint64x2_private r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > 0);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vcgtzd_s64(a_.values[i]);
}
#endif
return simde_uint64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcgtzq_s64
#define vcgtzq_s64(a) simde_vcgtzq_s64(a)
#endif
/* vcgtz_f32: 64-bit variant -- per-lane compare a > 0.0f; all ones when
 * true, zero otherwise.  The vector-extension path is skipped on GCC
 * builds affected by bug 100762. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vcgtz_f32(simde_float32x2_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcgtz_f32(a);
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
return simde_vcgt_f32(a, simde_vdup_n_f32(SIMDE_FLOAT32_C(0.0)));
#else
simde_float32x2_private a_ = simde_float32x2_to_private(a);
simde_uint32x2_private r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > SIMDE_FLOAT32_C(0.0));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vcgtzs_f32(a_.values[i]);
}
#endif
return simde_uint32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcgtz_f32
#define vcgtz_f32(a) simde_vcgtz_f32(a)
#endif
/* vcgtz_f64: single-lane compare a > 0.0; all ones when true, zero otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vcgtz_f64(simde_float64x1_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcgtz_f64(a);
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
return simde_vcgt_f64(a, simde_vdup_n_f64(SIMDE_FLOAT64_C(0.0)));
#else
simde_float64x1_private a_ = simde_float64x1_to_private(a);
simde_uint64x1_private r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > SIMDE_FLOAT64_C(0.0));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vcgtzd_f64(a_.values[i]);
}
#endif
return simde_uint64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcgtz_f64
#define vcgtz_f64(a) simde_vcgtz_f64(a)
#endif
/* vcgtz_s8: per-lane a > 0 on a 64-bit vector of eight signed bytes; true
 * lanes become UINT8_MAX, false lanes zero. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vcgtz_s8(simde_int8x8_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcgtz_s8(a);
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
return simde_vcgt_s8(a, simde_vdup_n_s8(0));
#else
simde_int8x8_private a_ = simde_int8x8_to_private(a);
simde_uint8x8_private r_;
/* SIMDE_BUG_GCC_100762: avoid GCC's vector compare on 64-bit-wide types. */
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > 0);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] > 0) ? UINT8_MAX : 0;
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcgtz_s8
#define vcgtz_s8(a) simde_vcgtz_s8(a)
#endif
/* vcgtz_s16: same contract for four signed 16-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vcgtz_s16(simde_int16x4_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcgtz_s16(a);
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
return simde_vcgt_s16(a, simde_vdup_n_s16(0));
#else
simde_int16x4_private a_ = simde_int16x4_to_private(a);
simde_uint16x4_private r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > 0);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] > 0) ? UINT16_MAX : 0;
}
#endif
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcgtz_s16
#define vcgtz_s16(a) simde_vcgtz_s16(a)
#endif
/* vcgtz_s32: same contract for two signed 32-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vcgtz_s32(simde_int32x2_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcgtz_s32(a);
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
return simde_vcgt_s32(a, simde_vdup_n_s32(0));
#else
simde_int32x2_private a_ = simde_int32x2_to_private(a);
simde_uint32x2_private r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > 0);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] > 0) ? UINT32_MAX : 0;
}
#endif
return simde_uint32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcgtz_s32
#define vcgtz_s32(a) simde_vcgtz_s32(a)
#endif
/* vcgtz_s64: a > 0 for a single signed 64-bit lane; UINT64_MAX when true,
 * zero otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vcgtz_s64(simde_int64x1_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcgtz_s64(a);
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
return simde_vcgt_s64(a, simde_vdup_n_s64(0));
#else
simde_int64x1_private a_ = simde_int64x1_to_private(a);
simde_uint64x1_private r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > 0);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
/* Scalar helper returns UINT64_MAX / 0 for each element. */
r_.values[i] = simde_vcgtzd_s64(a_.values[i]);
}
#endif
return simde_uint64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcgtz_s64
#define vcgtz_s64(a) simde_vcgtz_s64(a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_CGTZ_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Christopher Moore <moore@free.fr>
*/
/* N.B. CM: vcreate_f16 and vcreate_bf16 are omitted as
* SIMDe has no 16-bit floating point support.
* Idem for the poly types. */
#if !defined(SIMDE_ARM_NEON_CREATE_H)
#define SIMDE_ARM_NEON_CREATE_H
#include "dup_n.h"
#include "reinterpret.h"
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vcreate_s8: build a 64-bit vector of eight signed bytes from the raw bits
 * of a uint64_t (lane 0 = least-significant byte). The polyfill splats the
 * integer into a u64x1 and reinterprets the bits. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vcreate_s8(uint64_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcreate_s8(a);
#else
return simde_vreinterpret_s8_u64(simde_vdup_n_u64(a));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcreate_s8
#define vcreate_s8(a) simde_vcreate_s8(a)
#endif
/* vcreate_s16: same bit-reinterpretation, four signed 16-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vcreate_s16(uint64_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcreate_s16(a);
#else
return simde_vreinterpret_s16_u64(simde_vdup_n_u64(a));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcreate_s16
#define vcreate_s16(a) simde_vcreate_s16(a)
#endif
/* vcreate_s32: same bit-reinterpretation, two signed 32-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vcreate_s32(uint64_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcreate_s32(a);
#else
return simde_vreinterpret_s32_u64(simde_vdup_n_u64(a));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcreate_s32
#define vcreate_s32(a) simde_vcreate_s32(a)
#endif
/* vcreate_s64: same bit-reinterpretation, one signed 64-bit lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vcreate_s64(uint64_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcreate_s64(a);
#else
return simde_vreinterpret_s64_u64(simde_vdup_n_u64(a));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcreate_s64
#define vcreate_s64(a) simde_vcreate_s64(a)
#endif
/* vcreate_u8: build a 64-bit vector of eight unsigned bytes from the raw
 * bits of a uint64_t (lane 0 = least-significant byte). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vcreate_u8(uint64_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcreate_u8(a);
#else
return simde_vreinterpret_u8_u64(simde_vdup_n_u64(a));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcreate_u8
#define vcreate_u8(a) simde_vcreate_u8(a)
#endif
/* vcreate_u16: same bit-reinterpretation, four unsigned 16-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vcreate_u16(uint64_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcreate_u16(a);
#else
return simde_vreinterpret_u16_u64(simde_vdup_n_u64(a));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcreate_u16
#define vcreate_u16(a) simde_vcreate_u16(a)
#endif
/* vcreate_u32: same bit-reinterpretation, two unsigned 32-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vcreate_u32(uint64_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcreate_u32(a);
#else
return simde_vreinterpret_u32_u64(simde_vdup_n_u64(a));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcreate_u32
#define vcreate_u32(a) simde_vcreate_u32(a)
#endif
/* vcreate_u64: already a u64 lane, so a plain dup suffices (no
 * reinterpret needed). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vcreate_u64(uint64_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcreate_u64(a);
#else
return simde_vdup_n_u64(a);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcreate_u64
#define vcreate_u64(a) simde_vcreate_u64(a)
#endif
/* vcreate_f32: reinterpret the raw bits of a uint64_t as two float32 lanes
 * (lane 0 = low 32 bits). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vcreate_f32(uint64_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcreate_f32(a);
#else
return simde_vreinterpret_f32_u64(simde_vdup_n_u64(a));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcreate_f32
#define vcreate_f32(a) simde_vcreate_f32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vcreate_f64(uint64_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcreate_f64(a);
#else
return simde_vreinterpret_f64_u64(simde_vdup_n_u64(a));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcreate_f64
#define vcreate_f64(a) simde_vcreate_f64(a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_CREATE_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Zhi An Ng <zhin@google.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_MLA_LANE_H)
#define SIMDE_ARM_NEON_MLA_LANE_H
#include "mla.h"
#include "dup_lane.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vmla{q}_lane_<type>(a, b, v, lane): multiply-accumulate with a vector
 * lane, i.e. r[i] = a[i] + b[i] * v[lane]. On ARMv7 NEON each maps to the
 * native intrinsic; otherwise the selected lane is broadcast with
 * vdup{q}_lane and the element-wise vmla{q} polyfill is applied. `lane`
 * must be a compile-time constant in range for `v`. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmla_lane_f32(a, b, v, lane) vmla_lane_f32((a), (b), (v), (lane))
#else
#define simde_vmla_lane_f32(a, b, v, lane) simde_vmla_f32((a), (b), simde_vdup_lane_f32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmla_lane_f32
#define vmla_lane_f32(a, b, v, lane) simde_vmla_lane_f32((a), (b), (v), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmla_lane_s16(a, b, v, lane) vmla_lane_s16((a), (b), (v), (lane))
#else
#define simde_vmla_lane_s16(a, b, v, lane) simde_vmla_s16((a), (b), simde_vdup_lane_s16((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmla_lane_s16
#define vmla_lane_s16(a, b, v, lane) simde_vmla_lane_s16((a), (b), (v), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmla_lane_s32(a, b, v, lane) vmla_lane_s32((a), (b), (v), (lane))
#else
#define simde_vmla_lane_s32(a, b, v, lane) simde_vmla_s32((a), (b), simde_vdup_lane_s32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmla_lane_s32
#define vmla_lane_s32(a, b, v, lane) simde_vmla_lane_s32((a), (b), (v), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmla_lane_u16(a, b, v, lane) vmla_lane_u16((a), (b), (v), (lane))
#else
#define simde_vmla_lane_u16(a, b, v, lane) simde_vmla_u16((a), (b), simde_vdup_lane_u16((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmla_lane_u16
#define vmla_lane_u16(a, b, v, lane) simde_vmla_lane_u16((a), (b), (v), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmla_lane_u32(a, b, v, lane) vmla_lane_u32((a), (b), (v), (lane))
#else
#define simde_vmla_lane_u32(a, b, v, lane) simde_vmla_u32((a), (b), simde_vdup_lane_u32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmla_lane_u32
#define vmla_lane_u32(a, b, v, lane) simde_vmla_lane_u32((a), (b), (v), (lane))
#endif
/* 128-bit (q) variants: `a`/`b` are quad vectors, `v` remains a 64-bit
 * vector whose lane is broadcast with vdupq_lane. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmlaq_lane_f32(a, b, v, lane) vmlaq_lane_f32((a), (b), (v), (lane))
#else
#define simde_vmlaq_lane_f32(a, b, v, lane) simde_vmlaq_f32((a), (b), simde_vdupq_lane_f32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlaq_lane_f32
#define vmlaq_lane_f32(a, b, v, lane) simde_vmlaq_lane_f32((a), (b), (v), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmlaq_lane_s16(a, b, v, lane) vmlaq_lane_s16((a), (b), (v), (lane))
#else
#define simde_vmlaq_lane_s16(a, b, v, lane) simde_vmlaq_s16((a), (b), simde_vdupq_lane_s16((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlaq_lane_s16
#define vmlaq_lane_s16(a, b, v, lane) simde_vmlaq_lane_s16((a), (b), (v), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmlaq_lane_s32(a, b, v, lane) vmlaq_lane_s32((a), (b), (v), (lane))
#else
#define simde_vmlaq_lane_s32(a, b, v, lane) simde_vmlaq_s32((a), (b), simde_vdupq_lane_s32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlaq_lane_s32
#define vmlaq_lane_s32(a, b, v, lane) simde_vmlaq_lane_s32((a), (b), (v), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmlaq_lane_u16(a, b, v, lane) vmlaq_lane_u16((a), (b), (v), (lane))
#else
#define simde_vmlaq_lane_u16(a, b, v, lane) simde_vmlaq_u16((a), (b), simde_vdupq_lane_u16((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlaq_lane_u16
#define vmlaq_lane_u16(a, b, v, lane) simde_vmlaq_lane_u16((a), (b), (v), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmlaq_lane_u32(a, b, v, lane) vmlaq_lane_u32((a), (b), (v), (lane))
#else
#define simde_vmlaq_lane_u32(a, b, v, lane) simde_vmlaq_u32((a), (b), simde_vdupq_lane_u32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlaq_lane_u32
#define vmlaq_lane_u32(a, b, v, lane) simde_vmlaq_lane_u32((a), (b), (v), (lane))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_MLA_LANE_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_ZIP1_H)
#define SIMDE_ARM_NEON_ZIP1_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vzip1_f32: interleave elements from the low halves of `a` and `b`:
 * r = { a0, b0 } for 2-lane 64-bit vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vzip1_f32(simde_float32x2_t a, simde_float32x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip1_f32(a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* A32 has only the paired vzip; val[0] is the low-half interleave. */
float32x2x2_t tmp = vzip_f32(a, b);
return tmp.val[0];
#else
simde_float32x2_private
r_,
a_ = simde_float32x2_to_private(a),
b_ = simde_float32x2_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_unpacklo_pi32(a_.m64, b_.m64);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, b_.values, 0, 2);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[2 * i    ] = a_.values[i];
r_.values[2 * i + 1] = b_.values[i];
}
#endif
return simde_float32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip1_f32
#define vzip1_f32(a, b) simde_vzip1_f32((a), (b))
#endif
/* vzip1_s8: interleave the low 4 bytes of each input:
 * r = { a0,b0,a1,b1,a2,b2,a3,b3 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vzip1_s8(simde_int8x8_t a, simde_int8x8_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip1_s8(a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int8x8x2_t tmp = vzip_s8(a, b);
return tmp.val[0];
#else
simde_int8x8_private
r_,
a_ = simde_int8x8_to_private(a),
b_ = simde_int8x8_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_unpacklo_pi8(a_.m64, b_.m64);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, a_.values, b_.values, 0, 8, 1, 9, 2, 10, 3, 11);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[2 * i    ] = a_.values[i];
r_.values[2 * i + 1] = b_.values[i];
}
#endif
return simde_int8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip1_s8
#define vzip1_s8(a, b) simde_vzip1_s8((a), (b))
#endif
/* vzip1_s16: interleave the low 2 lanes: r = { a0,b0,a1,b1 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vzip1_s16(simde_int16x4_t a, simde_int16x4_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip1_s16(a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int16x4x2_t tmp = vzip_s16(a, b);
return tmp.val[0];
#else
simde_int16x4_private
r_,
a_ = simde_int16x4_to_private(a),
b_ = simde_int16x4_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_unpacklo_pi16(a_.m64, b_.m64);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(16, 8, a_.values, b_.values, 0, 4, 1, 5);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[2 * i    ] = a_.values[i];
r_.values[2 * i + 1] = b_.values[i];
}
#endif
return simde_int16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip1_s16
#define vzip1_s16(a, b) simde_vzip1_s16((a), (b))
#endif
/* vzip1_s32: r = { a0, b0 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vzip1_s32(simde_int32x2_t a, simde_int32x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip1_s32(a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int32x2x2_t tmp = vzip_s32(a, b);
return tmp.val[0];
#else
simde_int32x2_private
r_,
a_ = simde_int32x2_to_private(a),
b_ = simde_int32x2_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_unpacklo_pi32(a_.m64, b_.m64);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, b_.values, 0, 2);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[2 * i    ] = a_.values[i];
r_.values[2 * i + 1] = b_.values[i];
}
#endif
return simde_int32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip1_s32
#define vzip1_s32(a, b) simde_vzip1_s32((a), (b))
#endif
/* vzip1_u8: unsigned variant of vzip1_s8 — interleave the low 4 bytes:
 * r = { a0,b0,a1,b1,a2,b2,a3,b3 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vzip1_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip1_u8(a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint8x8x2_t tmp = vzip_u8(a, b);
return tmp.val[0];
#else
simde_uint8x8_private
r_,
a_ = simde_uint8x8_to_private(a),
b_ = simde_uint8x8_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_unpacklo_pi8(a_.m64, b_.m64);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, a_.values, b_.values, 0, 8, 1, 9, 2, 10, 3, 11);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[2 * i    ] = a_.values[i];
r_.values[2 * i + 1] = b_.values[i];
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip1_u8
#define vzip1_u8(a, b) simde_vzip1_u8((a), (b))
#endif
/* vzip1_u16: r = { a0,b0,a1,b1 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vzip1_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip1_u16(a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint16x4x2_t tmp = vzip_u16(a, b);
return tmp.val[0];
#else
simde_uint16x4_private
r_,
a_ = simde_uint16x4_to_private(a),
b_ = simde_uint16x4_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_unpacklo_pi16(a_.m64, b_.m64);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(16, 8, a_.values, b_.values, 0, 4, 1, 5);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[2 * i    ] = a_.values[i];
r_.values[2 * i + 1] = b_.values[i];
}
#endif
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip1_u16
#define vzip1_u16(a, b) simde_vzip1_u16((a), (b))
#endif
/* vzip1_u32: r = { a0, b0 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vzip1_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip1_u32(a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x2x2_t tmp = vzip_u32(a, b);
return tmp.val[0];
#else
simde_uint32x2_private
r_,
a_ = simde_uint32x2_to_private(a),
b_ = simde_uint32x2_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_unpacklo_pi32(a_.m64, b_.m64);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, b_.values, 0, 2);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[2 * i    ] = a_.values[i];
r_.values[2 * i + 1] = b_.values[i];
}
#endif
return simde_uint32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip1_u32
#define vzip1_u32(a, b) simde_vzip1_u32((a), (b))
#endif
/* vzip1q_f32: interleave the low halves of two 128-bit float vectors:
 * r = { a0, b0, a1, b1 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vzip1q_f32(simde_float32x4_t a, simde_float32x4_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip1q_f32(a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Zip the two low d-registers; their concatenation is the q result. */
float32x2x2_t tmp = vzip_f32(vget_low_f32(a), vget_low_f32(b));
return vcombine_f32(tmp.val[0], tmp.val[1]);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_mergeh(a, b);
#else
simde_float32x4_private
r_,
a_ = simde_float32x4_to_private(a),
b_ = simde_float32x4_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i32x4_shuffle(a_.v128, b_.v128, 0, 4, 1, 5);
#elif defined(SIMDE_X86_SSE_NATIVE)
r_.m128 = _mm_unpacklo_ps(a_.m128, b_.m128);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 0, 4, 1, 5);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[2 * i    ] = a_.values[i];
r_.values[2 * i + 1] = b_.values[i];
}
#endif
return simde_float32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip1q_f32
#define vzip1q_f32(a, b) simde_vzip1q_f32((a), (b))
#endif
/* vzip1q_f64: r = { a0, b0 } for two-lane double vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vzip1q_f64(simde_float64x2_t a, simde_float64x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip1q_f64(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
return vec_mergeh(a, b);
#else
simde_float64x2_private
r_,
a_ = simde_float64x2_to_private(a),
b_ = simde_float64x2_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i64x2_shuffle(a_.v128, b_.v128, 0, 2);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128d = _mm_unpacklo_pd(a_.m128d, b_.m128d);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 0, 2);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[2 * i    ] = a_.values[i];
r_.values[2 * i + 1] = b_.values[i];
}
#endif
return simde_float64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip1q_f64
#define vzip1q_f64(a, b) simde_vzip1q_f64((a), (b))
#endif
/* vzip1q_s8: interleave the low 8 bytes of each 128-bit vector:
 * r = { a0,b0,a1,b1,...,a7,b7 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vzip1q_s8(simde_int8x16_t a, simde_int8x16_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip1q_s8(a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int8x8x2_t tmp = vzip_s8(vget_low_s8(a), vget_low_s8(b));
return vcombine_s8(tmp.val[0], tmp.val[1]);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_mergeh(a, b);
#else
simde_int8x16_private
r_,
a_ = simde_int8x16_to_private(a),
b_ = simde_int8x16_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i8x16_shuffle(a_.v128, b_.v128, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_unpacklo_epi8(a_.m128i, b_.m128i);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, b_.values, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[2 * i    ] = a_.values[i];
r_.values[2 * i + 1] = b_.values[i];
}
#endif
return simde_int8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip1q_s8
#define vzip1q_s8(a, b) simde_vzip1q_s8((a), (b))
#endif
/* vzip1q_s16: interleave the low 4 lanes: r = { a0,b0,a1,b1,a2,b2,a3,b3 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vzip1q_s16(simde_int16x8_t a, simde_int16x8_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip1q_s16(a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int16x4x2_t tmp = vzip_s16(vget_low_s16(a), vget_low_s16(b));
return vcombine_s16(tmp.val[0], tmp.val[1]);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_mergeh(a, b);
#else
simde_int16x8_private
r_,
a_ = simde_int16x8_to_private(a),
b_ = simde_int16x8_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i16x8_shuffle(a_.v128, b_.v128, 0, 8, 1, 9, 2, 10, 3, 11);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_unpacklo_epi16(a_.m128i, b_.m128i);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.values, b_.values, 0, 8, 1, 9, 2, 10, 3, 11);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[2 * i    ] = a_.values[i];
r_.values[2 * i + 1] = b_.values[i];
}
#endif
return simde_int16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip1q_s16
#define vzip1q_s16(a, b) simde_vzip1q_s16((a), (b))
#endif
/* vzip1q_s32: interleave the low 2 lanes: r = { a0,b0,a1,b1 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vzip1q_s32(simde_int32x4_t a, simde_int32x4_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip1q_s32(a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int32x2x2_t tmp = vzip_s32(vget_low_s32(a), vget_low_s32(b));
return vcombine_s32(tmp.val[0], tmp.val[1]);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_mergeh(a, b);
#else
simde_int32x4_private
r_,
a_ = simde_int32x4_to_private(a),
b_ = simde_int32x4_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i32x4_shuffle(a_.v128, b_.v128, 0, 4, 1, 5);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_unpacklo_epi32(a_.m128i, b_.m128i);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 0, 4, 1, 5);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[2 * i    ] = a_.values[i];
r_.values[2 * i + 1] = b_.values[i];
}
#endif
return simde_int32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip1q_s32
#define vzip1q_s32(a, b) simde_vzip1q_s32((a), (b))
#endif
/* vzip1q_s64: r = { a0, b0 } (no A32 path — 64-bit zip is A64-only). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vzip1q_s64(simde_int64x2_t a, simde_int64x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip1q_s64(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
return vec_mergeh(a, b);
#else
simde_int64x2_private
r_,
a_ = simde_int64x2_to_private(a),
b_ = simde_int64x2_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i64x2_shuffle(a_.v128, b_.v128, 0, 2);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_unpacklo_epi64(a_.m128i, b_.m128i);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 0, 2);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[2 * i    ] = a_.values[i];
r_.values[2 * i + 1] = b_.values[i];
}
#endif
return simde_int64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip1q_s64
#define vzip1q_s64(a, b) simde_vzip1q_s64((a), (b))
#endif
/* vzip1q_u8: unsigned variant — interleave the low 8 bytes:
 * r = { a0,b0,a1,b1,...,a7,b7 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vzip1q_u8(simde_uint8x16_t a, simde_uint8x16_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip1q_u8(a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint8x8x2_t tmp = vzip_u8(vget_low_u8(a), vget_low_u8(b));
return vcombine_u8(tmp.val[0], tmp.val[1]);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_mergeh(a, b);
#else
simde_uint8x16_private
r_,
a_ = simde_uint8x16_to_private(a),
b_ = simde_uint8x16_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i8x16_shuffle(a_.v128, b_.v128, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_unpacklo_epi8(a_.m128i, b_.m128i);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, b_.values, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[2 * i    ] = a_.values[i];
r_.values[2 * i + 1] = b_.values[i];
}
#endif
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip1q_u8
#define vzip1q_u8(a, b) simde_vzip1q_u8((a), (b))
#endif
/* vzip1q_u16: interleave the low 4 lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vzip1q_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip1q_u16(a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint16x4x2_t tmp = vzip_u16(vget_low_u16(a), vget_low_u16(b));
return vcombine_u16(tmp.val[0], tmp.val[1]);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_mergeh(a, b);
#else
simde_uint16x8_private
r_,
a_ = simde_uint16x8_to_private(a),
b_ = simde_uint16x8_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i16x8_shuffle(a_.v128, b_.v128, 0, 8, 1, 9, 2, 10, 3, 11);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_unpacklo_epi16(a_.m128i, b_.m128i);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.values, b_.values, 0, 8, 1, 9, 2, 10, 3, 11);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[2 * i    ] = a_.values[i];
r_.values[2 * i + 1] = b_.values[i];
}
#endif
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip1q_u16
#define vzip1q_u16(a, b) simde_vzip1q_u16((a), (b))
#endif
/* vzip1q_u32: interleave the low 2 lanes: r = { a0,b0,a1,b1 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vzip1q_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip1q_u32(a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x2x2_t tmp = vzip_u32(vget_low_u32(a), vget_low_u32(b));
return vcombine_u32(tmp.val[0], tmp.val[1]);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_mergeh(a, b);
#else
simde_uint32x4_private
r_,
a_ = simde_uint32x4_to_private(a),
b_ = simde_uint32x4_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i32x4_shuffle(a_.v128, b_.v128, 0, 4, 1, 5);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_unpacklo_epi32(a_.m128i, b_.m128i);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 0, 4, 1, 5);
#else
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[2 * i    ] = a_.values[i];
r_.values[2 * i + 1] = b_.values[i];
}
#endif
return simde_uint32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip1q_u32
#define vzip1q_u32(a, b) simde_vzip1q_u32((a), (b))
#endif
/* simde_vzip1q_u64: combine the low 64-bit lanes of a and b
 * (result lanes: a0,b0) — AArch64 vzip1q_u64. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vzip1q_u64(simde_uint64x2_t a, simde_uint64x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip1q_u64(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
return vec_mergeh(a, b);
#else
simde_uint64x2_private
r_,
a_ = simde_uint64x2_to_private(a),
b_ = simde_uint64x2_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i64x2_shuffle(a_.v128, b_.v128, 0, 2);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_unpacklo_epi64(a_.m128i, b_.m128i);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 0, 2)
;
#else
/* Scalar fallback: zip element i of each input into slots 2i and 2i+1. */
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[2 * i ] = a_.values[i];
r_.values[2 * i + 1] = b_.values[i];
}
#endif
return simde_uint64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip1q_u64
#define vzip1q_u64(a, b) simde_vzip1q_u64((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_ZIP1_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_ADDLV_H)
#define SIMDE_ARM_NEON_ADDLV_H
#include "types.h"
#include "movl.h"
#include "addv.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* simde_vaddlv_s8: widening horizontal add — sums all eight signed 8-bit
 * lanes of `a` into a single int16_t (AArch64 vaddlv_s8). */
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_vaddlv_s8(simde_int8x8_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vaddlv_s8(a);
#elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
return simde_vaddvq_s16(simde_vmovl_s8(a));
#else
simde_int8x8_private a_ = simde_int8x8_to_private(a);
int16_t r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
return r;
#endif
}
/* vaddlv_s8 exists only on AArch64, so the alias is gated on the A64V8
 * alias macro (the A32V7 gate would drop the alias on AArch32 builds),
 * matching the other A64-only functions in this library (e.g. vzip1q_*). */
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddlv_s8
#define vaddlv_s8(a) simde_vaddlv_s8(a)
#endif
/* simde_vaddlv_s16: widening horizontal add — sums all four signed 16-bit
 * lanes of `a` into a single int32_t (AArch64 vaddlv_s16). */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_vaddlv_s16(simde_int16x4_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vaddlv_s16(a);
#elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
return simde_vaddvq_s32(simde_vmovl_s16(a));
#else
simde_int16x4_private a_ = simde_int16x4_to_private(a);
int32_t r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
return r;
#endif
}
/* A64-only intrinsic: gate the alias on A64V8, not A32V7 (which would
 * suppress the alias on AArch32 builds). */
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddlv_s16
#define vaddlv_s16(a) simde_vaddlv_s16(a)
#endif
/* simde_vaddlv_s32: widening horizontal add — sums both signed 32-bit
 * lanes of `a` into a single int64_t (AArch64 vaddlv_s32). */
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_vaddlv_s32(simde_int32x2_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vaddlv_s32(a);
#elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
return simde_vaddvq_s64(simde_vmovl_s32(a));
#else
simde_int32x2_private a_ = simde_int32x2_to_private(a);
int64_t r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
return r;
#endif
}
/* A64-only intrinsic: gate the alias on A64V8, not A32V7 (which would
 * suppress the alias on AArch32 builds). */
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddlv_s32
#define vaddlv_s32(a) simde_vaddlv_s32(a)
#endif
/* simde_vaddlv_u8: widening horizontal add — sums all eight unsigned 8-bit
 * lanes of `a` into a single uint16_t (AArch64 vaddlv_u8). */
SIMDE_FUNCTION_ATTRIBUTES
uint16_t
simde_vaddlv_u8(simde_uint8x8_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vaddlv_u8(a);
#elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
return simde_vaddvq_u16(simde_vmovl_u8(a));
#else
simde_uint8x8_private a_ = simde_uint8x8_to_private(a);
uint16_t r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
return r;
#endif
}
/* A64-only intrinsic: gate the alias on A64V8, not A32V7 (which would
 * suppress the alias on AArch32 builds). */
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddlv_u8
#define vaddlv_u8(a) simde_vaddlv_u8(a)
#endif
/* simde_vaddlv_u16: widening horizontal add — sums all four unsigned 16-bit
 * lanes of `a` into a single uint32_t (AArch64 vaddlv_u16). */
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_vaddlv_u16(simde_uint16x4_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vaddlv_u16(a);
#elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
return simde_vaddvq_u32(simde_vmovl_u16(a));
#else
simde_uint16x4_private a_ = simde_uint16x4_to_private(a);
uint32_t r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
return r;
#endif
}
/* A64-only intrinsic: gate the alias on A64V8, not A32V7 (which would
 * suppress the alias on AArch32 builds). */
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddlv_u16
#define vaddlv_u16(a) simde_vaddlv_u16(a)
#endif
/* simde_vaddlv_u32: widening horizontal add — sums both unsigned 32-bit
 * lanes of `a` into a single uint64_t (AArch64 vaddlv_u32). */
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vaddlv_u32(simde_uint32x2_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vaddlv_u32(a);
#elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
return simde_vaddvq_u64(simde_vmovl_u32(a));
#else
simde_uint32x2_private a_ = simde_uint32x2_to_private(a);
uint64_t r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
return r;
#endif
}
/* A64-only intrinsic: gate the alias on A64V8, not A32V7 (which would
 * suppress the alias on AArch32 builds). */
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddlv_u32
#define vaddlv_u32(a) simde_vaddlv_u32(a)
#endif
/* simde_vaddlvq_s8: widening horizontal add — sums all sixteen signed 8-bit
 * lanes of `a` into a single int16_t (AArch64 vaddlvq_s8). */
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_vaddlvq_s8(simde_int8x16_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vaddlvq_s8(a);
#elif defined(SIMDE_X86_SSE2_NATIVE)
/* Bias each lane by 0x80 so _mm_sad_epu8 can accumulate as unsigned,
 * fold the two SAD halves together, then remove the bias (16 * 128). */
__m128i a_ = simde_int8x16_to_m128i(a);
a_ = _mm_xor_si128(a_, _mm_set1_epi8('\x80'));
a_ = _mm_sad_epu8(a_, _mm_setzero_si128());
a_ = _mm_add_epi16(a_, _mm_shuffle_epi32(a_, 0xEE));
return HEDLEY_STATIC_CAST(int16_t, _mm_cvtsi128_si32(a_) - 2048);
#else
simde_int8x16_private a_ = simde_int8x16_to_private(a);
int16_t r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
return r;
#endif
}
/* A64-only intrinsic: gate the alias on A64V8, not A32V7 (which would
 * suppress the alias on AArch32 builds). */
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddlvq_s8
#define vaddlvq_s8(a) simde_vaddlvq_s8(a)
#endif
/* simde_vaddlvq_s16: widening horizontal add — sums all eight signed 16-bit
 * lanes of `a` into a single int32_t (AArch64 vaddlvq_s16). */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_vaddlvq_s16(simde_int16x8_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vaddlvq_s16(a);
#elif defined(SIMDE_X86_SSSE3_NATIVE) && !defined(HEDLEY_MSVC_VERSION)
/* Bias lanes by 0x8000, separate low/high bytes, sum each group with
 * SAD, recombine (high-byte sum shifted into place), then remove the
 * accumulated bias (8 * 0x8000 = 262144). */
__m128i a_ = simde_int16x8_to_m128i(a);
a_ = _mm_xor_si128(a_, _mm_set1_epi16(HEDLEY_STATIC_CAST(int16_t, 0x8000)));
a_ = _mm_shuffle_epi8(a_, _mm_set_epi8(15, 13, 11, 9, 7, 5, 3, 1, 14, 12, 10, 8, 6, 4, 2, 0));
a_ = _mm_sad_epu8(a_, _mm_setzero_si128());
a_ = _mm_add_epi32(a_, _mm_srli_si128(a_, 7));
return _mm_cvtsi128_si32(a_) - 262144;
#else
simde_int16x8_private a_ = simde_int16x8_to_private(a);
int32_t r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
return r;
#endif
}
/* A64-only intrinsic: gate the alias on A64V8, not A32V7 (which would
 * suppress the alias on AArch32 builds). */
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddlvq_s16
#define vaddlvq_s16(a) simde_vaddlvq_s16(a)
#endif
/* simde_vaddlvq_s32: widening horizontal add — sums all four signed 32-bit
 * lanes of `a` into a single int64_t (AArch64 vaddlvq_s32). */
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_vaddlvq_s32(simde_int32x4_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vaddlvq_s32(a);
#else
simde_int32x4_private a_ = simde_int32x4_to_private(a);
int64_t r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
return r;
#endif
}
/* A64-only intrinsic: gate the alias on A64V8, not A32V7 (which would
 * suppress the alias on AArch32 builds). */
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddlvq_s32
#define vaddlvq_s32(a) simde_vaddlvq_s32(a)
#endif
/* simde_vaddlvq_u8: widening horizontal add — sums all sixteen unsigned
 * 8-bit lanes of `a` into a single uint16_t (AArch64 vaddlvq_u8). */
SIMDE_FUNCTION_ATTRIBUTES
uint16_t
simde_vaddlvq_u8(simde_uint8x16_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vaddlvq_u8(a);
#elif defined(SIMDE_X86_SSE2_NATIVE)
/* SAD against zero sums the bytes per 64-bit half; fold the halves. */
__m128i a_ = simde_uint8x16_to_m128i(a);
a_ = _mm_sad_epu8(a_, _mm_setzero_si128());
a_ = _mm_add_epi16(a_, _mm_shuffle_epi32(a_, 0xEE));
return HEDLEY_STATIC_CAST(uint16_t, _mm_cvtsi128_si32(a_));
#else
simde_uint8x16_private a_ = simde_uint8x16_to_private(a);
uint16_t r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
return r;
#endif
}
/* A64-only intrinsic: gate the alias on A64V8, not A32V7 (which would
 * suppress the alias on AArch32 builds). */
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddlvq_u8
#define vaddlvq_u8(a) simde_vaddlvq_u8(a)
#endif
/* simde_vaddlvq_u16: widening horizontal add — sums all eight unsigned
 * 16-bit lanes of `a` into a single uint32_t (AArch64 vaddlvq_u16). */
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_vaddlvq_u16(simde_uint16x8_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vaddlvq_u16(a);
#elif defined(SIMDE_X86_SSSE3_NATIVE)
/* Separate low/high bytes, sum each group with SAD against zero,
 * then recombine with the high-byte sum shifted into place. */
__m128i a_ = simde_uint16x8_to_m128i(a);
a_ = _mm_shuffle_epi8(a_, _mm_set_epi8(15, 13, 11, 9, 7, 5, 3, 1, 14, 12, 10, 8, 6, 4, 2, 0));
a_ = _mm_sad_epu8(a_, _mm_setzero_si128());
a_ = _mm_add_epi32(a_, _mm_srli_si128(a_, 7));
return HEDLEY_STATIC_CAST(uint32_t, _mm_cvtsi128_si32(a_));
#else
simde_uint16x8_private a_ = simde_uint16x8_to_private(a);
uint32_t r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
return r;
#endif
}
/* A64-only intrinsic: gate the alias on A64V8, not A32V7 (which would
 * suppress the alias on AArch32 builds). */
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddlvq_u16
#define vaddlvq_u16(a) simde_vaddlvq_u16(a)
#endif
/* simde_vaddlvq_u32: widening horizontal add — sums all four unsigned
 * 32-bit lanes of `a` into a single uint64_t (AArch64 vaddlvq_u32). */
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vaddlvq_u32(simde_uint32x4_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vaddlvq_u32(a);
#else
simde_uint32x4_private a_ = simde_uint32x4_to_private(a);
uint64_t r = 0;
SIMDE_VECTORIZE_REDUCTION(+:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r += a_.values[i];
}
return r;
#endif
}
/* A64-only intrinsic: gate the alias on A64V8, not A32V7 (which would
 * suppress the alias on AArch32 builds). */
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vaddlvq_u32
#define vaddlvq_u32(a) simde_vaddlvq_u32(a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_ADDLV_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_SUB_H)
#define SIMDE_ARM_NEON_SUB_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* simde_vsubd_s64: scalar 64-bit signed subtraction (AArch64 vsubd_s64). */
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_vsubd_s64(int64_t a, int64_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vsubd_s64(a, b);
#else
/* Portable path: plain C subtraction. */
int64_t difference = a - b;
return difference;
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vsubd_s64
#define vsubd_s64(a, b) simde_vsubd_s64((a), (b))
#endif
/* simde_vsubd_u64: scalar 64-bit unsigned subtraction (AArch64 vsubd_u64). */
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vsubd_u64(uint64_t a, uint64_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vsubd_u64(a, b);
#else
/* Portable path: plain C subtraction (unsigned wrap-around). */
uint64_t difference = a - b;
return difference;
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vsubd_u64
#define vsubd_u64(a, b) simde_vsubd_u64((a), (b))
#endif
/* simde_vsub_f32: lane-wise a - b for two float32 lanes (NEON vsub_f32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vsub_f32(simde_float32x2_t a, simde_float32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsub_f32(a, b);
#else
simde_float32x2_private
r_,
a_ = simde_float32x2_to_private(a),
b_ = simde_float32x2_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values - b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] - b_.values[i];
}
#endif
return simde_float32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsub_f32
#define vsub_f32(a, b) simde_vsub_f32((a), (b))
#endif
/* simde_vsub_f64: a - b for the single float64 lane (AArch64 vsub_f64). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vsub_f64(simde_float64x1_t a, simde_float64x1_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vsub_f64(a, b);
#else
simde_float64x1_private
r_,
a_ = simde_float64x1_to_private(a),
b_ = simde_float64x1_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values - b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] - b_.values[i];
}
#endif
return simde_float64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vsub_f64
#define vsub_f64(a, b) simde_vsub_f64((a), (b))
#endif
/* simde_vsub_s8: lane-wise a - b for eight signed 8-bit lanes (NEON
 * vsub_s8); dispatches to MMX, GCC vector ops, or a scalar loop. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vsub_s8(simde_int8x8_t a, simde_int8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsub_s8(a, b);
#else
simde_int8x8_private
r_,
a_ = simde_int8x8_to_private(a),
b_ = simde_int8x8_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_sub_pi8(a_.m64, b_.m64);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values - b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] - b_.values[i];
}
#endif
return simde_int8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsub_s8
#define vsub_s8(a, b) simde_vsub_s8((a), (b))
#endif
/* simde_vsub_s16: lane-wise a - b for four signed 16-bit lanes (NEON
 * vsub_s16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vsub_s16(simde_int16x4_t a, simde_int16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsub_s16(a, b);
#else
simde_int16x4_private
r_,
a_ = simde_int16x4_to_private(a),
b_ = simde_int16x4_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_sub_pi16(a_.m64, b_.m64);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values - b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] - b_.values[i];
}
#endif
return simde_int16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsub_s16
#define vsub_s16(a, b) simde_vsub_s16((a), (b))
#endif
/* simde_vsub_s32: lane-wise a - b for two signed 32-bit lanes (NEON
 * vsub_s32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vsub_s32(simde_int32x2_t a, simde_int32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsub_s32(a, b);
#else
simde_int32x2_private
r_,
a_ = simde_int32x2_to_private(a),
b_ = simde_int32x2_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_sub_pi32(a_.m64, b_.m64);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values - b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] - b_.values[i];
}
#endif
return simde_int32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsub_s32
#define vsub_s32(a, b) simde_vsub_s32((a), (b))
#endif
/* simde_vsub_s64: a - b for the single signed 64-bit lane (NEON vsub_s64);
 * the scalar fallback delegates to simde_vsubd_s64. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vsub_s64(simde_int64x1_t a, simde_int64x1_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsub_s64(a, b);
#else
simde_int64x1_private
r_,
a_ = simde_int64x1_to_private(a),
b_ = simde_int64x1_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values - b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vsubd_s64(a_.values[i], b_.values[i]);
}
#endif
return simde_int64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsub_s64
#define vsub_s64(a, b) simde_vsub_s64((a), (b))
#endif
/* simde_vsub_u8: lane-wise a - b for eight unsigned 8-bit lanes (NEON
 * vsub_u8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vsub_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsub_u8(a, b);
#else
simde_uint8x8_private
r_,
a_ = simde_uint8x8_to_private(a),
b_ = simde_uint8x8_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_sub_pi8(a_.m64, b_.m64);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values - b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] - b_.values[i];
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsub_u8
#define vsub_u8(a, b) simde_vsub_u8((a), (b))
#endif
/* simde_vsub_u16: lane-wise a - b for four unsigned 16-bit lanes (NEON
 * vsub_u16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vsub_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsub_u16(a, b);
#else
simde_uint16x4_private
r_,
a_ = simde_uint16x4_to_private(a),
b_ = simde_uint16x4_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_sub_pi16(a_.m64, b_.m64);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values - b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] - b_.values[i];
}
#endif
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsub_u16
#define vsub_u16(a, b) simde_vsub_u16((a), (b))
#endif
/* simde_vsub_u32: lane-wise a - b for two unsigned 32-bit lanes (NEON
 * vsub_u32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vsub_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsub_u32(a, b);
#else
simde_uint32x2_private
r_,
a_ = simde_uint32x2_to_private(a),
b_ = simde_uint32x2_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_sub_pi32(a_.m64, b_.m64);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values - b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] - b_.values[i];
}
#endif
return simde_uint32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsub_u32
#define vsub_u32(a, b) simde_vsub_u32((a), (b))
#endif
/* simde_vsub_u64: a - b for the single unsigned 64-bit lane (NEON
 * vsub_u64); the scalar fallback delegates to simde_vsubd_u64. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vsub_u64(simde_uint64x1_t a, simde_uint64x1_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsub_u64(a, b);
#else
simde_uint64x1_private
r_,
a_ = simde_uint64x1_to_private(a),
b_ = simde_uint64x1_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values - b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vsubd_u64(a_.values[i], b_.values[i]);
}
#endif
return simde_uint64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsub_u64
#define vsub_u64(a, b) simde_vsub_u64((a), (b))
#endif
/* simde_vsubq_f32: lane-wise a - b for four float32 lanes (NEON vsubq_f32);
 * dispatches to AltiVec, SSE, WASM SIMD, GCC vector ops, or a scalar loop. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vsubq_f32(simde_float32x4_t a, simde_float32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsubq_f32(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(float) a_ , b_, r_;
a_ = a;
b_ = b;
r_ = vec_sub(a_, b_);
return r_;
#else
simde_float32x4_private
r_,
a_ = simde_float32x4_to_private(a),
b_ = simde_float32x4_to_private(b);
#if defined(SIMDE_X86_SSE_NATIVE)
r_.m128 = _mm_sub_ps(a_.m128, b_.m128);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_f32x4_sub(a_.v128, b_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values - b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] - b_.values[i];
}
#endif
return simde_float32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsubq_f32
#define vsubq_f32(a, b) simde_vsubq_f32((a), (b))
#endif
/* simde_vsubq_f64: lane-wise a - b for two float64 lanes (AArch64
 * vsubq_f64). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vsubq_f64(simde_float64x2_t a, simde_float64x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vsubq_f64(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
return vec_sub(a, b);
#else
simde_float64x2_private
r_,
a_ = simde_float64x2_to_private(a),
b_ = simde_float64x2_to_private(b);
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128d = _mm_sub_pd(a_.m128d, b_.m128d);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_f64x2_sub(a_.v128, b_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values - b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] - b_.values[i];
}
#endif
return simde_float64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vsubq_f64
#define vsubq_f64(a, b) simde_vsubq_f64((a), (b))
#endif
/* simde_vsubq_s8: lane-wise a - b for sixteen signed 8-bit lanes (NEON
 * vsubq_s8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vsubq_s8(simde_int8x16_t a, simde_int8x16_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsubq_s8(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_sub(a, b);
#else
simde_int8x16_private
r_,
a_ = simde_int8x16_to_private(a),
b_ = simde_int8x16_to_private(b);
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_sub_epi8(a_.m128i, b_.m128i);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i8x16_sub(a_.v128, b_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values - b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] - b_.values[i];
}
#endif
return simde_int8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsubq_s8
#define vsubq_s8(a, b) simde_vsubq_s8((a), (b))
#endif
/* simde_vsubq_s16: lane-wise a - b for eight signed 16-bit lanes (NEON
 * vsubq_s16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vsubq_s16(simde_int16x8_t a, simde_int16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsubq_s16(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_sub(a, b);
#else
simde_int16x8_private
r_,
a_ = simde_int16x8_to_private(a),
b_ = simde_int16x8_to_private(b);
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_sub_epi16(a_.m128i, b_.m128i);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i16x8_sub(a_.v128, b_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values - b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] - b_.values[i];
}
#endif
return simde_int16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsubq_s16
#define vsubq_s16(a, b) simde_vsubq_s16((a), (b))
#endif
/* simde_vsubq_s32: lane-wise a - b for four signed 32-bit lanes (NEON
 * vsubq_s32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vsubq_s32(simde_int32x4_t a, simde_int32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsubq_s32(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_sub(a, b);
#else
simde_int32x4_private
r_,
a_ = simde_int32x4_to_private(a),
b_ = simde_int32x4_to_private(b);
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_sub_epi32(a_.m128i, b_.m128i);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i32x4_sub(a_.v128, b_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values - b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] - b_.values[i];
}
#endif
return simde_int32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsubq_s32
#define vsubq_s32(a, b) simde_vsubq_s32((a), (b))
#endif
/* simde_vsubq_s64: lane-wise a - b for two signed 64-bit lanes (NEON
 * vsubq_s64); the scalar fallback delegates to simde_vsubd_s64. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vsubq_s64(simde_int64x2_t a, simde_int64x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsubq_s64(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
return vec_sub(a, b);
#else
simde_int64x2_private
r_,
a_ = simde_int64x2_to_private(a),
b_ = simde_int64x2_to_private(b);
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_sub_epi64(a_.m128i, b_.m128i);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i64x2_sub(a_.v128, b_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values - b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vsubd_s64(a_.values[i], b_.values[i]);
}
#endif
return simde_int64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsubq_s64
#define vsubq_s64(a, b) simde_vsubq_s64((a), (b))
#endif
/* simde_vsubq_u8: lane-wise a - b for sixteen unsigned 8-bit lanes (NEON
 * vsubq_u8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vsubq_u8(simde_uint8x16_t a, simde_uint8x16_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsubq_u8(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_sub(a, b);
#else
simde_uint8x16_private
r_,
a_ = simde_uint8x16_to_private(a),
b_ = simde_uint8x16_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values - b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] - b_.values[i];
}
#endif
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsubq_u8
#define vsubq_u8(a, b) simde_vsubq_u8((a), (b))
#endif
/* simde_vsubq_u16: lane-wise a - b for eight unsigned 16-bit lanes (NEON
 * vsubq_u16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vsubq_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsubq_u16(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_sub(a, b);
#else
simde_uint16x8_private
r_,
a_ = simde_uint16x8_to_private(a),
b_ = simde_uint16x8_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values - b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] - b_.values[i];
}
#endif
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsubq_u16
#define vsubq_u16(a, b) simde_vsubq_u16((a), (b))
#endif
/* simde_vsubq_u32: lane-wise a - b for four unsigned 32-bit lanes (NEON
 * vsubq_u32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vsubq_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsubq_u32(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_sub(a, b);
#else
simde_uint32x4_private
r_,
a_ = simde_uint32x4_to_private(a),
b_ = simde_uint32x4_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values - b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] - b_.values[i];
}
#endif
return simde_uint32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsubq_u32
#define vsubq_u32(a, b) simde_vsubq_u32((a), (b))
#endif
/* simde_vsubq_u64: lane-wise a - b for two unsigned 64-bit lanes (NEON
 * vsubq_u64); the scalar fallback delegates to simde_vsubd_u64. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vsubq_u64(simde_uint64x2_t a, simde_uint64x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsubq_u64(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
return vec_sub(a, b);
#else
simde_uint64x2_private
r_,
a_ = simde_uint64x2_to_private(a),
b_ = simde_uint64x2_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values - b_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vsubd_u64(a_.values[i], b_.values[i]);
}
#endif
return simde_uint64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vsubq_u64
#define vsubq_u64(a, b) simde_vsubq_u64((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_SUB_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_MLS_H)
#define SIMDE_ARM_NEON_MLS_H
#include "mul.h"
#include "sub.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* simde_vmls_f32: multiply-subtract — computes a - (b * c) per lane
 * (NEON vmls_f32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vmls_f32(simde_float32x2_t a, simde_float32x2_t b, simde_float32x2_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmls_f32(a, b, c);
#else
simde_float32x2_t product = simde_vmul_f32(b, c);
return simde_vsub_f32(a, product);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmls_f32
#define vmls_f32(a, b, c) simde_vmls_f32((a), (b), (c))
#endif
/* simde_vmls_f64: multiply-subtract — computes a - (b * c) for the single
 * float64 lane (AArch64 vmls_f64). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vmls_f64(simde_float64x1_t a, simde_float64x1_t b, simde_float64x1_t c) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmls_f64(a, b, c);
#else
return simde_vsub_f64(a, simde_vmul_f64(b, c));
#endif
}
/* vmls_f64 is an AArch64-only intrinsic, so its alias must be gated on the
 * A64V8 alias macro (as vsub_f64 is), not A32V7 — the A32V7 gate would
 * suppress the alias on AArch32 builds, where no native vmls_f64 exists. */
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmls_f64
#define vmls_f64(a, b, c) simde_vmls_f64((a), (b), (c))
#endif
/* simde_vmls_s8: multiply-subtract — computes a - (b * c) per lane
 * (NEON vmls_s8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vmls_s8(simde_int8x8_t a, simde_int8x8_t b, simde_int8x8_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmls_s8(a, b, c);
#else
simde_int8x8_t product = simde_vmul_s8(b, c);
return simde_vsub_s8(a, product);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmls_s8
#define vmls_s8(a, b, c) simde_vmls_s8((a), (b), (c))
#endif
/* simde_vmls_s16: multiply-subtract — computes a - (b * c) per lane
 * (NEON vmls_s16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vmls_s16(simde_int16x4_t a, simde_int16x4_t b, simde_int16x4_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmls_s16(a, b, c);
#else
simde_int16x4_t product = simde_vmul_s16(b, c);
return simde_vsub_s16(a, product);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmls_s16
#define vmls_s16(a, b, c) simde_vmls_s16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vmls_s32(simde_int32x2_t a, simde_int32x2_t b, simde_int32x2_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmls_s32(a, b, c);
#else
return simde_vsub_s32(a, simde_vmul_s32(b, c));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmls_s32
#define vmls_s32(a, b, c) simde_vmls_s32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vmls_u8(simde_uint8x8_t a, simde_uint8x8_t b, simde_uint8x8_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmls_u8(a, b, c);
#else
return simde_vsub_u8(a, simde_vmul_u8(b, c));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmls_u8
#define vmls_u8(a, b, c) simde_vmls_u8((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vmls_u16(simde_uint16x4_t a, simde_uint16x4_t b, simde_uint16x4_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmls_u16(a, b, c);
#else
return simde_vsub_u16(a, simde_vmul_u16(b, c));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmls_u16
#define vmls_u16(a, b, c) simde_vmls_u16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vmls_u32(simde_uint32x2_t a, simde_uint32x2_t b, simde_uint32x2_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmls_u32(a, b, c);
#else
return simde_vsub_u32(a, simde_vmul_u32(b, c));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmls_u32
#define vmls_u32(a, b, c) simde_vmls_u32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vmlsq_f32(simde_float32x4_t a, simde_float32x4_t b, simde_float32x4_t c) {
  /* Multiply-subtract on float32x4: r[i] = a[i] - (b[i] * c[i]). */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmlsq_f32(a, b, c);
  #elif defined(SIMDE_X86_FMA_NATIVE)
    /* x86 FMA: fnmadd evaluates -(b*c) + a in a single instruction. */
    simde_float32x4_private acc_ = simde_float32x4_to_private(a);
    simde_float32x4_private mul1_ = simde_float32x4_to_private(b);
    simde_float32x4_private mul2_ = simde_float32x4_to_private(c);
    simde_float32x4_private res_;
    res_.m128 = _mm_fnmadd_ps(mul1_.m128, mul2_.m128, acc_.m128);
    return simde_float32x4_from_private(res_);
  #else
    /* Portable fallback: explicit multiply followed by subtract. */
    return simde_vsubq_f32(a, simde_vmulq_f32(b, c));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmlsq_f32
  #define vmlsq_f32(a, b, c) simde_vmlsq_f32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vmlsq_f64(simde_float64x2_t a, simde_float64x2_t b, simde_float64x2_t c) {
  /* Multiply-subtract on float64x2: r[i] = a[i] - (b[i] * c[i]). */
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vmlsq_f64(a, b, c);
  #elif defined(SIMDE_X86_FMA_NATIVE)
    /* x86 FMA: fnmadd evaluates -(b*c) + a in a single instruction. */
    simde_float64x2_private acc_ = simde_float64x2_to_private(a);
    simde_float64x2_private mul1_ = simde_float64x2_to_private(b);
    simde_float64x2_private mul2_ = simde_float64x2_to_private(c);
    simde_float64x2_private res_;
    res_.m128d = _mm_fnmadd_pd(mul1_.m128d, mul2_.m128d, acc_.m128d);
    return simde_float64x2_from_private(res_);
  #else
    /* Portable fallback: explicit multiply followed by subtract. */
    return simde_vsubq_f64(a, simde_vmulq_f64(b, c));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmlsq_f64
  #define vmlsq_f64(a, b, c) simde_vmlsq_f64((a), (b), (c))
#endif
/* vmlsq integer family: r[i] = a[i] - (b[i] * c[i]) on 128-bit vectors.
 * Unsigned/overflowing arithmetic wraps per C's modular semantics. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vmlsq_s8(simde_int8x16_t a, simde_int8x16_t b, simde_int8x16_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmlsq_s8(a, b, c);
  #else
    return simde_vsubq_s8(a, simde_vmulq_s8(b, c));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmlsq_s8
  #define vmlsq_s8(a, b, c) simde_vmlsq_s8((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vmlsq_s16(simde_int16x8_t a, simde_int16x8_t b, simde_int16x8_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmlsq_s16(a, b, c);
  #else
    return simde_vsubq_s16(a, simde_vmulq_s16(b, c));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmlsq_s16
  #define vmlsq_s16(a, b, c) simde_vmlsq_s16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vmlsq_s32(simde_int32x4_t a, simde_int32x4_t b, simde_int32x4_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmlsq_s32(a, b, c);
  #else
    return simde_vsubq_s32(a, simde_vmulq_s32(b, c));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmlsq_s32
  #define vmlsq_s32(a, b, c) simde_vmlsq_s32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vmlsq_u8(simde_uint8x16_t a, simde_uint8x16_t b, simde_uint8x16_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmlsq_u8(a, b, c);
  #else
    return simde_vsubq_u8(a, simde_vmulq_u8(b, c));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmlsq_u8
  #define vmlsq_u8(a, b, c) simde_vmlsq_u8((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vmlsq_u16(simde_uint16x8_t a, simde_uint16x8_t b, simde_uint16x8_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmlsq_u16(a, b, c);
  #else
    return simde_vsubq_u16(a, simde_vmulq_u16(b, c));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmlsq_u16
  #define vmlsq_u16(a, b, c) simde_vmlsq_u16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vmlsq_u32(simde_uint32x4_t a, simde_uint32x4_t b, simde_uint32x4_t c) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmlsq_u32(a, b, c);
  #else
    return simde_vsubq_u32(a, simde_vmulq_u32(b, c));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmlsq_u32
  #define vmlsq_u32(a, b, c) simde_vmlsq_u32((a), (b), (c))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_MLS_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020-2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_DUP_LANE_H)
#define SIMDE_ARM_NEON_DUP_LANE_H
#include "dup_n.h"
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_vdups_lane_s32(simde_int32x2_t vec, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
return simde_int32x2_to_private(vec).values[lane];
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vdups_lane_s32(vec, lane) vdups_lane_s32(vec, lane)
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vdups_lane_s32
#define vdups_lane_s32(vec, lane) simde_vdups_lane_s32((vec), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_vdups_lane_u32(simde_uint32x2_t vec, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
return simde_uint32x2_to_private(vec).values[lane];
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vdups_lane_u32(vec, lane) vdups_lane_u32(vec, lane)
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vdups_lane_u32
#define vdups_lane_u32(vec, lane) simde_vdups_lane_u32((vec), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32_t
simde_vdups_lane_f32(simde_float32x2_t vec, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
return simde_float32x2_to_private(vec).values[lane];
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vdups_lane_f32(vec, lane) vdups_lane_f32(vec, lane)
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vdups_lane_f32
#define vdups_lane_f32(vec, lane) simde_vdups_lane_f32((vec), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_vdups_laneq_s32(simde_int32x4_t vec, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
return simde_int32x4_to_private(vec).values[lane];
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vdups_laneq_s32(vec, lane) vdups_laneq_s32(vec, lane)
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vdups_laneq_s32
#define vdups_laneq_s32(vec, lane) simde_vdups_laneq_s32((vec), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_vdups_laneq_u32(simde_uint32x4_t vec, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
return simde_uint32x4_to_private(vec).values[lane];
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vdups_laneq_u32(vec, lane) vdups_laneq_u32(vec, lane)
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vdups_laneq_u32
#define vdups_laneq_u32(vec, lane) simde_vdups_laneq_u32((vec), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32_t
simde_vdups_laneq_f32(simde_float32x4_t vec, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
return simde_float32x4_to_private(vec).values[lane];
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vdups_laneq_f32(vec, lane) vdups_laneq_f32(vec, lane)
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vdups_laneq_f32
#define vdups_laneq_f32(vec, lane) simde_vdups_laneq_f32((vec), (lane))
#endif
/* vdupd_lane / vdupd_laneq: extract a single 64-bit scalar from lane `lane`.
 * 1-element sources only accept lane 0; "laneq" (2-lane) accepts 0..1. */
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_vdupd_lane_s64(simde_int64x1_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
  return simde_int64x1_to_private(vec).values[lane];
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdupd_lane_s64(vec, lane) vdupd_lane_s64(vec, lane)
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdupd_lane_s64
  #define vdupd_lane_s64(vec, lane) simde_vdupd_lane_s64((vec), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vdupd_lane_u64(simde_uint64x1_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
  return simde_uint64x1_to_private(vec).values[lane];
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdupd_lane_u64(vec, lane) vdupd_lane_u64(vec, lane)
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdupd_lane_u64
  #define vdupd_lane_u64(vec, lane) simde_vdupd_lane_u64((vec), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64_t
simde_vdupd_lane_f64(simde_float64x1_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
  return simde_float64x1_to_private(vec).values[lane];
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdupd_lane_f64(vec, lane) vdupd_lane_f64(vec, lane)
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdupd_lane_f64
  #define vdupd_lane_f64(vec, lane) simde_vdupd_lane_f64((vec), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_vdupd_laneq_s64(simde_int64x2_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  return simde_int64x2_to_private(vec).values[lane];
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdupd_laneq_s64(vec, lane) vdupd_laneq_s64(vec, lane)
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdupd_laneq_s64
  #define vdupd_laneq_s64(vec, lane) simde_vdupd_laneq_s64((vec), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vdupd_laneq_u64(simde_uint64x2_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  return simde_uint64x2_to_private(vec).values[lane];
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdupd_laneq_u64(vec, lane) vdupd_laneq_u64(vec, lane)
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdupd_laneq_u64
  #define vdupd_laneq_u64(vec, lane) simde_vdupd_laneq_u64((vec), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64_t
simde_vdupd_laneq_f64(simde_float64x2_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  return simde_float64x2_to_private(vec).values[lane];
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdupd_laneq_f64(vec, lane) vdupd_laneq_f64(vec, lane)
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdupd_laneq_f64
  #define vdupd_laneq_f64(vec, lane) simde_vdupd_laneq_f64((vec), (lane))
#endif
/* simde_vdup_lane_f32: broadcast lane `lane` of a float32x2 to all lanes of a
 * float32x2. Macro-only so `lane` stays a compile-time constant for the
 * shuffle path; falls back to scalar-extract + dup-n. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vdup_lane_f32(vec, lane) vdup_lane_f32(vec, lane)
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100760)
  #define simde_vdup_lane_f32(vec, lane) (__extension__ ({ \
      simde_float32x2_private simde_vdup_lane_f32_vec_ = simde_float32x2_to_private(vec); \
      simde_float32x2_private simde_vdup_lane_f32_r_; \
      simde_vdup_lane_f32_r_.values = \
        SIMDE_SHUFFLE_VECTOR_( \
          32, 8, \
          simde_vdup_lane_f32_vec_.values, \
          simde_vdup_lane_f32_vec_.values, \
          lane, lane \
        ); \
      simde_float32x2_from_private(simde_vdup_lane_f32_r_); \
    }))
#else
  #define simde_vdup_lane_f32(vec, lane) simde_vdup_n_f32(simde_vdups_lane_f32(vec, lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vdup_lane_f32
  #define vdup_lane_f32(vec, lane) simde_vdup_lane_f32((vec), (lane))
#endif
/* simde_vdup_lane_f64: 1-lane source, so this is effectively a splat of the
 * only element. */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdup_lane_f64(vec, lane) vdup_lane_f64(vec, lane)
#else
  #define simde_vdup_lane_f64(vec, lane) simde_vdup_n_f64(simde_vdupd_lane_f64(vec, lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdup_lane_f64
  #define vdup_lane_f64(vec, lane) simde_vdup_lane_f64((vec), (lane))
#endif
/* simde_vdup_lane_s8: broadcast lane `lane` of an int8x8 across an int8x8.
 * The function form is the portable fallback; the macro overrides it with a
 * native intrinsic or constant-lane shuffle when available. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vdup_lane_s8(simde_int8x8_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
  return simde_vdup_n_s8(simde_int8x8_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vdup_lane_s8(vec, lane) vdup_lane_s8(vec, lane)
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100760)
  #define simde_vdup_lane_s8(vec, lane) (__extension__ ({ \
      simde_int8x8_private simde_vdup_lane_s8_vec_ = simde_int8x8_to_private(vec); \
      simde_int8x8_private simde_vdup_lane_s8_r_; \
      simde_vdup_lane_s8_r_.values = \
        SIMDE_SHUFFLE_VECTOR_( \
          8, 8, \
          simde_vdup_lane_s8_vec_.values, \
          simde_vdup_lane_s8_vec_.values, \
          lane, lane, lane, lane, lane, lane, lane, lane \
        ); \
      simde_int8x8_from_private(simde_vdup_lane_s8_r_); \
    }))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vdup_lane_s8
  #define vdup_lane_s8(vec, lane) simde_vdup_lane_s8((vec), (lane))
#endif
/* simde_vdup_lane_s16: same pattern for int16x4 (4 lanes). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vdup_lane_s16(simde_int16x4_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  return simde_vdup_n_s16(simde_int16x4_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vdup_lane_s16(vec, lane) vdup_lane_s16(vec, lane)
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100760)
  #define simde_vdup_lane_s16(vec, lane) (__extension__ ({ \
      simde_int16x4_private simde_vdup_lane_s16_vec_ = simde_int16x4_to_private(vec); \
      simde_int16x4_private simde_vdup_lane_s16_r_; \
      simde_vdup_lane_s16_r_.values = \
        SIMDE_SHUFFLE_VECTOR_( \
          16, 8, \
          simde_vdup_lane_s16_vec_.values, \
          simde_vdup_lane_s16_vec_.values, \
          lane, lane, lane, lane \
        ); \
      simde_int16x4_from_private(simde_vdup_lane_s16_r_); \
    }))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vdup_lane_s16
  #define vdup_lane_s16(vec, lane) simde_vdup_lane_s16((vec), (lane))
#endif
/* simde_vdup_lane_s32: broadcast lane `lane` of an int32x2 across an int32x2.
 * Macro-only so `lane` reaches the shuffle as a compile-time constant. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vdup_lane_s32(vec, lane) vdup_lane_s32(vec, lane)
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100760)
  #define simde_vdup_lane_s32(vec, lane) (__extension__ ({ \
      simde_int32x2_private simde_vdup_lane_s32_vec_ = simde_int32x2_to_private(vec); \
      simde_int32x2_private simde_vdup_lane_s32_r_; \
      simde_vdup_lane_s32_r_.values = \
        SIMDE_SHUFFLE_VECTOR_( \
          32, 8, \
          simde_vdup_lane_s32_vec_.values, \
          simde_vdup_lane_s32_vec_.values, \
          lane, lane \
        ); \
      simde_int32x2_from_private(simde_vdup_lane_s32_r_); \
    }))
#else
  #define simde_vdup_lane_s32(vec, lane) simde_vdup_n_s32(simde_vdups_lane_s32(vec, lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vdup_lane_s32
  #define vdup_lane_s32(vec, lane) simde_vdup_lane_s32((vec), (lane))
#endif
/* simde_vdup_lane_s64: 1-lane source, so this splats the only element. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vdup_lane_s64(vec, lane) vdup_lane_s64(vec, lane)
#else
  #define simde_vdup_lane_s64(vec, lane) simde_vdup_n_s64(simde_vdupd_lane_s64(vec, lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vdup_lane_s64
  #define vdup_lane_s64(vec, lane) simde_vdup_lane_s64((vec), (lane))
#endif
/* simde_vdup_lane_u8: broadcast lane `lane` of a uint8x8 across a uint8x8.
 * Function is the portable fallback; macro overrides with native/shuffle. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vdup_lane_u8(simde_uint8x8_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
  return simde_vdup_n_u8(simde_uint8x8_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vdup_lane_u8(vec, lane) vdup_lane_u8(vec, lane)
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100760)
  #define simde_vdup_lane_u8(vec, lane) (__extension__ ({ \
      simde_uint8x8_private simde_vdup_lane_u8_vec_ = simde_uint8x8_to_private(vec); \
      simde_uint8x8_private simde_vdup_lane_u8_r_; \
      simde_vdup_lane_u8_r_.values = \
        SIMDE_SHUFFLE_VECTOR_( \
          8, 8, \
          simde_vdup_lane_u8_vec_.values, \
          simde_vdup_lane_u8_vec_.values, \
          lane, lane, lane, lane, lane, lane, lane, lane \
        ); \
      simde_uint8x8_from_private(simde_vdup_lane_u8_r_); \
    }))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vdup_lane_u8
  #define vdup_lane_u8(vec, lane) simde_vdup_lane_u8((vec), (lane))
#endif
/* simde_vdup_lane_u16: same pattern for uint16x4 (4 lanes). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vdup_lane_u16(simde_uint16x4_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  return simde_vdup_n_u16(simde_uint16x4_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vdup_lane_u16(vec, lane) vdup_lane_u16(vec, lane)
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100760)
  #define simde_vdup_lane_u16(vec, lane) (__extension__ ({ \
      simde_uint16x4_private simde_vdup_lane_u16_vec_ = simde_uint16x4_to_private(vec); \
      simde_uint16x4_private simde_vdup_lane_u16_r_; \
      simde_vdup_lane_u16_r_.values = \
        SIMDE_SHUFFLE_VECTOR_( \
          16, 8, \
          simde_vdup_lane_u16_vec_.values, \
          simde_vdup_lane_u16_vec_.values, \
          lane, lane, lane, lane \
        ); \
      simde_uint16x4_from_private(simde_vdup_lane_u16_r_); \
    }))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vdup_lane_u16
  #define vdup_lane_u16(vec, lane) simde_vdup_lane_u16((vec), (lane))
#endif
/* simde_vdup_lane_u32: broadcast lane `lane` of a uint32x2 across a uint32x2. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vdup_lane_u32(vec, lane) vdup_lane_u32(vec, lane)
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100760)
  #define simde_vdup_lane_u32(vec, lane) (__extension__ ({ \
      simde_uint32x2_private simde_vdup_lane_u32_vec_ = simde_uint32x2_to_private(vec); \
      simde_uint32x2_private simde_vdup_lane_u32_r_; \
      simde_vdup_lane_u32_r_.values = \
        SIMDE_SHUFFLE_VECTOR_( \
          32, 8, \
          simde_vdup_lane_u32_vec_.values, \
          simde_vdup_lane_u32_vec_.values, \
          lane, lane \
        ); \
      simde_uint32x2_from_private(simde_vdup_lane_u32_r_); \
    }))
#else
  #define simde_vdup_lane_u32(vec, lane) simde_vdup_n_u32(simde_vdups_lane_u32(vec, lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vdup_lane_u32
  #define vdup_lane_u32(vec, lane) simde_vdup_lane_u32((vec), (lane))
#endif
/* simde_vdup_lane_u64: 1-lane source, so this splats the only element. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vdup_lane_u64(vec, lane) vdup_lane_u64(vec, lane)
#else
  #define simde_vdup_lane_u64(vec, lane) simde_vdup_n_u64(simde_vdupd_lane_u64(vec, lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vdup_lane_u64
  #define vdup_lane_u64(vec, lane) simde_vdup_lane_u64((vec), (lane))
#endif
/* simde_vdup_laneq_f32: broadcast lane `lane` of a 128-bit float32x4 into a
 * 64-bit float32x2 result (narrowing shuffle; uses __builtin_shufflevector
 * because the source and destination lane counts differ). */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdup_laneq_f32(vec, lane) vdup_laneq_f32(vec, lane)
#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
  #define simde_vdup_laneq_f32(vec, lane) (__extension__ ({ \
      simde_float32x4_private simde_vdup_laneq_f32_vec_ = simde_float32x4_to_private(vec); \
      simde_float32x2_private simde_vdup_laneq_f32_r_; \
      simde_vdup_laneq_f32_r_.values = \
        __builtin_shufflevector( \
          simde_vdup_laneq_f32_vec_.values, \
          simde_vdup_laneq_f32_vec_.values, \
          lane, lane \
        ); \
      simde_float32x2_from_private(simde_vdup_laneq_f32_r_); \
    }))
#else
  #define simde_vdup_laneq_f32(vec, lane) simde_vdup_n_f32(simde_vdups_laneq_f32(vec, lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdup_laneq_f32
  #define vdup_laneq_f32(vec, lane) simde_vdup_laneq_f32((vec), (lane))
#endif
/* simde_vdup_laneq_f64: select lane `lane` of a float64x2 into a float64x1. */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdup_laneq_f64(vec, lane) vdup_laneq_f64(vec, lane)
#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
  #define simde_vdup_laneq_f64(vec, lane) (__extension__ ({ \
      simde_float64x2_private simde_vdup_laneq_f64_vec_ = simde_float64x2_to_private(vec); \
      simde_float64x1_private simde_vdup_laneq_f64_r_; \
      simde_vdup_laneq_f64_r_.values = \
        __builtin_shufflevector( \
          simde_vdup_laneq_f64_vec_.values, \
          simde_vdup_laneq_f64_vec_.values, \
          lane \
        ); \
      simde_float64x1_from_private(simde_vdup_laneq_f64_r_); \
    }))
#else
  #define simde_vdup_laneq_f64(vec, lane) simde_vdup_n_f64(simde_vdupd_laneq_f64(vec, lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdup_laneq_f64
  #define vdup_laneq_f64(vec, lane) simde_vdup_laneq_f64((vec), (lane))
#endif
/* simde_vdup_laneq_s8: broadcast lane `lane` of an int8x16 into an int8x8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vdup_laneq_s8(simde_int8x16_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) {
  return simde_vdup_n_s8(simde_int8x16_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdup_laneq_s8(vec, lane) vdup_laneq_s8(vec, lane)
#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
  #define simde_vdup_laneq_s8(vec, lane) (__extension__ ({ \
      simde_int8x16_private simde_vdup_laneq_s8_vec_ = simde_int8x16_to_private(vec); \
      simde_int8x8_private simde_vdup_laneq_s8_r_; \
      simde_vdup_laneq_s8_r_.values = \
        __builtin_shufflevector( \
          simde_vdup_laneq_s8_vec_.values, \
          simde_vdup_laneq_s8_vec_.values, \
          lane, lane, lane, lane, lane, lane, lane, lane \
        ); \
      simde_int8x8_from_private(simde_vdup_laneq_s8_r_); \
    }))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdup_laneq_s8
  #define vdup_laneq_s8(vec, lane) simde_vdup_laneq_s8((vec), (lane))
#endif
/* simde_vdup_laneq_s16: broadcast lane `lane` of an int16x8 into an int16x4. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vdup_laneq_s16(simde_int16x8_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
  return simde_vdup_n_s16(simde_int16x8_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdup_laneq_s16(vec, lane) vdup_laneq_s16(vec, lane)
#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
  #define simde_vdup_laneq_s16(vec, lane) (__extension__ ({ \
      simde_int16x8_private simde_vdup_laneq_s16_vec_ = simde_int16x8_to_private(vec); \
      simde_int16x4_private simde_vdup_laneq_s16_r_; \
      simde_vdup_laneq_s16_r_.values = \
        __builtin_shufflevector( \
          simde_vdup_laneq_s16_vec_.values, \
          simde_vdup_laneq_s16_vec_.values, \
          lane, lane, lane, lane \
        ); \
      simde_int16x4_from_private(simde_vdup_laneq_s16_r_); \
    }))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdup_laneq_s16
  #define vdup_laneq_s16(vec, lane) simde_vdup_laneq_s16((vec), (lane))
#endif
/* simde_vdup_laneq_s32: broadcast lane `lane` of an int32x4 into an int32x2. */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdup_laneq_s32(vec, lane) vdup_laneq_s32(vec, lane)
#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
  #define simde_vdup_laneq_s32(vec, lane) (__extension__ ({ \
      simde_int32x4_private simde_vdup_laneq_s32_vec_ = simde_int32x4_to_private(vec); \
      simde_int32x2_private simde_vdup_laneq_s32_r_; \
      simde_vdup_laneq_s32_r_.values = \
        __builtin_shufflevector( \
          simde_vdup_laneq_s32_vec_.values, \
          simde_vdup_laneq_s32_vec_.values, \
          lane, lane \
        ); \
      simde_int32x2_from_private(simde_vdup_laneq_s32_r_); \
    }))
#else
  #define simde_vdup_laneq_s32(vec, lane) simde_vdup_n_s32(simde_vdups_laneq_s32(vec, lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdup_laneq_s32
  #define vdup_laneq_s32(vec, lane) simde_vdup_laneq_s32((vec), (lane))
#endif
/* simde_vdup_laneq_s64: select lane `lane` of an int64x2 into an int64x1. */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdup_laneq_s64(vec, lane) vdup_laneq_s64(vec, lane)
#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
  #define simde_vdup_laneq_s64(vec, lane) (__extension__ ({ \
      simde_int64x2_private simde_vdup_laneq_s64_vec_ = simde_int64x2_to_private(vec); \
      simde_int64x1_private simde_vdup_laneq_s64_r_; \
      simde_vdup_laneq_s64_r_.values = \
        __builtin_shufflevector( \
          simde_vdup_laneq_s64_vec_.values, \
          simde_vdup_laneq_s64_vec_.values, \
          lane \
        ); \
      simde_int64x1_from_private(simde_vdup_laneq_s64_r_); \
    }))
#else
  #define simde_vdup_laneq_s64(vec, lane) simde_vdup_n_s64(simde_vdupd_laneq_s64(vec, lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdup_laneq_s64
  #define vdup_laneq_s64(vec, lane) simde_vdup_laneq_s64((vec), (lane))
#endif
/* simde_vdup_laneq_u8: broadcast lane `lane` of a uint8x16 into a uint8x8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vdup_laneq_u8(simde_uint8x16_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) {
  return simde_vdup_n_u8(simde_uint8x16_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdup_laneq_u8(vec, lane) vdup_laneq_u8(vec, lane)
#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
  #define simde_vdup_laneq_u8(vec, lane) (__extension__ ({ \
      simde_uint8x16_private simde_vdup_laneq_u8_vec_ = simde_uint8x16_to_private(vec); \
      simde_uint8x8_private simde_vdup_laneq_u8_r_; \
      simde_vdup_laneq_u8_r_.values = \
        __builtin_shufflevector( \
          simde_vdup_laneq_u8_vec_.values, \
          simde_vdup_laneq_u8_vec_.values, \
          lane, lane, lane, lane, lane, lane, lane, lane \
        ); \
      simde_uint8x8_from_private(simde_vdup_laneq_u8_r_); \
    }))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdup_laneq_u8
  #define vdup_laneq_u8(vec, lane) simde_vdup_laneq_u8((vec), (lane))
#endif
/* simde_vdup_laneq_u16: broadcast lane `lane` of a uint16x8 into a uint16x4. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vdup_laneq_u16(simde_uint16x8_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
  return simde_vdup_n_u16(simde_uint16x8_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdup_laneq_u16(vec, lane) vdup_laneq_u16(vec, lane)
#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
  #define simde_vdup_laneq_u16(vec, lane) (__extension__ ({ \
      simde_uint16x8_private simde_vdup_laneq_u16_vec_ = simde_uint16x8_to_private(vec); \
      simde_uint16x4_private simde_vdup_laneq_u16_r_; \
      simde_vdup_laneq_u16_r_.values = \
        __builtin_shufflevector( \
          simde_vdup_laneq_u16_vec_.values, \
          simde_vdup_laneq_u16_vec_.values, \
          lane, lane, lane, lane \
        ); \
      simde_uint16x4_from_private(simde_vdup_laneq_u16_r_); \
    }))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdup_laneq_u16
  #define vdup_laneq_u16(vec, lane) simde_vdup_laneq_u16((vec), (lane))
#endif
/* simde_vdup_laneq_u32: broadcast lane `lane` of a uint32x4 into a uint32x2. */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdup_laneq_u32(vec, lane) vdup_laneq_u32(vec, lane)
#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
  #define simde_vdup_laneq_u32(vec, lane) (__extension__ ({ \
      simde_uint32x4_private simde_vdup_laneq_u32_vec_ = simde_uint32x4_to_private(vec); \
      simde_uint32x2_private simde_vdup_laneq_u32_r_; \
      simde_vdup_laneq_u32_r_.values = \
        __builtin_shufflevector( \
          simde_vdup_laneq_u32_vec_.values, \
          simde_vdup_laneq_u32_vec_.values, \
          lane, lane \
        ); \
      simde_uint32x2_from_private(simde_vdup_laneq_u32_r_); \
    }))
#else
  #define simde_vdup_laneq_u32(vec, lane) simde_vdup_n_u32(simde_vdups_laneq_u32(vec, lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdup_laneq_u32
  #define vdup_laneq_u32(vec, lane) simde_vdup_laneq_u32((vec), (lane))
#endif
/* simde_vdup_laneq_u64: select lane `lane` of a uint64x2 into a uint64x1. */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdup_laneq_u64(vec, lane) vdup_laneq_u64(vec, lane)
#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
  #define simde_vdup_laneq_u64(vec, lane) (__extension__ ({ \
      simde_uint64x2_private simde_vdup_laneq_u64_vec_ = simde_uint64x2_to_private(vec); \
      simde_uint64x1_private simde_vdup_laneq_u64_r_; \
      simde_vdup_laneq_u64_r_.values = \
        __builtin_shufflevector( \
          simde_vdup_laneq_u64_vec_.values, \
          simde_vdup_laneq_u64_vec_.values, \
          lane \
        ); \
      simde_uint64x1_from_private(simde_vdup_laneq_u64_r_); \
    }))
#else
  #define simde_vdup_laneq_u64(vec, lane) simde_vdup_n_u64(simde_vdupd_laneq_u64(vec, lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdup_laneq_u64
  #define vdup_laneq_u64(vec, lane) simde_vdup_laneq_u64((vec), (lane))
#endif
/* simde_vdupq_lane_f32: widen — broadcast lane `lane` of a float32x2 across
 * all four lanes of a float32x4. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vdupq_lane_f32(simde_float32x2_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  return simde_vdupq_n_f32(simde_float32x2_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vdupq_lane_f32(vec, lane) vdupq_lane_f32(vec, lane)
#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
  #define simde_vdupq_lane_f32(vec, lane) (__extension__ ({ \
      simde_float32x2_private simde_vdupq_lane_f32_vec_ = simde_float32x2_to_private(vec); \
      simde_float32x4_private simde_vdupq_lane_f32_r_; \
      simde_vdupq_lane_f32_r_.values = \
        __builtin_shufflevector( \
          simde_vdupq_lane_f32_vec_.values, \
          simde_vdupq_lane_f32_vec_.values, \
          lane, lane, lane, lane \
        ); \
      simde_float32x4_from_private(simde_vdupq_lane_f32_r_); \
    }))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vdupq_lane_f32
  #define vdupq_lane_f32(vec, lane) simde_vdupq_lane_f32((vec), (lane))
#endif
/* simde_vdupq_lane_f64: splat the sole element of a float64x1 to float64x2. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vdupq_lane_f64(simde_float64x1_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
  return simde_vdupq_n_f64(simde_float64x1_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdupq_lane_f64(vec, lane) vdupq_lane_f64(vec, lane)
#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
  #define simde_vdupq_lane_f64(vec, lane) (__extension__ ({ \
      simde_float64x1_private simde_vdupq_lane_f64_vec_ = simde_float64x1_to_private(vec); \
      simde_float64x2_private simde_vdupq_lane_f64_r_; \
      simde_vdupq_lane_f64_r_.values = \
        __builtin_shufflevector( \
          simde_vdupq_lane_f64_vec_.values, \
          simde_vdupq_lane_f64_vec_.values, \
          lane, lane \
        ); \
      simde_float64x2_from_private(simde_vdupq_lane_f64_r_); \
    }))
#endif
/* NOTE(review): sibling f64 aliases use A64V8_ENABLE_NATIVE_ALIASES (e.g.
 * vdupd_lane_f64 above); confirm the A32V7 guard here is intended. */
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vdupq_lane_f64
  #define vdupq_lane_f64(vec, lane) simde_vdupq_lane_f64((vec), (lane))
#endif
/* simde_vdupq_lane_s8: widen — broadcast lane `lane` of an int8x8 across all
 * sixteen lanes of an int8x16. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vdupq_lane_s8(simde_int8x8_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
  return simde_vdupq_n_s8(simde_int8x8_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vdupq_lane_s8(vec, lane) vdupq_lane_s8(vec, lane)
#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
  #define simde_vdupq_lane_s8(vec, lane) (__extension__ ({ \
      simde_int8x8_private simde_vdupq_lane_s8_vec_ = simde_int8x8_to_private(vec); \
      simde_int8x16_private simde_vdupq_lane_s8_r_; \
      simde_vdupq_lane_s8_r_.values = \
        __builtin_shufflevector( \
          simde_vdupq_lane_s8_vec_.values, \
          simde_vdupq_lane_s8_vec_.values, \
          lane, lane, lane, lane, \
          lane, lane, lane, lane, \
          lane, lane, lane, lane, \
          lane, lane, lane, lane \
        ); \
      simde_int8x16_from_private(simde_vdupq_lane_s8_r_); \
    }))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vdupq_lane_s8
  #define vdupq_lane_s8(vec, lane) simde_vdupq_lane_s8((vec), (lane))
#endif
/* simde_vdupq_lane_s16: same widening pattern for int16x4 -> int16x8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vdupq_lane_s16(simde_int16x4_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  return simde_vdupq_n_s16(simde_int16x4_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vdupq_lane_s16(vec, lane) vdupq_lane_s16(vec, lane)
#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
  #define simde_vdupq_lane_s16(vec, lane) (__extension__ ({ \
      simde_int16x4_private simde_vdupq_lane_s16_vec_ = simde_int16x4_to_private(vec); \
      simde_int16x8_private simde_vdupq_lane_s16_r_; \
      simde_vdupq_lane_s16_r_.values = \
        __builtin_shufflevector( \
          simde_vdupq_lane_s16_vec_.values, \
          simde_vdupq_lane_s16_vec_.values, \
          lane, lane, lane, lane, \
          lane, lane, lane, lane \
        ); \
      simde_int16x8_from_private(simde_vdupq_lane_s16_r_); \
    }))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vdupq_lane_s16
  #define vdupq_lane_s16(vec, lane) simde_vdupq_lane_s16((vec), (lane))
#endif
/* vdupq_lane_s32: broadcast element `lane` (0..1) of an int32x2 vector
 * into all 4 lanes of a 128-bit int32x4 result. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vdupq_lane_s32(simde_int32x2_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  return simde_vdupq_n_s32(simde_int32x2_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vdupq_lane_s32(vec, lane) vdupq_lane_s32(vec, lane)
#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
  #define simde_vdupq_lane_s32(vec, lane) (__extension__ ({ \
    simde_int32x2_private simde_vdupq_lane_s32_vec_ = simde_int32x2_to_private(vec); \
    simde_int32x4_private simde_vdupq_lane_s32_r_; \
    simde_vdupq_lane_s32_r_.values = \
      __builtin_shufflevector( \
        simde_vdupq_lane_s32_vec_.values, \
        simde_vdupq_lane_s32_vec_.values, \
        lane, lane, lane, lane \
      ); \
    simde_int32x4_from_private(simde_vdupq_lane_s32_r_); \
  }))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vdupq_lane_s32
  #define vdupq_lane_s32(vec, lane) simde_vdupq_lane_s32((vec), (lane))
#endif
/* vdupq_lane_s64: broadcast the single element (lane must be 0) of an
 * int64x1 vector into both lanes of a 128-bit int64x2 result. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vdupq_lane_s64(simde_int64x1_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
  return simde_vdupq_n_s64(simde_int64x1_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vdupq_lane_s64(vec, lane) vdupq_lane_s64(vec, lane)
#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
  #define simde_vdupq_lane_s64(vec, lane) (__extension__ ({ \
    simde_int64x1_private simde_vdupq_lane_s64_vec_ = simde_int64x1_to_private(vec); \
    simde_int64x2_private simde_vdupq_lane_s64_r_; \
    simde_vdupq_lane_s64_r_.values = \
      __builtin_shufflevector( \
        simde_vdupq_lane_s64_vec_.values, \
        simde_vdupq_lane_s64_vec_.values, \
        lane, lane \
      ); \
    simde_int64x2_from_private(simde_vdupq_lane_s64_r_); \
  }))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vdupq_lane_s64
  #define vdupq_lane_s64(vec, lane) simde_vdupq_lane_s64((vec), (lane))
#endif
/* vdupq_lane_u8: broadcast element `lane` (0..7) of a uint8x8 vector
 * into all 16 lanes of a 128-bit uint8x16 result. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vdupq_lane_u8(simde_uint8x8_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
  return simde_vdupq_n_u8(simde_uint8x8_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vdupq_lane_u8(vec, lane) vdupq_lane_u8(vec, lane)
#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
  #define simde_vdupq_lane_u8(vec, lane) (__extension__ ({ \
    simde_uint8x8_private simde_vdupq_lane_u8_vec_ = simde_uint8x8_to_private(vec); \
    simde_uint8x16_private simde_vdupq_lane_u8_r_; \
    simde_vdupq_lane_u8_r_.values = \
      __builtin_shufflevector( \
        simde_vdupq_lane_u8_vec_.values, \
        simde_vdupq_lane_u8_vec_.values, \
        lane, lane, lane, lane, \
        lane, lane, lane, lane, \
        lane, lane, lane, lane, \
        lane, lane, lane, lane \
      ); \
    simde_uint8x16_from_private(simde_vdupq_lane_u8_r_); \
  }))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vdupq_lane_u8
  #define vdupq_lane_u8(vec, lane) simde_vdupq_lane_u8((vec), (lane))
#endif
/* vdupq_lane_u16: broadcast element `lane` (0..3) of a uint16x4 vector
 * into all 8 lanes of a 128-bit uint16x8 result. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vdupq_lane_u16(simde_uint16x4_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  return simde_vdupq_n_u16(simde_uint16x4_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vdupq_lane_u16(vec, lane) vdupq_lane_u16(vec, lane)
#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
  #define simde_vdupq_lane_u16(vec, lane) (__extension__ ({ \
    simde_uint16x4_private simde_vdupq_lane_u16_vec_ = simde_uint16x4_to_private(vec); \
    simde_uint16x8_private simde_vdupq_lane_u16_r_; \
    simde_vdupq_lane_u16_r_.values = \
      __builtin_shufflevector( \
        simde_vdupq_lane_u16_vec_.values, \
        simde_vdupq_lane_u16_vec_.values, \
        lane, lane, lane, lane, \
        lane, lane, lane, lane \
      ); \
    simde_uint16x8_from_private(simde_vdupq_lane_u16_r_); \
  }))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vdupq_lane_u16
  #define vdupq_lane_u16(vec, lane) simde_vdupq_lane_u16((vec), (lane))
#endif
/* vdupq_lane_u32: broadcast element `lane` (0..1) of a uint32x2 vector
 * into all 4 lanes of a 128-bit uint32x4 result. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vdupq_lane_u32(simde_uint32x2_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  return simde_vdupq_n_u32(simde_uint32x2_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vdupq_lane_u32(vec, lane) vdupq_lane_u32(vec, lane)
#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
  #define simde_vdupq_lane_u32(vec, lane) (__extension__ ({ \
    simde_uint32x2_private simde_vdupq_lane_u32_vec_ = simde_uint32x2_to_private(vec); \
    simde_uint32x4_private simde_vdupq_lane_u32_r_; \
    simde_vdupq_lane_u32_r_.values = \
      __builtin_shufflevector( \
        simde_vdupq_lane_u32_vec_.values, \
        simde_vdupq_lane_u32_vec_.values, \
        lane, lane, lane, lane \
      ); \
    simde_uint32x4_from_private(simde_vdupq_lane_u32_r_); \
  }))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vdupq_lane_u32
  #define vdupq_lane_u32(vec, lane) simde_vdupq_lane_u32((vec), (lane))
#endif
/* vdupq_lane_u64: broadcast the single element (lane must be 0) of a
 * uint64x1 vector into both lanes of a 128-bit uint64x2 result. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vdupq_lane_u64(simde_uint64x1_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
  return simde_vdupq_n_u64(simde_uint64x1_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vdupq_lane_u64(vec, lane) vdupq_lane_u64(vec, lane)
#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
  #define simde_vdupq_lane_u64(vec, lane) (__extension__ ({ \
    simde_uint64x1_private simde_vdupq_lane_u64_vec_ = simde_uint64x1_to_private(vec); \
    simde_uint64x2_private simde_vdupq_lane_u64_r_; \
    simde_vdupq_lane_u64_r_.values = \
      __builtin_shufflevector( \
        simde_vdupq_lane_u64_vec_.values, \
        simde_vdupq_lane_u64_vec_.values, \
        lane, lane \
      ); \
    simde_uint64x2_from_private(simde_vdupq_lane_u64_r_); \
  }))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vdupq_lane_u64
  #define vdupq_lane_u64(vec, lane) simde_vdupq_lane_u64((vec), (lane))
#endif
/* vdupq_laneq_f32 (AArch64): broadcast element `lane` (0..3) of a 128-bit
 * float32x4 vector into all 4 lanes of the result. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vdupq_laneq_f32(simde_float32x4_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  return simde_vdupq_n_f32(simde_float32x4_to_private(vec).values[lane]);
}
/* Prefer the native intrinsic; otherwise a portable vector shuffle (element size 32 bits, 16-byte vectors). */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdupq_laneq_f32(vec, lane) vdupq_laneq_f32(vec, lane)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
  #define simde_vdupq_laneq_f32(vec, lane) (__extension__ ({ \
    simde_float32x4_private simde_vdupq_laneq_f32_vec_ = simde_float32x4_to_private(vec); \
    simde_float32x4_private simde_vdupq_laneq_f32_r_; \
    simde_vdupq_laneq_f32_r_.values = \
      SIMDE_SHUFFLE_VECTOR_( \
        32, 16, \
        simde_vdupq_laneq_f32_vec_.values, \
        simde_vdupq_laneq_f32_vec_.values, \
        lane, lane, lane, lane \
      ); \
    simde_float32x4_from_private(simde_vdupq_laneq_f32_r_); \
  }))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdupq_laneq_f32
  #define vdupq_laneq_f32(vec, lane) simde_vdupq_laneq_f32((vec), (lane))
#endif
/* vdupq_laneq_f64 (AArch64): broadcast element `lane` (0..1) of a 128-bit
 * float64x2 vector into both lanes of the result. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vdupq_laneq_f64(simde_float64x2_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  return simde_vdupq_n_f64(simde_float64x2_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdupq_laneq_f64(vec, lane) vdupq_laneq_f64(vec, lane)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
  #define simde_vdupq_laneq_f64(vec, lane) (__extension__ ({ \
    simde_float64x2_private simde_vdupq_laneq_f64_vec_ = simde_float64x2_to_private(vec); \
    simde_float64x2_private simde_vdupq_laneq_f64_r_; \
    simde_vdupq_laneq_f64_r_.values = \
      SIMDE_SHUFFLE_VECTOR_( \
        64, 16, \
        simde_vdupq_laneq_f64_vec_.values, \
        simde_vdupq_laneq_f64_vec_.values, \
        lane, lane \
      ); \
    simde_float64x2_from_private(simde_vdupq_laneq_f64_r_); \
  }))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdupq_laneq_f64
  #define vdupq_laneq_f64(vec, lane) simde_vdupq_laneq_f64((vec), (lane))
#endif
/* vdupq_laneq_s8 (AArch64): broadcast element `lane` (0..15) of a 128-bit
 * int8x16 vector into all 16 lanes of the result. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vdupq_laneq_s8(simde_int8x16_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) {
  return simde_vdupq_n_s8(simde_int8x16_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdupq_laneq_s8(vec, lane) vdupq_laneq_s8(vec, lane)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
  #define simde_vdupq_laneq_s8(vec, lane) (__extension__ ({ \
    simde_int8x16_private simde_vdupq_laneq_s8_vec_ = simde_int8x16_to_private(vec); \
    simde_int8x16_private simde_vdupq_laneq_s8_r_; \
    simde_vdupq_laneq_s8_r_.values = \
      SIMDE_SHUFFLE_VECTOR_( \
        8, 16, \
        simde_vdupq_laneq_s8_vec_.values, \
        simde_vdupq_laneq_s8_vec_.values, \
        lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane \
      ); \
    simde_int8x16_from_private(simde_vdupq_laneq_s8_r_); \
  }))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdupq_laneq_s8
  #define vdupq_laneq_s8(vec, lane) simde_vdupq_laneq_s8((vec), (lane))
#endif
/* vdupq_laneq_s16 (AArch64): broadcast element `lane` (0..7) of a 128-bit
 * int16x8 vector into all 8 lanes of the result. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vdupq_laneq_s16(simde_int16x8_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
  return simde_vdupq_n_s16(simde_int16x8_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdupq_laneq_s16(vec, lane) vdupq_laneq_s16(vec, lane)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
  #define simde_vdupq_laneq_s16(vec, lane) (__extension__ ({ \
    simde_int16x8_private simde_vdupq_laneq_s16_vec_ = simde_int16x8_to_private(vec); \
    simde_int16x8_private simde_vdupq_laneq_s16_r_; \
    simde_vdupq_laneq_s16_r_.values = \
      SIMDE_SHUFFLE_VECTOR_( \
        16, 16, \
        simde_vdupq_laneq_s16_vec_.values, \
        simde_vdupq_laneq_s16_vec_.values, \
        lane, lane, lane, lane, lane, lane, lane, lane \
      ); \
    simde_int16x8_from_private(simde_vdupq_laneq_s16_r_); \
  }))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdupq_laneq_s16
  #define vdupq_laneq_s16(vec, lane) simde_vdupq_laneq_s16((vec), (lane))
#endif
/* vdupq_laneq_s32 (AArch64): broadcast element `lane` (0..3) of a 128-bit
 * int32x4 vector into all 4 lanes of the result. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vdupq_laneq_s32(simde_int32x4_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  return simde_vdupq_n_s32(simde_int32x4_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdupq_laneq_s32(vec, lane) vdupq_laneq_s32(vec, lane)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
  #define simde_vdupq_laneq_s32(vec, lane) (__extension__ ({ \
    simde_int32x4_private simde_vdupq_laneq_s32_vec_ = simde_int32x4_to_private(vec); \
    simde_int32x4_private simde_vdupq_laneq_s32_r_; \
    simde_vdupq_laneq_s32_r_.values = \
      SIMDE_SHUFFLE_VECTOR_( \
        32, 16, \
        simde_vdupq_laneq_s32_vec_.values, \
        simde_vdupq_laneq_s32_vec_.values, \
        lane, lane, lane, lane \
      ); \
    simde_int32x4_from_private(simde_vdupq_laneq_s32_r_); \
  }))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdupq_laneq_s32
  #define vdupq_laneq_s32(vec, lane) simde_vdupq_laneq_s32((vec), (lane))
#endif
/* vdupq_laneq_s64 (AArch64): broadcast element `lane` (0..1) of a 128-bit
 * int64x2 vector into both lanes of the result. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vdupq_laneq_s64(simde_int64x2_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  return simde_vdupq_n_s64(simde_int64x2_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdupq_laneq_s64(vec, lane) vdupq_laneq_s64(vec, lane)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
  #define simde_vdupq_laneq_s64(vec, lane) (__extension__ ({ \
    simde_int64x2_private simde_vdupq_laneq_s64_vec_ = simde_int64x2_to_private(vec); \
    simde_int64x2_private simde_vdupq_laneq_s64_r_; \
    simde_vdupq_laneq_s64_r_.values = \
      SIMDE_SHUFFLE_VECTOR_( \
        64, 16, \
        simde_vdupq_laneq_s64_vec_.values, \
        simde_vdupq_laneq_s64_vec_.values, \
        lane, lane \
      ); \
    simde_int64x2_from_private(simde_vdupq_laneq_s64_r_); \
  }))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdupq_laneq_s64
  #define vdupq_laneq_s64(vec, lane) simde_vdupq_laneq_s64((vec), (lane))
#endif
/* vdupq_laneq_u8 (AArch64): broadcast element `lane` (0..15) of a 128-bit
 * uint8x16 vector into all 16 lanes of the result. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vdupq_laneq_u8(simde_uint8x16_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) {
  return simde_vdupq_n_u8(simde_uint8x16_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdupq_laneq_u8(vec, lane) vdupq_laneq_u8(vec, lane)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
  #define simde_vdupq_laneq_u8(vec, lane) (__extension__ ({ \
    simde_uint8x16_private simde_vdupq_laneq_u8_vec_ = simde_uint8x16_to_private(vec); \
    simde_uint8x16_private simde_vdupq_laneq_u8_r_; \
    simde_vdupq_laneq_u8_r_.values = \
      SIMDE_SHUFFLE_VECTOR_( \
        8, 16, \
        simde_vdupq_laneq_u8_vec_.values, \
        simde_vdupq_laneq_u8_vec_.values, \
        lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane \
      ); \
    simde_uint8x16_from_private(simde_vdupq_laneq_u8_r_); \
  }))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdupq_laneq_u8
  #define vdupq_laneq_u8(vec, lane) simde_vdupq_laneq_u8((vec), (lane))
#endif
/* vdupq_laneq_u16 (AArch64): broadcast element `lane` (0..7) of a 128-bit
 * uint16x8 vector into all 8 lanes of the result. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vdupq_laneq_u16(simde_uint16x8_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
  return simde_vdupq_n_u16(simde_uint16x8_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdupq_laneq_u16(vec, lane) vdupq_laneq_u16(vec, lane)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
  #define simde_vdupq_laneq_u16(vec, lane) (__extension__ ({ \
    simde_uint16x8_private simde_vdupq_laneq_u16_vec_ = simde_uint16x8_to_private(vec); \
    simde_uint16x8_private simde_vdupq_laneq_u16_r_; \
    simde_vdupq_laneq_u16_r_.values = \
      SIMDE_SHUFFLE_VECTOR_( \
        16, 16, \
        simde_vdupq_laneq_u16_vec_.values, \
        simde_vdupq_laneq_u16_vec_.values, \
        lane, lane, lane, lane, lane, lane, lane, lane \
      ); \
    simde_uint16x8_from_private(simde_vdupq_laneq_u16_r_); \
  }))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdupq_laneq_u16
  #define vdupq_laneq_u16(vec, lane) simde_vdupq_laneq_u16((vec), (lane))
#endif
/* vdupq_laneq_u32 (AArch64): broadcast element `lane` (0..3) of a 128-bit
 * uint32x4 vector into all 4 lanes of the result. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vdupq_laneq_u32(simde_uint32x4_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  return simde_vdupq_n_u32(simde_uint32x4_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdupq_laneq_u32(vec, lane) vdupq_laneq_u32(vec, lane)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
  #define simde_vdupq_laneq_u32(vec, lane) (__extension__ ({ \
    simde_uint32x4_private simde_vdupq_laneq_u32_vec_ = simde_uint32x4_to_private(vec); \
    simde_uint32x4_private simde_vdupq_laneq_u32_r_; \
    simde_vdupq_laneq_u32_r_.values = \
      SIMDE_SHUFFLE_VECTOR_( \
        32, 16, \
        simde_vdupq_laneq_u32_vec_.values, \
        simde_vdupq_laneq_u32_vec_.values, \
        lane, lane, lane, lane \
      ); \
    simde_uint32x4_from_private(simde_vdupq_laneq_u32_r_); \
  }))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdupq_laneq_u32
  #define vdupq_laneq_u32(vec, lane) simde_vdupq_laneq_u32((vec), (lane))
#endif
/* vdupq_laneq_u64 (AArch64): broadcast element `lane` (0..1) of a 128-bit
 * uint64x2 vector into both lanes of the result. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vdupq_laneq_u64(simde_uint64x2_t vec, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  return simde_vdupq_n_u64(simde_uint64x2_to_private(vec).values[lane]);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vdupq_laneq_u64(vec, lane) vdupq_laneq_u64(vec, lane)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
  #define simde_vdupq_laneq_u64(vec, lane) (__extension__ ({ \
    simde_uint64x2_private simde_vdupq_laneq_u64_vec_ = simde_uint64x2_to_private(vec); \
    simde_uint64x2_private simde_vdupq_laneq_u64_r_; \
    simde_vdupq_laneq_u64_r_.values = \
      SIMDE_SHUFFLE_VECTOR_( \
        64, 16, \
        simde_vdupq_laneq_u64_vec_.values, \
        simde_vdupq_laneq_u64_vec_.values, \
        lane, lane \
      ); \
    simde_uint64x2_from_private(simde_vdupq_laneq_u64_r_); \
  }))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vdupq_laneq_u64
  #define vdupq_laneq_u64(vec, lane) simde_vdupq_laneq_u64((vec), (lane))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_DUP_LANE_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
* 2021 Zhi An Ng <zhin@google.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_ST4_LANE_H)
#define SIMDE_ARM_NEON_ST4_LANE_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
#if !defined(SIMDE_BUG_INTEL_857088)
/* vst4_lane_s8: store lane `lane` (0..7) of each of the four int8x8 vectors
 * in `val` to ptr[0..3] (single-lane 4-way interleaved store). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4_lane_s8(int8_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_int8x8x4_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    /* The native intrinsic needs a compile-time lane; CONSTIFY expands all 8 cases. */
    SIMDE_CONSTIFY_8_NO_RESULT_(vst4_lane_s8, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_int8x8_private r;
    for (size_t i = 0 ; i < 4 ; i++) {
      r = simde_int8x8_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst4_lane_s8
  #define vst4_lane_s8(a, b, c) simde_vst4_lane_s8((a), (b), (c))
#endif
/* vst4_lane_s16: store lane `lane` (0..3) of each of the four int16x4 vectors
 * in `val` to ptr[0..3]. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4_lane_s16(int16_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_int16x4x4_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    SIMDE_CONSTIFY_4_NO_RESULT_(vst4_lane_s16, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_int16x4_private r;
    for (size_t i = 0 ; i < 4 ; i++) {
      r = simde_int16x4_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst4_lane_s16
  #define vst4_lane_s16(a, b, c) simde_vst4_lane_s16((a), (b), (c))
#endif
/* vst4_lane_s32: store lane `lane` (0..1) of each of the four int32x2 vectors
 * in `val` to ptr[0..3]. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4_lane_s32(int32_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_int32x2x4_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    SIMDE_CONSTIFY_2_NO_RESULT_(vst4_lane_s32, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_int32x2_private r;
    for (size_t i = 0 ; i < 4 ; i++) {
      r = simde_int32x2_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst4_lane_s32
  #define vst4_lane_s32(a, b, c) simde_vst4_lane_s32((a), (b), (c))
#endif
/* vst4_lane_s64: store the single lane (lane must be 0) of each of the four
 * int64x1 vectors in `val` to ptr[0..3].  Native only on AArch64. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4_lane_s64(int64_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_int64x1x4_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    /* lane can only be 0, so it is hard-coded for the intrinsic. */
    (void) lane;
    vst4_lane_s64(ptr, val, 0);
  #else
    simde_int64x1_private r;
    for (size_t i = 0 ; i < 4 ; i++) {
      r = simde_int64x1_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vst4_lane_s64
  #define vst4_lane_s64(a, b, c) simde_vst4_lane_s64((a), (b), (c))
#endif
/* vst4_lane_u8: store lane `lane` (0..7) of each of the four uint8x8 vectors
 * in `val` to ptr[0..3]. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4_lane_u8(uint8_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint8x8x4_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    SIMDE_CONSTIFY_8_NO_RESULT_(vst4_lane_u8, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_uint8x8_private r;
    for (size_t i = 0 ; i < 4 ; i++) {
      r = simde_uint8x8_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst4_lane_u8
  #define vst4_lane_u8(a, b, c) simde_vst4_lane_u8((a), (b), (c))
#endif
/* vst4_lane_u16: store lane `lane` (0..3) of each of the four uint16x4 vectors
 * in `val` to ptr[0..3]. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4_lane_u16(uint16_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint16x4x4_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    SIMDE_CONSTIFY_4_NO_RESULT_(vst4_lane_u16, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_uint16x4_private r;
    for (size_t i = 0 ; i < 4 ; i++) {
      r = simde_uint16x4_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst4_lane_u16
  #define vst4_lane_u16(a, b, c) simde_vst4_lane_u16((a), (b), (c))
#endif
/* vst4_lane_u32: store lane `lane` (0..1) of each of the four uint32x2 vectors
 * in `val` to ptr[0..3]. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4_lane_u32(uint32_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint32x2x4_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    SIMDE_CONSTIFY_2_NO_RESULT_(vst4_lane_u32, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_uint32x2_private r;
    for (size_t i = 0 ; i < 4 ; i++) {
      r = simde_uint32x2_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst4_lane_u32
  #define vst4_lane_u32(a, b, c) simde_vst4_lane_u32((a), (b), (c))
#endif
/* vst4_lane_u64: store the single lane (lane must be 0) of each of the four
 * uint64x1 vectors in `val` to ptr[0..3].  Native only on AArch64. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4_lane_u64(uint64_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint64x1x4_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    /* lane can only be 0, so it is hard-coded for the intrinsic. */
    (void) lane;
    vst4_lane_u64(ptr, val, 0);
  #else
    simde_uint64x1_private r;
    for (size_t i = 0 ; i < 4 ; i++) {
      r = simde_uint64x1_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vst4_lane_u64
  #define vst4_lane_u64(a, b, c) simde_vst4_lane_u64((a), (b), (c))
#endif
/* vst4_lane_f32: store lane `lane` (0..1) of each of the four float32x2
 * vectors in `val` to ptr[0..3]. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4_lane_f32(simde_float32_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_float32x2x4_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    SIMDE_CONSTIFY_2_NO_RESULT_(vst4_lane_f32, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_float32x2_private r;
    for (size_t i = 0 ; i < 4 ; i++) {
      r = simde_float32x2_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst4_lane_f32
  #define vst4_lane_f32(a, b, c) simde_vst4_lane_f32((a), (b), (c))
#endif
/* vst4_lane_f64: store the single lane (lane must be 0) of each of the four
 * float64x1 vectors in `val` to ptr[0..3].  Native only on AArch64. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4_lane_f64(simde_float64_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_float64x1x4_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    /* lane can only be 0, so it is hard-coded for the intrinsic. */
    (void) lane;
    vst4_lane_f64(ptr, val, 0);
  #else
    simde_float64x1_private r;
    for (size_t i = 0 ; i < 4 ; i++) {
      r = simde_float64x1_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vst4_lane_f64
  #define vst4_lane_f64(a, b, c) simde_vst4_lane_f64((a), (b), (c))
#endif
/* vst4q_lane_s8 (AArch64): store lane `lane` (0..15) of each of the four
 * int8x16 vectors in `val` to ptr[0..3]. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4q_lane_s8(int8_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_int8x16x4_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    SIMDE_CONSTIFY_16_NO_RESULT_(vst4q_lane_s8, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_int8x16_private r;
    for (size_t i = 0 ; i < 4 ; i++) {
      r = simde_int8x16_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vst4q_lane_s8
  #define vst4q_lane_s8(a, b, c) simde_vst4q_lane_s8((a), (b), (c))
#endif
/* vst4q_lane_s16: store lane `lane` (0..7) of each of the four int16x8
 * vectors in `val` to ptr[0..3]. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4q_lane_s16(int16_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_int16x8x4_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    SIMDE_CONSTIFY_8_NO_RESULT_(vst4q_lane_s16, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_int16x8_private r;
    for (size_t i = 0 ; i < 4 ; i++) {
      r = simde_int16x8_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst4q_lane_s16
  #define vst4q_lane_s16(a, b, c) simde_vst4q_lane_s16((a), (b), (c))
#endif
/* vst4q_lane_s32: store lane `lane` (0..3) of each of the four int32x4
 * vectors in `val` to ptr[0..3]. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4q_lane_s32(int32_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_int32x4x4_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    SIMDE_CONSTIFY_4_NO_RESULT_(vst4q_lane_s32, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_int32x4_private r;
    for (size_t i = 0 ; i < 4 ; i++) {
      r = simde_int32x4_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst4q_lane_s32
  #define vst4q_lane_s32(a, b, c) simde_vst4q_lane_s32((a), (b), (c))
#endif
/* vst4q_lane_s64 (AArch64): store lane `lane` (0..1) of each of the four
 * int64x2 vectors in `val` to ptr[0..3]. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4q_lane_s64(int64_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_int64x2x4_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    SIMDE_CONSTIFY_2_NO_RESULT_(vst4q_lane_s64, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_int64x2_private r;
    for (size_t i = 0 ; i < 4 ; i++) {
      r = simde_int64x2_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vst4q_lane_s64
  #define vst4q_lane_s64(a, b, c) simde_vst4q_lane_s64((a), (b), (c))
#endif
/* vst4q_lane_u8 (AArch64): store lane `lane` (0..15) of each of the four
 * uint8x16 vectors in `val` to ptr[0..3]. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4q_lane_u8(uint8_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint8x16x4_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    SIMDE_CONSTIFY_16_NO_RESULT_(vst4q_lane_u8, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_uint8x16_private r;
    for (size_t i = 0 ; i < 4 ; i++) {
      r = simde_uint8x16_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vst4q_lane_u8
  #define vst4q_lane_u8(a, b, c) simde_vst4q_lane_u8((a), (b), (c))
#endif
/* vst4q_lane_u16: store lane `lane` (0..7) of each of the four uint16x8
 * vectors in `val` to ptr[0..3]. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4q_lane_u16(uint16_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint16x8x4_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    SIMDE_CONSTIFY_8_NO_RESULT_(vst4q_lane_u16, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_uint16x8_private r;
    for (size_t i = 0 ; i < 4 ; i++) {
      r = simde_uint16x8_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst4q_lane_u16
  #define vst4q_lane_u16(a, b, c) simde_vst4q_lane_u16((a), (b), (c))
#endif
/* vst4q_lane_u32: store lane `lane` (0..3) of each of the four uint32x4
 * vectors in `val` to ptr[0..3]. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4q_lane_u32(uint32_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint32x4x4_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    SIMDE_CONSTIFY_4_NO_RESULT_(vst4q_lane_u32, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_uint32x4_private r;
    for (size_t i = 0 ; i < 4 ; i++) {
      r = simde_uint32x4_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst4q_lane_u32
  #define vst4q_lane_u32(a, b, c) simde_vst4q_lane_u32((a), (b), (c))
#endif
/* vst4q_lane_u64 (AArch64): store lane `lane` (0..1) of each of the four
 * uint64x2 vectors in `val` to ptr[0..3]. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4q_lane_u64(uint64_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint64x2x4_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    SIMDE_CONSTIFY_2_NO_RESULT_(vst4q_lane_u64, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_uint64x2_private r;
    for (size_t i = 0 ; i < 4 ; i++) {
      r = simde_uint64x2_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vst4q_lane_u64
  #define vst4q_lane_u64(a, b, c) simde_vst4q_lane_u64((a), (b), (c))
#endif
/* vst4q_lane_f32: store lane `lane` (0..3) of each of the four float32x4
 * vectors in `val` to ptr[0..3]. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4q_lane_f32(simde_float32_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_float32x4x4_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    SIMDE_CONSTIFY_4_NO_RESULT_(vst4q_lane_f32, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_float32x4_private r;
    for (size_t i = 0 ; i < 4 ; i++) {
      r = simde_float32x4_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst4q_lane_f32
  #define vst4q_lane_f32(a, b, c) simde_vst4q_lane_f32((a), (b), (c))
#endif
/* vst4q_lane_f64 (AArch64): store lane `lane` of each of the four float64x2
 * vectors in `val` to ptr[0..3]. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst4q_lane_f64(simde_float64_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_float64x2x4_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    /* NOTE(review): native path ignores `lane` and always stores lane 0,
     * unlike the 0..1 range accepted by the portable path — matches upstream,
     * but worth confirming against the current simde release. */
    (void) lane;
    vst4q_lane_f64(ptr, val, 0);
  #else
    simde_float64x2_private r;
    for (size_t i = 0 ; i < 4 ; i++) {
      r = simde_float64x2_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vst4q_lane_f64
  #define vst4q_lane_f64(a, b, c) simde_vst4q_lane_f64((a), (b), (c))
#endif
#endif /* !defined(SIMDE_BUG_INTEL_857088) */
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_ST4_LANE_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_CEQ_H)
#define SIMDE_ARM_NEON_CEQ_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vceqh_f16: scalar half-precision compare-equal.  Returns an all-ones
 * 16-bit mask (UINT16_MAX) when a == b, else 0.  The portable path compares
 * after converting both operands to float32. */
SIMDE_FUNCTION_ATTRIBUTES
uint16_t
simde_vceqh_f16(simde_float16_t a, simde_float16_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16)
    return vceqh_f16(a, b);
  #else
    return (simde_float16_to_float32(a) == simde_float16_to_float32(b)) ? UINT16_MAX : UINT16_C(0);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vceqh_f16
  #define vceqh_f16(a, b) simde_vceqh_f16((a), (b))
#endif
/* vceqs_f32: scalar single-precision compare-equal.  Returns an all-ones
 * 32-bit mask when a == b, else 0. */
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_vceqs_f32(simde_float32_t a, simde_float32_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vceqs_f32(a, b);
  #else
    return (a == b) ? ~UINT32_C(0) : UINT32_C(0);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vceqs_f32
  #define vceqs_f32(a, b) simde_vceqs_f32((a), (b))
#endif
/* vceqd_f64: scalar double-precision compare-equal.  Returns an all-ones
 * 64-bit mask when a == b, else 0. */
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vceqd_f64(simde_float64_t a, simde_float64_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vceqd_f64(a, b);
  #else
    return (a == b) ? ~UINT64_C(0) : UINT64_C(0);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vceqd_f64
  #define vceqd_f64(a, b) simde_vceqd_f64((a), (b))
#endif
/* Scalar equality compare of two signed 64-bit integers (AArch64
 * `vceqd_s64`): all-ones mask on equality, zero otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vceqd_s64(int64_t a, int64_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* The intrinsic's result type varies across toolchains; normalize it. */
return HEDLEY_STATIC_CAST(uint64_t, vceqd_s64(a, b));
#else
/* UINT64_MAX == ~UINT64_C(0); condition inverted relative to the mask. */
return (a != b) ? UINT64_C(0) : UINT64_MAX;
#endif
}
/* Expose the un-prefixed NEON name when native aliases are requested. */
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vceqd_s64
#define vceqd_s64(a, b) simde_vceqd_s64((a), (b))
#endif
/* Scalar equality compare of two unsigned 64-bit integers (AArch64
 * `vceqd_u64`): all-ones mask on equality, zero otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vceqd_u64(uint64_t a, uint64_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vceqd_u64(a, b);
#else
/* ~UINT64_C(0) is the all-ones "true" mask NEON compares produce. */
return (a == b) ? ~UINT64_C(0) : UINT64_C(0);
#endif
}
/* Expose the un-prefixed NEON name when native aliases are requested. */
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vceqd_u64
#define vceqd_u64(a, b) simde_vceqd_u64((a), (b))
#endif
/* Lane-wise equality compare of two 4-lane float16 vectors (`vceq_f16`).
 * Each output lane is all-ones when the corresponding input lanes compare
 * equal, zero otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vceq_f16(simde_float16x4_t a, simde_float16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16)
return vceq_f16(a, b);
#else
simde_uint16x4_private r_;
simde_float16x4_private
a_ = simde_float16x4_to_private(a),
b_ = simde_float16x4_to_private(b);
/* No vector-extension fast path here: float16 lanes must go through the
 * scalar helper, which handles widening to float32. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vceqh_f16(a_.values[i], b_.values[i]);
}
return simde_uint16x4_from_private(r_);
#endif
}
/* Expose the un-prefixed NEON name when native aliases are requested. */
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES)
#undef vceq_f16
#define vceq_f16(a, b) simde_vceq_f16((a), (b))
#endif
/* Lane-wise equality compare of two 2-lane float32 vectors (`vceq_f32`).
 * Each output lane is all-ones on equality, zero otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vceq_f32(simde_float32x2_t a, simde_float32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vceq_f32(a, b);
#else
simde_uint32x2_private r_;
simde_float32x2_private
a_ = simde_float32x2_to_private(a),
b_ = simde_float32x2_to_private(b);
/* GCC vector extensions already yield a -1/0 lane mask from `==`;
 * SIMDE_BUG_GCC_100762 disables this path on affected GCC versions. */
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] == b_.values[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde_uint32x2_from_private(r_);
#endif
}
/* Expose the un-prefixed NEON name when native aliases are requested. */
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vceq_f32
#define vceq_f32(a, b) simde_vceq_f32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vceq_f64(simde_float64x1_t a, simde_float64x1_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vceq_f64(a, b);
#else
simde_uint64x1_private r_;
simde_float64x1_private
a_ = simde_float64x1_to_private(a),
b_ = simde_float64x1_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] == b_.values[i]) ? ~UINT64_C(0) : UINT64_C(0);
}
#endif
return simde_uint64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vceq_f64
#define vceq_f64(a, b) simde_vceq_f64((a), (b))
#endif
/* Lane-wise equality compare of two 8-lane int8 vectors (`vceq_s8`).
 * Each output lane is 0xFF on equality, 0x00 otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vceq_s8(simde_int8x8_t a, simde_int8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vceq_s8(a, b);
#else
simde_uint8x8_private r_;
simde_int8x8_private
a_ = simde_int8x8_to_private(a),
b_ = simde_int8x8_to_private(b);
/* x86 MMX maps directly: _mm_cmpeq_pi8 produces the same 0xFF/0x00 mask. */
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_cmpeq_pi8(a_.m64, b_.m64);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values)
;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] == b_.values[i]) ? ~UINT8_C(0) : UINT8_C(0);
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
/* Expose the un-prefixed NEON name when native aliases are requested. */
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vceq_s8
#define vceq_s8(a, b) simde_vceq_s8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vceq_s16(simde_int16x4_t a, simde_int16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vceq_s16(a, b);
#else
simde_uint16x4_private r_;
simde_int16x4_private
a_ = simde_int16x4_to_private(a),
b_ = simde_int16x4_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_cmpeq_pi16(a_.m64, b_.m64);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] == b_.values[i]) ? ~UINT16_C(0) : UINT16_C(0);
}
#endif
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vceq_s16
#define vceq_s16(a, b) simde_vceq_s16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vceq_s32(simde_int32x2_t a, simde_int32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vceq_s32(a, b);
#else
simde_uint32x2_private r_;
simde_int32x2_private
a_ = simde_int32x2_to_private(a),
b_ = simde_int32x2_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_cmpeq_pi32(a_.m64, b_.m64);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] == b_.values[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde_uint32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vceq_s32
#define vceq_s32(a, b) simde_vceq_s32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vceq_s64(simde_int64x1_t a, simde_int64x1_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vceq_s64(a, b);
#else
simde_uint64x1_private r_;
simde_int64x1_private
a_ = simde_int64x1_to_private(a),
b_ = simde_int64x1_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] == b_.values[i]) ? ~UINT64_C(0) : UINT64_C(0);
}
#endif
return simde_uint64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vceq_s64
#define vceq_s64(a, b) simde_vceq_s64((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vceq_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vceq_u8(a, b);
#else
simde_uint8x8_private r_;
simde_uint8x8_private
a_ = simde_uint8x8_to_private(a),
b_ = simde_uint8x8_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] == b_.values[i]) ? ~UINT8_C(0) : UINT8_C(0);
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vceq_u8
#define vceq_u8(a, b) simde_vceq_u8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vceq_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vceq_u16(a, b);
#else
simde_uint16x4_private r_;
simde_uint16x4_private
a_ = simde_uint16x4_to_private(a),
b_ = simde_uint16x4_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] == b_.values[i]) ? ~UINT16_C(0) : UINT16_C(0);
}
#endif
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vceq_u16
#define vceq_u16(a, b) simde_vceq_u16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vceq_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vceq_u32(a, b);
#else
simde_uint32x2_private r_;
simde_uint32x2_private
a_ = simde_uint32x2_to_private(a),
b_ = simde_uint32x2_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] == b_.values[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde_uint32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vceq_u32
#define vceq_u32(a, b) simde_vceq_u32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vceq_u64(simde_uint64x1_t a, simde_uint64x1_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vceq_u64(a, b);
#else
simde_uint64x1_private r_;
simde_uint64x1_private
a_ = simde_uint64x1_to_private(a),
b_ = simde_uint64x1_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] == b_.values[i]) ? ~UINT64_C(0) : UINT64_C(0);
}
#endif
return simde_uint64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vceq_u64
#define vceq_u64(a, b) simde_vceq_u64((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vceqq_f16(simde_float16x8_t a, simde_float16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16)
return vceqq_f16(a, b);
#else
simde_uint16x8_private r_;
simde_float16x8_private
a_ = simde_float16x8_to_private(a),
b_ = simde_float16x8_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vceqh_f16(a_.values[i], b_.values[i]);
}
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES)
#undef vceqq_f16
#define vceqq_f16(a, b) simde_vceqq_f16((a), (b))
#endif
/* Lane-wise equality compare of two 4-lane float32 vectors (`vceqq_f32`,
 * 128-bit "q" form). Each output lane is all-ones on equality, zero
 * otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vceqq_f32(simde_float32x4_t a, simde_float32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vceqq_f32(a, b)
;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
/* AltiVec vec_cmpeq returns a bool vector; reinterpret to unsigned. */
return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmpeq(a, b));
#else
simde_uint32x4_private r_;
simde_float32x4_private
a_ = simde_float32x4_to_private(a),
b_ = simde_float32x4_to_private(b);
/* SSE2 compares floats then the mask is bit-cast to the integer register. */
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_castps_si128(_mm_cmpeq_ps(a_.m128, b_.m128));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_f32x4_eq(a_.v128, b_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] == b_.values[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde_uint32x4_from_private(r_);
#endif
}
/* Expose the un-prefixed NEON name when native aliases are requested. */
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vceqq_f32
#define vceqq_f32(a, b) simde_vceqq_f32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vceqq_f64(simde_float64x2_t a, simde_float64x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vceqq_f64(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmpeq(a, b));
#else
simde_uint64x2_private r_;
simde_float64x2_private
a_ = simde_float64x2_to_private(a),
b_ = simde_float64x2_to_private(b);
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_castpd_si128(_mm_cmpeq_pd(a_.m128d, b_.m128d));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_f64x2_eq(a_.v128, b_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] == b_.values[i]) ? ~UINT64_C(0) : UINT64_C(0);
}
#endif
return simde_uint64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vceqq_f64
#define vceqq_f64(a, b) simde_vceqq_f64((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vceqq_s8(simde_int8x16_t a, simde_int8x16_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vceqq_s8(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_cmpeq(a, b));
#else
simde_uint8x16_private r_;
simde_int8x16_private
a_ = simde_int8x16_to_private(a),
b_ = simde_int8x16_to_private(b);
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_cmpeq_epi8(a_.m128i, b_.m128i);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i8x16_eq(a_.v128, b_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] == b_.values[i]) ? ~UINT8_C(0) : UINT8_C(0);
}
#endif
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vceqq_s8
#define vceqq_s8(a, b) simde_vceqq_s8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vceqq_s16(simde_int16x8_t a, simde_int16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vceqq_s16(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), vec_cmpeq(a, b));
#else
simde_uint16x8_private r_;
simde_int16x8_private
a_ = simde_int16x8_to_private(a),
b_ = simde_int16x8_to_private(b);
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_cmpeq_epi16(a_.m128i, b_.m128i);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i16x8_eq(a_.v128, b_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] == b_.values[i]) ? ~UINT16_C(0) : UINT16_C(0);
}
#endif
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vceqq_s16
#define vceqq_s16(a, b) simde_vceqq_s16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vceqq_s32(simde_int32x4_t a, simde_int32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vceqq_s32(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmpeq(a, b));
#else
simde_uint32x4_private r_;
simde_int32x4_private
a_ = simde_int32x4_to_private(a),
b_ = simde_int32x4_to_private(b);
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_cmpeq_epi32(a_.m128i, b_.m128i);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i32x4_eq(a_.v128, b_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] == b_.values[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde_uint32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vceqq_s32
#define vceqq_s32(a, b) simde_vceqq_s32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vceqq_s64(simde_int64x2_t a, simde_int64x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vceqq_s64(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmpeq(a, b));
#else
simde_uint64x2_private r_;
simde_int64x2_private
a_ = simde_int64x2_to_private(a),
b_ = simde_int64x2_to_private(b);
#if defined(SIMDE_X86_SSE4_1_NATIVE)
r_.m128i = _mm_cmpeq_epi64(a_.m128i, b_.m128i);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] == b_.values[i]) ? ~UINT64_C(0) : UINT64_C(0);
}
#endif
return simde_uint64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vceqq_s64
#define vceqq_s64(a, b) simde_vceqq_s64((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vceqq_u8(simde_uint8x16_t a, simde_uint8x16_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vceqq_u8(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_cmpeq(a, b));
#else
simde_uint8x16_private r_;
simde_uint8x16_private
a_ = simde_uint8x16_to_private(a),
b_ = simde_uint8x16_to_private(b);
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_cmpeq_epi8(a_.m128i, b_.m128i);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] == b_.values[i]) ? ~UINT8_C(0) : UINT8_C(0);
}
#endif
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vceqq_u8
#define vceqq_u8(a, b) simde_vceqq_u8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vceqq_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vceqq_u16(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), vec_cmpeq(a, b));
#else
simde_uint16x8_private r_;
simde_uint16x8_private
a_ = simde_uint16x8_to_private(a),
b_ = simde_uint16x8_to_private(b);
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_cmpeq_epi16(a_.m128i, b_.m128i);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] == b_.values[i]) ? ~UINT16_C(0) : UINT16_C(0);
}
#endif
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vceqq_u16
#define vceqq_u16(a, b) simde_vceqq_u16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vceqq_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vceqq_u32(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmpeq(a, b));
#else
simde_uint32x4_private r_;
simde_uint32x4_private
a_ = simde_uint32x4_to_private(a),
b_ = simde_uint32x4_to_private(b);
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_cmpeq_epi32(a_.m128i, b_.m128i);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] == b_.values[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde_uint32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vceqq_u32
#define vceqq_u32(a, b) simde_vceqq_u32((a), (b))
#endif
/* Lane-wise equality compare of two 2-lane uint64 vectors (`vceqq_u64`).
 * Each output lane is all-ones on equality, zero otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vceqq_u64(simde_uint64x2_t a, simde_uint64x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vceqq_u64(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
/* 64-bit vec_cmpeq requires POWER8; earlier AltiVec lacks it. */
return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmpeq(a, b));
#else
simde_uint64x2_private r_;
simde_uint64x2_private
a_ = simde_uint64x2_to_private(a),
b_ = simde_uint64x2_to_private(b);
/* 64-bit integer compare only exists from SSE4.1 (_mm_cmpeq_epi64). */
#if defined(SIMDE_X86_SSE4_1_NATIVE)
r_.m128i = _mm_cmpeq_epi64(a_.m128i, b_.m128i);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] == b_.values[i]) ? ~UINT64_C(0) : UINT64_C(0);
}
#endif
return simde_uint64x2_from_private(r_);
#endif
}
/* Expose the un-prefixed NEON name when native aliases are requested. */
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vceqq_u64
#define vceqq_u64(a, b) simde_vceqq_u64((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_CEQ_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/padal.h | .h | 6,129 | 212 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_PADAL_H)
#define SIMDE_ARM_NEON_PADAL_H
#include "types.h"
#include "add.h"
#include "paddl.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Pairwise add-and-accumulate (`vpadal_s8`): widen adjacent int8 pairs of
 * b to int16 sums, then add them into the int16x4 accumulator a. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vpadal_s8(simde_int16x4_t a, simde_int8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpadal_s8(a, b);
#else
/* Compose from the two simpler ops: pairwise-widen, then plain add. */
simde_int16x4_t pairwise_sums = simde_vpaddl_s8(b);
return simde_vadd_s16(a, pairwise_sums);
#endif
}
/* Expose the un-prefixed NEON name when native aliases are requested. */
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpadal_s8
#define vpadal_s8(a, b) simde_vpadal_s8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vpadal_s16(simde_int32x2_t a, simde_int16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpadal_s16(a, b);
#else
return simde_vadd_s32(a, simde_vpaddl_s16(b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpadal_s16
#define vpadal_s16(a, b) simde_vpadal_s16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vpadal_s32(simde_int64x1_t a, simde_int32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpadal_s32(a, b);
#else
return simde_vadd_s64(a, simde_vpaddl_s32(b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpadal_s32
#define vpadal_s32(a, b) simde_vpadal_s32((a), (b))
#endif
/* Pairwise add-and-accumulate (`vpadal_u8`): widen adjacent uint8 pairs of
 * b to uint16 sums, then add them into the uint16x4 accumulator a. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vpadal_u8(simde_uint16x4_t a, simde_uint8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpadal_u8(a, b);
#else
/* Composed from pairwise-widening add (vpaddl) followed by a plain add. */
return simde_vadd_u16(a, simde_vpaddl_u8(b));
#endif
}
/* Expose the un-prefixed NEON name when native aliases are requested. */
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpadal_u8
#define vpadal_u8(a, b) simde_vpadal_u8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vpadal_u16(simde_uint32x2_t a, simde_uint16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpadal_u16(a, b);
#else
return simde_vadd_u32(a, simde_vpaddl_u16(b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpadal_u16
#define vpadal_u16(a, b) simde_vpadal_u16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vpadal_u32(simde_uint64x1_t a, simde_uint32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpadal_u32(a, b);
#else
return simde_vadd_u64(a, simde_vpaddl_u32(b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpadal_u32
#define vpadal_u32(a, b) simde_vpadal_u32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vpadalq_s8(simde_int16x8_t a, simde_int8x16_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpadalq_s8(a, b);
#else
return simde_vaddq_s16(a, simde_vpaddlq_s8(b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpadalq_s8
#define vpadalq_s8(a, b) simde_vpadalq_s8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vpadalq_s16(simde_int32x4_t a, simde_int16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpadalq_s16(a, b);
#else
return simde_vaddq_s32(a, simde_vpaddlq_s16(b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpadalq_s16
#define vpadalq_s16(a, b) simde_vpadalq_s16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vpadalq_s32(simde_int64x2_t a, simde_int32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpadalq_s32(a, b);
#else
return simde_vaddq_s64(a, simde_vpaddlq_s32(b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpadalq_s32
#define vpadalq_s32(a, b) simde_vpadalq_s32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vpadalq_u8(simde_uint16x8_t a, simde_uint8x16_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpadalq_u8(a, b);
#else
return simde_vaddq_u16(a, simde_vpaddlq_u8(b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpadalq_u8
#define vpadalq_u8(a, b) simde_vpadalq_u8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vpadalq_u16(simde_uint32x4_t a, simde_uint16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpadalq_u16(a, b);
#else
return simde_vaddq_u32(a, simde_vpaddlq_u16(b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpadalq_u16
#define vpadalq_u16(a, b) simde_vpadalq_u16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vpadalq_u32(simde_uint64x2_t a, simde_uint32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vpadalq_u32(a, b);
#else
return simde_vaddq_u64(a, simde_vpaddlq_u32(b));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vpadalq_u32
#define vpadalq_u32(a, b) simde_vpadalq_u32((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* SIMDE_ARM_NEON_PADAL_H */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/qmovn.h | .h | 8,266 | 274 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_QMOVN_H)
#define SIMDE_ARM_NEON_QMOVN_H
#include "types.h"
#include "dup_n.h"
#include "min.h"
#include "max.h"
#include "movn.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Saturating narrow of one int16 lane to int8 (`vqmovnh_s16`): values
 * outside [INT8_MIN, INT8_MAX] clamp to the nearest bound. */
SIMDE_FUNCTION_ATTRIBUTES
int8_t
simde_vqmovnh_s16(int16_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqmovnh_s16(a);
#else
/* Guard clauses instead of a nested ternary: clamp high, clamp low,
 * otherwise the value fits and the cast is lossless. */
if (a > INT8_MAX) {
return INT8_MAX;
}
if (a < INT8_MIN) {
return INT8_MIN;
}
return HEDLEY_STATIC_CAST(int8_t, a);
#endif
}
/* Expose the un-prefixed NEON name when native aliases are requested. */
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqmovnh_s16
#define vqmovnh_s16(a) simde_vqmovnh_s16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_vqmovns_s32(int32_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqmovns_s32(a);
#else
return (a > INT16_MAX) ? INT16_MAX : ((a < INT16_MIN) ? INT16_MIN : HEDLEY_STATIC_CAST(int16_t, a));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqmovns_s32
#define vqmovns_s32(a) simde_vqmovns_s32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_vqmovnd_s64(int64_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqmovnd_s64(a);
#else
return (a > INT32_MAX) ? INT32_MAX : ((a < INT32_MIN) ? INT32_MIN : HEDLEY_STATIC_CAST(int32_t, a));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqmovnd_s64
#define vqmovnd_s64(a) simde_vqmovnd_s64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint8_t
simde_vqmovnh_u16(uint16_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqmovnh_u16(a);
#else
return (a > UINT8_MAX) ? UINT8_MAX : HEDLEY_STATIC_CAST(uint8_t, a);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqmovnh_u16
#define vqmovnh_u16(a) simde_vqmovnh_u16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint16_t
simde_vqmovns_u32(uint32_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqmovns_u32(a);
#else
return (a > UINT16_MAX) ? UINT16_MAX : HEDLEY_STATIC_CAST(uint16_t, a);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqmovns_u32
#define vqmovns_u32(a) simde_vqmovns_u32((a))
#endif
/* Saturating narrow of one uint64 lane to uint32 (`vqmovnd_u64`): values
 * above UINT32_MAX clamp to UINT32_MAX. */
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_vqmovnd_u64(uint64_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqmovnd_u64(a);
#else
/* Fits-in-range check first; the cast is lossless when it holds. */
return (a <= UINT32_MAX) ? HEDLEY_STATIC_CAST(uint32_t, a) : UINT32_MAX;
#endif
}
/* Expose the un-prefixed NEON name when native aliases are requested. */
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqmovnd_u64
#define vqmovnd_u64(a) simde_vqmovnd_u64((a))
#endif
/* Lane-wise saturating narrow of an int16x8 vector to int8x8 (`vqmovn_s16`):
 * each lane clamps to [INT8_MIN, INT8_MAX] before the width reduction. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vqmovn_s16(simde_int16x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqmovn_s16(a);
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
/* Vector path: clamp with max(min(a, INT8_MAX), INT8_MIN), then the plain
 * (non-saturating) narrow is safe because every lane already fits. */
return simde_vmovn_s16(simde_vmaxq_s16(simde_vdupq_n_s16(INT8_MIN), simde_vminq_s16(simde_vdupq_n_s16(INT8_MAX), a)));
#else
simde_int8x8_private r_;
simde_int16x8_private a_ = simde_int16x8_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vqmovnh_s16(a_.values[i]);
}
return simde_int8x8_from_private(r_);
#endif
}
/* Expose the un-prefixed NEON name when native aliases are requested. */
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqmovn_s16
#define vqmovn_s16(a) simde_vqmovn_s16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vqmovn_s32(simde_int32x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqmovn_s32(a);
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
return simde_vmovn_s32(simde_vmaxq_s32(simde_vdupq_n_s32(INT16_MIN), simde_vminq_s32(simde_vdupq_n_s32(INT16_MAX), a)));
#else
simde_int16x4_private r_;
simde_int32x4_private a_ = simde_int32x4_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vqmovns_s32(a_.values[i]);
}
return simde_int16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqmovn_s32
#define vqmovn_s32(a) simde_vqmovn_s32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vqmovn_s64(simde_int64x2_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqmovn_s64(a);
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
return simde_vmovn_s64(simde_x_vmaxq_s64(simde_vdupq_n_s64(INT32_MIN), simde_x_vminq_s64(simde_vdupq_n_s64(INT32_MAX), a)));
#else
simde_int32x2_private r_;
simde_int64x2_private a_ = simde_int64x2_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vqmovnd_s64(a_.values[i]);
}
return simde_int32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqmovn_s64
#define vqmovn_s64(a) simde_vqmovn_s64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vqmovn_u16(simde_uint16x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqmovn_u16(a);
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
return simde_vmovn_u16(simde_vminq_u16(a, simde_vdupq_n_u16(UINT8_MAX)));
#else
simde_uint8x8_private r_;
simde_uint16x8_private a_ = simde_uint16x8_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vqmovnh_u16(a_.values[i]);
}
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqmovn_u16
#define vqmovn_u16(a) simde_vqmovn_u16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vqmovn_u32(simde_uint32x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqmovn_u32(a);
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
return simde_vmovn_u32(simde_vminq_u32(a, simde_vdupq_n_u32(UINT16_MAX)));
#else
simde_uint16x4_private r_;
simde_uint32x4_private a_ = simde_uint32x4_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vqmovns_u32(a_.values[i]);
}
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqmovn_u32
#define vqmovn_u32(a) simde_vqmovn_u32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vqmovn_u64(simde_uint64x2_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqmovn_u64(a);
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
return simde_vmovn_u64(simde_x_vminq_u64(a, simde_vdupq_n_u64(UINT32_MAX)));
#else
simde_uint32x2_private r_;
simde_uint64x2_private a_ = simde_uint64x2_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vqmovnd_u64(a_.values[i]);
}
return simde_uint32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqmovn_u64
#define vqmovn_u64(a) simde_vqmovn_u64((a))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_QMOVN_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_SUBW_HIGH_H)
#define SIMDE_ARM_NEON_SUBW_HIGH_H
#include "types.h"
#include "movl_high.h"
#include "sub.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vsubw_high family: widening subtract of the HIGH half of b from a
 * (AArch64 SSUBW2/USUBW2): r[i] = a[i] - widen(b[i + lanes/2]).
 *
 * FIX(review): the previous scalar fallbacks contained a
 * SIMDE_CONVERT_VECTOR_ fast path that was broken in two ways:
 *   1. It converted the full-width b_ vector (2x the lanes) into the
 *      half-width r_ vector — an element-count mismatch, which
 *      __builtin_convertvector does not accept.
 *   2. It then computed `r_.values -= a_.values`, i.e. b - a, with the
 *      operands reversed (and using the LOW half of b rather than the high).
 * That branch has been removed; the scalar loop below implements the
 * documented semantics and is used whenever neither the native intrinsic
 * nor the 128-bit vmovl_high path is available. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vsubw_high_s8(simde_int16x8_t a, simde_int8x16_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vsubw_high_s8(a, b);
  #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
    return simde_vsubq_s16(a, simde_vmovl_high_s8(b));
  #else
    simde_int16x8_private r_;
    simde_int16x8_private a_ = simde_int16x8_to_private(a);
    simde_int8x16_private b_ = simde_int8x16_to_private(b);
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      /* Offset into the high half of b; widening happens via integer promotion. */
      r_.values[i] = a_.values[i] - b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)];
    }
    return simde_int16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vsubw_high_s8
  #define vsubw_high_s8(a, b) simde_vsubw_high_s8((a), (b))
#endif
/* vsubw_high_s16: r[i] = a[i] - widen(high half of b)[i], 32-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vsubw_high_s16(simde_int32x4_t a, simde_int16x8_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vsubw_high_s16(a, b);
  #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
    return simde_vsubq_s32(a, simde_vmovl_high_s16(b));
  #else
    simde_int32x4_private r_;
    simde_int32x4_private a_ = simde_int32x4_to_private(a);
    simde_int16x8_private b_ = simde_int16x8_to_private(b);
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = a_.values[i] - b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)];
    }
    return simde_int32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vsubw_high_s16
  #define vsubw_high_s16(a, b) simde_vsubw_high_s16((a), (b))
#endif
/* vsubw_high_s32: r[i] = a[i] - widen(high half of b)[i], 64-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vsubw_high_s32(simde_int64x2_t a, simde_int32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vsubw_high_s32(a, b);
  #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
    return simde_vsubq_s64(a, simde_vmovl_high_s32(b));
  #else
    simde_int64x2_private r_;
    simde_int64x2_private a_ = simde_int64x2_to_private(a);
    simde_int32x4_private b_ = simde_int32x4_to_private(b);
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = a_.values[i] - b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)];
    }
    return simde_int64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vsubw_high_s32
  #define vsubw_high_s32(a, b) simde_vsubw_high_s32((a), (b))
#endif
/* vsubw_high_u8: unsigned variant, 16-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vsubw_high_u8(simde_uint16x8_t a, simde_uint8x16_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vsubw_high_u8(a, b);
  #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
    return simde_vsubq_u16(a, simde_vmovl_high_u8(b));
  #else
    simde_uint16x8_private r_;
    simde_uint16x8_private a_ = simde_uint16x8_to_private(a);
    simde_uint8x16_private b_ = simde_uint8x16_to_private(b);
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = a_.values[i] - b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)];
    }
    return simde_uint16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vsubw_high_u8
  #define vsubw_high_u8(a, b) simde_vsubw_high_u8((a), (b))
#endif
/* vsubw_high_u16: unsigned variant, 32-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vsubw_high_u16(simde_uint32x4_t a, simde_uint16x8_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vsubw_high_u16(a, b);
  #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
    return simde_vsubq_u32(a, simde_vmovl_high_u16(b));
  #else
    simde_uint32x4_private r_;
    simde_uint32x4_private a_ = simde_uint32x4_to_private(a);
    simde_uint16x8_private b_ = simde_uint16x8_to_private(b);
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = a_.values[i] - b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)];
    }
    return simde_uint32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vsubw_high_u16
  #define vsubw_high_u16(a, b) simde_vsubw_high_u16((a), (b))
#endif
/* vsubw_high_u32: unsigned variant, 64-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vsubw_high_u32(simde_uint64x2_t a, simde_uint32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vsubw_high_u32(a, b);
  #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
    return simde_vsubq_u64(a, simde_vmovl_high_u32(b));
  #else
    simde_uint64x2_private r_;
    simde_uint64x2_private a_ = simde_uint64x2_to_private(a);
    simde_uint32x4_private b_ = simde_uint32x4_to_private(b);
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = a_.values[i] - b_.values[i + ((sizeof(b_.values) / sizeof(b_.values[0])) / 2)];
    }
    return simde_uint64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vsubw_high_u32
  #define vsubw_high_u32(a, b) simde_vsubw_high_u32((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_SUBW_HIGH_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Zhi An Ng <zhin@google.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_QDMULH_N_H)
#define SIMDE_ARM_NEON_QDMULH_N_H
#include "qdmulh.h"
#include "dup_n.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vqdmulh_n family: saturating doubling multiply (high half) against a scalar.
 * Each macro broadcasts the scalar b with vdup_n and forwards to the
 * corresponding vector vqdmulh implementation; on A32V7 the native intrinsic
 * is used directly. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vqdmulh_n_s16(a, b) vqdmulh_n_s16((a), (b))
#else
#define simde_vqdmulh_n_s16(a, b) simde_vqdmulh_s16((a), simde_vdup_n_s16(b))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqdmulh_n_s16
#define vqdmulh_n_s16(a, b) simde_vqdmulh_n_s16((a), (b))
#endif
/* 32-bit scalar variant, 64-bit vectors. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vqdmulh_n_s32(a, b) vqdmulh_n_s32((a), (b))
#else
#define simde_vqdmulh_n_s32(a, b) simde_vqdmulh_s32((a), simde_vdup_n_s32(b))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqdmulh_n_s32
#define vqdmulh_n_s32(a, b) simde_vqdmulh_n_s32((a), (b))
#endif
/* 128-bit (quad) variants: same pattern with vdupq_n. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vqdmulhq_n_s16(a, b) vqdmulhq_n_s16((a), (b))
#else
#define simde_vqdmulhq_n_s16(a, b) simde_vqdmulhq_s16((a), simde_vdupq_n_s16(b))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqdmulhq_n_s16
#define vqdmulhq_n_s16(a, b) simde_vqdmulhq_n_s16((a), (b))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vqdmulhq_n_s32(a, b) vqdmulhq_n_s32((a), (b))
#else
#define simde_vqdmulhq_n_s32(a, b) simde_vqdmulhq_s32((a), simde_vdupq_n_s32(b))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqdmulhq_n_s32
#define vqdmulhq_n_s32(a, b) simde_vqdmulhq_n_s32((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_QDMULH_N_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Atharva Nimbalkar <atharvakn@gmail.com>
*/
#if !defined(SIMDE_ARM_NEON_SQADD_H)
#define SIMDE_ARM_NEON_SQADD_H
#include "types.h"
#include <limits.h>
// Workaround on ARM64 windows due to windows SDK bug
// https://developercommunity.visualstudio.com/t/In-arm64_neonh-vsqaddb_u8-vsqaddh_u16/10271747?sort=newest
#if (defined _MSC_VER) && (defined SIMDE_ARM_NEON_A64V8_NATIVE)
#undef vsqaddb_u8
#define vsqaddb_u8(src1, src2) neon_usqadds8(__uint8ToN8_v(src1), __int8ToN8_v(src2)).n8_u8[0]
#undef vsqaddh_u16
#define vsqaddh_u16(src1, src2) neon_usqadds16(__uint16ToN16_v(src1), __int16ToN16_v(src2)).n16_u16[0]
#undef vsqadds_u32
#define vsqadds_u32(src1, src2) _CopyUInt32FromFloat(neon_usqadds32(_CopyFloatFromUInt32(src1), _CopyFloatFromInt32(src2)))
#undef vsqaddd_u64
#define vsqaddd_u64(src1, src2) neon_usqadds64(__uint64ToN64_v(src1), __int64ToN64_v(src2)).n64_u64[0]
#endif
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vsqaddb_u8: unsigned saturating add of a signed value (USQADD, scalar).
 * The sum is formed in a wider signed type and clamped to [0, UINT8_MAX]. */
SIMDE_FUNCTION_ATTRIBUTES
uint8_t
simde_vsqaddb_u8(uint8_t a, int8_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    #if defined(SIMDE_BUG_CLANG_REV_365298)
      return vsqaddb_u8(a, HEDLEY_STATIC_CAST(uint8_t, b));
    #else
      return vsqaddb_u8(a, b);
    #endif
  #else
    int16_t sum = HEDLEY_STATIC_CAST(int16_t, a) + HEDLEY_STATIC_CAST(int16_t, b);
    if (sum < 0) {
      return 0;
    }
    if (sum > UINT8_MAX) {
      return UINT8_MAX;
    }
    return HEDLEY_STATIC_CAST(uint8_t, sum);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vsqaddb_u8
  #define vsqaddb_u8(a, b) simde_vsqaddb_u8((a), (b))
#endif
/* vsqaddh_u16: scalar USQADD, 16-bit; wider sum clamped to [0, UINT16_MAX]. */
SIMDE_FUNCTION_ATTRIBUTES
uint16_t
simde_vsqaddh_u16(uint16_t a, int16_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    #if defined(SIMDE_BUG_CLANG_REV_365298)
      return vsqaddh_u16(a, HEDLEY_STATIC_CAST(uint16_t, b));
    #else
      return vsqaddh_u16(a, b);
    #endif
  #else
    int32_t sum = HEDLEY_STATIC_CAST(int32_t, a) + HEDLEY_STATIC_CAST(int32_t, b);
    if (sum < 0) {
      return 0;
    }
    if (sum > UINT16_MAX) {
      return UINT16_MAX;
    }
    return HEDLEY_STATIC_CAST(uint16_t, sum);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vsqaddh_u16
  #define vsqaddh_u16(a, b) simde_vsqaddh_u16((a), (b))
#endif
/* vsqadds_u32: scalar USQADD, 32-bit; 64-bit sum clamped to [0, UINT32_MAX]. */
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_vsqadds_u32(uint32_t a, int32_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    #if defined(SIMDE_BUG_CLANG_REV_365298)
      return vsqadds_u32(a, HEDLEY_STATIC_CAST(uint32_t, b));
    #else
      return vsqadds_u32(a, b);
    #endif
  #else
    int64_t sum = HEDLEY_STATIC_CAST(int64_t, a) + HEDLEY_STATIC_CAST(int64_t, b);
    if (sum < 0) {
      return 0;
    }
    if (sum > UINT32_MAX) {
      return UINT32_MAX;
    }
    return HEDLEY_STATIC_CAST(uint32_t, sum);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vsqadds_u32
  #define vsqadds_u32(a, b) simde_vsqadds_u32((a), (b))
#endif
/* vsqaddd_u64: unsigned saturating add of a signed 64-bit value (USQADD).
 * No wider type is available, so positive and negative b are handled as
 * separate saturating cases.
 *
 * FIX(review): the previous fallback computed `-b` in the signed domain,
 * which is undefined behavior when b == INT64_MIN (negation overflows
 * int64_t). The magnitude of a non-positive b is now obtained entirely in
 * unsigned arithmetic (~cast(b) + 1, i.e. two's-complement negation), which
 * is well-defined for every value including INT64_MIN. */
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vsqaddd_u64(uint64_t a, int64_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    #if defined(SIMDE_BUG_CLANG_REV_365298)
      return vsqaddd_u64(a, HEDLEY_STATIC_CAST(uint64_t, b));
    #else
      return vsqaddd_u64(a, b);
    #endif
  #else
    uint64_t r_;
    if (b > 0) {
      uint64_t ub = HEDLEY_STATIC_CAST(uint64_t, b);
      /* Saturate at UINT64_MAX if the addition would wrap. */
      r_ = ((UINT64_MAX - a) < ub) ? UINT64_MAX : a + ub;
    } else {
      /* Magnitude of b computed in unsigned arithmetic; for b == 0 this is 0. */
      uint64_t nb = ~HEDLEY_STATIC_CAST(uint64_t, b) + 1;
      /* Saturate at 0 if the subtraction would underflow. */
      r_ = (nb > a) ? 0 : a - nb;
    }
    return r_;
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vsqaddd_u64
  #define vsqaddd_u64(a, b) simde_vsqaddd_u64((a), (b))
#endif
/* vsqadd_u8: lane-wise unsigned saturating add of signed b into unsigned a,
 * delegating each lane to the scalar simde_vsqaddb_u8 helper. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vsqadd_u8(simde_uint8x8_t a, simde_int8x8_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vsqadd_u8(a, b);
#else
simde_uint8x8_private
r_,
a_ = simde_uint8x8_to_private(a);
simde_int8x8_private b_ = simde_int8x8_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vsqaddb_u8(a_.values[i], b_.values[i]);
}
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vsqadd_u8
#define vsqadd_u8(a, b) simde_vsqadd_u8((a), (b))
#endif
/* vsqadd_u16: lane-wise USQADD via scalar simde_vsqaddh_u16. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vsqadd_u16(simde_uint16x4_t a, simde_int16x4_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vsqadd_u16(a, b);
#else
simde_uint16x4_private
r_,
a_ = simde_uint16x4_to_private(a);
simde_int16x4_private b_ = simde_int16x4_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vsqaddh_u16(a_.values[i], b_.values[i]);
}
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vsqadd_u16
#define vsqadd_u16(a, b) simde_vsqadd_u16((a), (b))
#endif
/* vsqadd_u32: lane-wise USQADD via scalar simde_vsqadds_u32. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vsqadd_u32(simde_uint32x2_t a, simde_int32x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vsqadd_u32(a, b);
#else
simde_uint32x2_private
r_,
a_ = simde_uint32x2_to_private(a);
simde_int32x2_private b_ = simde_int32x2_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vsqadds_u32(a_.values[i], b_.values[i]);
}
return simde_uint32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vsqadd_u32
#define vsqadd_u32(a, b) simde_vsqadd_u32((a), (b))
#endif
/* vsqadd_u64: single-lane USQADD via scalar simde_vsqaddd_u64. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vsqadd_u64(simde_uint64x1_t a, simde_int64x1_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vsqadd_u64(a, b);
#else
simde_uint64x1_private
r_,
a_ = simde_uint64x1_to_private(a);
simde_int64x1_private b_ = simde_int64x1_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vsqaddd_u64(a_.values[i], b_.values[i]);
}
return simde_uint64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vsqadd_u64
#define vsqadd_u64(a, b) simde_vsqadd_u64((a), (b))
#endif
/* 128-bit (quad) variants follow the same lane-wise pattern. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vsqaddq_u8(simde_uint8x16_t a, simde_int8x16_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vsqaddq_u8(a, b);
#else
simde_uint8x16_private
r_,
a_ = simde_uint8x16_to_private(a);
simde_int8x16_private b_ = simde_int8x16_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vsqaddb_u8(a_.values[i], b_.values[i]);
}
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vsqaddq_u8
#define vsqaddq_u8(a, b) simde_vsqaddq_u8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vsqaddq_u16(simde_uint16x8_t a, simde_int16x8_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vsqaddq_u16(a, b);
#else
simde_uint16x8_private
r_,
a_ = simde_uint16x8_to_private(a);
simde_int16x8_private b_ = simde_int16x8_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vsqaddh_u16(a_.values[i], b_.values[i]);
}
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vsqaddq_u16
#define vsqaddq_u16(a, b) simde_vsqaddq_u16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vsqaddq_u32(simde_uint32x4_t a, simde_int32x4_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vsqaddq_u32(a, b);
#else
simde_uint32x4_private
r_,
a_ = simde_uint32x4_to_private(a);
simde_int32x4_private b_ = simde_int32x4_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vsqadds_u32(a_.values[i], b_.values[i]);
}
return simde_uint32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vsqaddq_u32
#define vsqaddq_u32(a, b) simde_vsqaddq_u32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vsqaddq_u64(simde_uint64x2_t a, simde_int64x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vsqaddq_u64(a, b);
#else
simde_uint64x2_private
r_,
a_ = simde_uint64x2_to_private(a);
simde_int64x2_private b_ = simde_int64x2_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vsqaddd_u64(a_.values[i], b_.values[i]);
}
return simde_uint64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vsqaddq_u64
#define vsqaddq_u64(a, b) simde_vsqaddq_u64((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_SQADD_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
/* TODO: the 128-bit versions only require AVX-512 because of the final
* conversions from larger types down to smaller ones. We could get
* the same results from AVX/AVX2 instructions with some shuffling
* to extract the low half of each input element to the low half
* of a 256-bit vector, then cast that to a 128-bit vector. */
#if !defined(SIMDE_ARM_NEON_HSUB_H)
#define SIMDE_ARM_NEON_HSUB_H
#include "subl.h"
#include "shr_n.h"
#include "movn.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vhsub_s8: halving subtract — (a - b) >> 1 per lane, computed without
 * overflow by widening to 16 bits (vsubl), shifting, then narrowing. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vhsub_s8(simde_int8x8_t a, simde_int8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhsub_s8(a, b);
#else
return simde_vmovn_s16(simde_vshrq_n_s16(simde_vsubl_s8(a, b), 1));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhsub_s8
#define vhsub_s8(a, b) simde_vhsub_s8((a), (b))
#endif
/* vhsub_s16: widen to 32 bits, subtract, arithmetic shift right 1, narrow. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vhsub_s16(simde_int16x4_t a, simde_int16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhsub_s16(a, b);
#else
return simde_vmovn_s32(simde_vshrq_n_s32(simde_vsubl_s16(a, b), 1));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhsub_s16
#define vhsub_s16(a, b) simde_vhsub_s16((a), (b))
#endif
/* vhsub_s32: widen to 64 bits, subtract, arithmetic shift right 1, narrow. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vhsub_s32(simde_int32x2_t a, simde_int32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhsub_s32(a, b);
#else
return simde_vmovn_s64(simde_vshrq_n_s64(simde_vsubl_s32(a, b), 1));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhsub_s32
#define vhsub_s32(a, b) simde_vhsub_s32((a), (b))
#endif
/* Unsigned variants: same widen/subtract/shift/narrow pattern with logical shift. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vhsub_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhsub_u8(a, b);
#else
return simde_vmovn_u16(simde_vshrq_n_u16(simde_vsubl_u8(a, b), 1));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhsub_u8
#define vhsub_u8(a, b) simde_vhsub_u8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vhsub_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhsub_u16(a, b);
#else
return simde_vmovn_u32(simde_vshrq_n_u32(simde_vsubl_u16(a, b), 1));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhsub_u16
#define vhsub_u16(a, b) simde_vhsub_u16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vhsub_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhsub_u32(a, b);
#else
return simde_vmovn_u64(simde_vshrq_n_u64(simde_vsubl_u32(a, b), 1));
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhsub_u32
#define vhsub_u32(a, b) simde_vhsub_u32((a), (b))
#endif
/* vhsubq_s8: 128-bit halving subtract, (a - b) >> 1 per signed 8-bit lane.
 * AVX-512 path widens to 16-bit lanes in a 256-bit register, subtracts,
 * arithmetic-shifts, then narrows back; scalar path does the same per lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vhsubq_s8(simde_int8x16_t a, simde_int8x16_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhsubq_s8(a, b);
#else
simde_int8x16_private
r_,
a_ = simde_int8x16_to_private(a),
b_ = simde_int8x16_to_private(b);
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
r_.m128i = _mm256_cvtepi16_epi8(_mm256_srai_epi16(_mm256_sub_epi16(_mm256_cvtepi8_epi16(a_.m128i), _mm256_cvtepi8_epi16(b_.m128i)), 1));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(int8_t, (HEDLEY_STATIC_CAST(int16_t, a_.values[i]) - HEDLEY_STATIC_CAST(int16_t, b_.values[i])) >> 1);
}
#endif
return simde_int8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhsubq_s8
#define vhsubq_s8(a, b) simde_vhsubq_s8((a), (b))
#endif
/* vhsubq_s16: same pattern widened to 32-bit intermediate lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vhsubq_s16(simde_int16x8_t a, simde_int16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhsubq_s16(a, b);
#else
simde_int16x8_private
r_,
a_ = simde_int16x8_to_private(a),
b_ = simde_int16x8_to_private(b);
#if defined(SIMDE_X86_AVX512VL_NATIVE)
r_.m128i = _mm256_cvtepi32_epi16(_mm256_srai_epi32(_mm256_sub_epi32(_mm256_cvtepi16_epi32(a_.m128i), _mm256_cvtepi16_epi32(b_.m128i)), 1));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(int16_t, (HEDLEY_STATIC_CAST(int32_t, a_.values[i]) - HEDLEY_STATIC_CAST(int32_t, b_.values[i])) >> 1);
}
#endif
return simde_int16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhsubq_s16
#define vhsubq_s16(a, b) simde_vhsubq_s16((a), (b))
#endif
/* vhsubq_s32: same pattern widened to 64-bit intermediate lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vhsubq_s32(simde_int32x4_t a, simde_int32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhsubq_s32(a, b);
#else
simde_int32x4_private
r_,
a_ = simde_int32x4_to_private(a),
b_ = simde_int32x4_to_private(b);
#if defined(SIMDE_X86_AVX512VL_NATIVE)
r_.m128i = _mm256_cvtepi64_epi32(_mm256_srai_epi64(_mm256_sub_epi64(_mm256_cvtepi32_epi64(a_.m128i), _mm256_cvtepi32_epi64(b_.m128i)), 1));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(int32_t, (HEDLEY_STATIC_CAST(int64_t, a_.values[i]) - HEDLEY_STATIC_CAST(int64_t, b_.values[i])) >> 1);
}
#endif
return simde_int32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhsubq_s32
#define vhsubq_s32(a, b) simde_vhsubq_s32((a), (b))
#endif
/* vhsubq_u8: unsigned halving subtract. The WASM path processes the low and
 * high 8 lanes separately in 16-bit precision, then interleaves the even
 * bytes back into a single 16-lane result via i8x16.shuffle. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vhsubq_u8(simde_uint8x16_t a, simde_uint8x16_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhsubq_u8(a, b);
#else
simde_uint8x16_private
r_,
a_ = simde_uint8x16_to_private(a),
b_ = simde_uint8x16_to_private(b);
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
r_.m128i = _mm256_cvtepi16_epi8(_mm256_srli_epi16(_mm256_sub_epi16(_mm256_cvtepu8_epi16(a_.m128i), _mm256_cvtepu8_epi16(b_.m128i)), 1));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t lo =
wasm_u16x8_shr(wasm_i16x8_sub(wasm_u16x8_extend_low_u8x16(a_.v128),
wasm_u16x8_extend_low_u8x16(b_.v128)),
1);
v128_t hi =
wasm_u16x8_shr(wasm_i16x8_sub(wasm_u16x8_extend_high_u8x16(a_.v128),
wasm_u16x8_extend_high_u8x16(b_.v128)),
1);
r_.v128 = wasm_i8x16_shuffle(lo, hi, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
22, 24, 26, 28, 30);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, (HEDLEY_STATIC_CAST(uint16_t, a_.values[i]) - HEDLEY_STATIC_CAST(uint16_t, b_.values[i])) >> 1);
}
#endif
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhsubq_u8
#define vhsubq_u8(a, b) simde_vhsubq_u8((a), (b))
#endif
/* vhsubq_u16: unsigned variant with 32-bit intermediates and logical shift. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vhsubq_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhsubq_u16(a, b);
#else
simde_uint16x8_private
r_,
a_ = simde_uint16x8_to_private(a),
b_ = simde_uint16x8_to_private(b);
#if defined(SIMDE_X86_AVX512VL_NATIVE)
r_.m128i = _mm256_cvtepi32_epi16(_mm256_srli_epi32(_mm256_sub_epi32(_mm256_cvtepu16_epi32(a_.m128i), _mm256_cvtepu16_epi32(b_.m128i)), 1));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, (HEDLEY_STATIC_CAST(uint32_t, a_.values[i]) - HEDLEY_STATIC_CAST(uint32_t, b_.values[i])) >> 1);
}
#endif
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhsubq_u16
#define vhsubq_u16(a, b) simde_vhsubq_u16((a), (b))
#endif
/* vhsubq_u32: unsigned variant with 64-bit intermediates and logical shift. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vhsubq_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vhsubq_u32(a, b);
#else
simde_uint32x4_private
r_,
a_ = simde_uint32x4_to_private(a),
b_ = simde_uint32x4_to_private(b);
#if defined(SIMDE_X86_AVX512VL_NATIVE)
r_.m128i = _mm256_cvtepi64_epi32(_mm256_srli_epi64(_mm256_sub_epi64(_mm256_cvtepu32_epi64(a_.m128i), _mm256_cvtepu32_epi64(b_.m128i)), 1));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(uint32_t, (HEDLEY_STATIC_CAST(uint64_t, a_.values[i]) - HEDLEY_STATIC_CAST(uint64_t, b_.values[i])) >> 1);
}
#endif
return simde_uint32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vhsubq_u32
#define vhsubq_u32(a, b) simde_vhsubq_u32((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_HSUB_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/qdmulh.h | .h | 5,265 | 161 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_QDMULH_H)
#define SIMDE_ARM_NEON_QDMULH_H
#include "types.h"
#include "combine.h"
#include "get_high.h"
#include "get_low.h"
#include "qdmull.h"
#include "reinterpret.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_vqdmulhs_s32(int32_t a, int32_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqdmulhs_s32(a, b);
#else
int64_t tmp = simde_vqdmulls_s32(a, b);
return HEDLEY_STATIC_CAST(int32_t, tmp >> 32);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqdmulhs_s32
#define vqdmulhs_s32(a) simde_vqdmulhs_s32((a))
#endif
/* vqdmulh_s16: per-lane saturating doubling multiply, high half.
 * Built on vqdmull (widening doubling multiply); the high 16 bits of each
 * 32-bit product are extracted either with a shuffle of the odd half-words
 * (little-endian reinterpret trick) or with an explicit >> 16 loop. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vqdmulh_s16(simde_int16x4_t a, simde_int16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqdmulh_s16(a, b);
#else
simde_int16x4_private r_;
#if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) && !(HEDLEY_GCC_VERSION_CHECK(12,1,0) && defined(SIMDE_ARCH_ZARCH))
/* Odd 16-bit elements (1,3,5,7) are the high halves of the four 32-bit lanes. */
simde_int16x8_private tmp_ =
simde_int16x8_to_private(
simde_vreinterpretq_s16_s32(
simde_vqdmull_s16(a, b)
)
);
r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3, 5, 7);
#else
simde_int32x4_private tmp = simde_int32x4_to_private(simde_vqdmull_s16(a, b));
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(int16_t, tmp.values[i] >> 16);
}
#endif
return simde_int16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqdmulh_s16
#define vqdmulh_s16(a, b) simde_vqdmulh_s16((a), (b))
#endif
/* vqdmulh_s32: per-lane saturating doubling multiply, high half.
 * Either shuffles the odd 32-bit elements out of the reinterpreted 64-bit
 * doubling products, or falls back to the scalar helper per lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vqdmulh_s32(simde_int32x2_t a, simde_int32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqdmulh_s32(a, b);
#else
simde_int32x2_private r_;
#if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) && !(HEDLEY_GCC_VERSION_CHECK(12,1,0) && defined(SIMDE_ARCH_ZARCH))
/* Odd 32-bit elements (1,3) are the high halves of the two 64-bit lanes. */
simde_int32x4_private tmp_ =
simde_int32x4_to_private(
simde_vreinterpretq_s32_s64(
simde_vqdmull_s32(a, b)
)
);
r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3);
#else
simde_int32x2_private a_ = simde_int32x2_to_private(a);
simde_int32x2_private b_ = simde_int32x2_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vqdmulhs_s32(a_.values[i], b_.values[i]);
}
#endif
return simde_int32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqdmulh_s32
#define vqdmulh_s32(a, b) simde_vqdmulh_s32((a), (b))
#endif
/* vqdmulhq_s16: 128-bit saturating doubling multiply (high half), built by
 * running the 64-bit variant on each half and stitching the results. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vqdmulhq_s16(simde_int16x8_t a, simde_int16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqdmulhq_s16(a, b);
#else
simde_int16x4_t lo = simde_vqdmulh_s16(simde_vget_low_s16(a), simde_vget_low_s16(b));
simde_int16x4_t hi = simde_vqdmulh_s16(simde_vget_high_s16(a), simde_vget_high_s16(b));
return simde_vcombine_s16(lo, hi);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqdmulhq_s16
#define vqdmulhq_s16(a, b) simde_vqdmulhq_s16((a), (b))
#endif
/* vqdmulhq_s32: 128-bit saturating doubling multiply (high half), built by
 * running the 64-bit variant on each half and stitching the results. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vqdmulhq_s32(simde_int32x4_t a, simde_int32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqdmulhq_s32(a, b);
#else
simde_int32x2_t lo = simde_vqdmulh_s32(simde_vget_low_s32(a), simde_vget_low_s32(b));
simde_int32x2_t hi = simde_vqdmulh_s32(simde_vget_high_s32(a), simde_vget_high_s32(b));
return simde_vcombine_s32(lo, hi);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqdmulhq_s32
#define vqdmulhq_s32(a, b) simde_vqdmulhq_s32((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_QDMULH_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/minv.h | .h | 10,751 | 425 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_MINV_H)
#define SIMDE_ARM_NEON_MINV_H
#include "types.h"
#include <float.h>
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vminv_f32: horizontal minimum across all lanes.
 * Seeded with +INF (the identity for min). Without SIMDE_FAST_NANS the
 * nested ternary deliberately propagates NaN: if a lane is neither <, nor
 * >=, nor equal to itself, it is NaN and becomes the result. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32_t
simde_vminv_f32(simde_float32x2_t a) {
simde_float32_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vminv_f32(a);
#else
simde_float32x2_private a_ = simde_float32x2_to_private(a);
r = SIMDE_MATH_INFINITYF;
#if defined(SIMDE_FAST_NANS)
SIMDE_VECTORIZE_REDUCTION(min:r)
#else
SIMDE_VECTORIZE
#endif
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
#if defined(SIMDE_FAST_NANS)
r = a_.values[i] < r ? a_.values[i] : r;
#else
r = (a_.values[i] < r) ? a_.values[i] : ((a_.values[i] >= r) ? r : ((a_.values[i] == a_.values[i]) ? r : a_.values[i]));
#endif
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vminv_f32
#define vminv_f32(v) simde_vminv_f32(v)
#endif
/* vminv_s8: horizontal minimum of all lanes; seeded with INT8_MAX, the
 * identity element for signed-byte min. */
SIMDE_FUNCTION_ATTRIBUTES
int8_t
simde_vminv_s8(simde_int8x8_t a) {
int8_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vminv_s8(a);
#else
simde_int8x8_private a_ = simde_int8x8_to_private(a);
r = INT8_MAX;
SIMDE_VECTORIZE_REDUCTION(min:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r = a_.values[i] < r ? a_.values[i] : r;
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vminv_s8
#define vminv_s8(v) simde_vminv_s8(v)
#endif
/* vminv_s16: horizontal minimum of all lanes; seeded with INT16_MAX. */
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_vminv_s16(simde_int16x4_t a) {
int16_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vminv_s16(a);
#else
simde_int16x4_private a_ = simde_int16x4_to_private(a);
r = INT16_MAX;
SIMDE_VECTORIZE_REDUCTION(min:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r = a_.values[i] < r ? a_.values[i] : r;
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vminv_s16
#define vminv_s16(v) simde_vminv_s16(v)
#endif
/* vminv_s32: horizontal minimum of all lanes; seeded with INT32_MAX. */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_vminv_s32(simde_int32x2_t a) {
int32_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vminv_s32(a);
#else
simde_int32x2_private a_ = simde_int32x2_to_private(a);
r = INT32_MAX;
SIMDE_VECTORIZE_REDUCTION(min:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r = a_.values[i] < r ? a_.values[i] : r;
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vminv_s32
#define vminv_s32(v) simde_vminv_s32(v)
#endif
/* vminv_u8: horizontal minimum of all lanes; seeded with UINT8_MAX. */
SIMDE_FUNCTION_ATTRIBUTES
uint8_t
simde_vminv_u8(simde_uint8x8_t a) {
uint8_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vminv_u8(a);
#else
simde_uint8x8_private a_ = simde_uint8x8_to_private(a);
r = UINT8_MAX;
SIMDE_VECTORIZE_REDUCTION(min:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r = a_.values[i] < r ? a_.values[i] : r;
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vminv_u8
#define vminv_u8(v) simde_vminv_u8(v)
#endif
/* vminv_u16: horizontal minimum of all lanes; seeded with UINT16_MAX. */
SIMDE_FUNCTION_ATTRIBUTES
uint16_t
simde_vminv_u16(simde_uint16x4_t a) {
uint16_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vminv_u16(a);
#else
simde_uint16x4_private a_ = simde_uint16x4_to_private(a);
r = UINT16_MAX;
SIMDE_VECTORIZE_REDUCTION(min:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r = a_.values[i] < r ? a_.values[i] : r;
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vminv_u16
#define vminv_u16(v) simde_vminv_u16(v)
#endif
/* vminv_u32: horizontal minimum of all lanes; seeded with UINT32_MAX. */
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_vminv_u32(simde_uint32x2_t a) {
uint32_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vminv_u32(a);
#else
simde_uint32x2_private a_ = simde_uint32x2_to_private(a);
r = UINT32_MAX;
SIMDE_VECTORIZE_REDUCTION(min:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r = a_.values[i] < r ? a_.values[i] : r;
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vminv_u32
#define vminv_u32(v) simde_vminv_u32(v)
#endif
/* vminvq_f32: horizontal minimum across all four lanes.
 * Seeded with +INF; without SIMDE_FAST_NANS the nested ternary propagates
 * NaN (a lane that compares false to everything, including itself, wins). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32_t
simde_vminvq_f32(simde_float32x4_t a) {
simde_float32_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vminvq_f32(a);
#else
simde_float32x4_private a_ = simde_float32x4_to_private(a);
r = SIMDE_MATH_INFINITYF;
#if defined(SIMDE_FAST_NANS)
SIMDE_VECTORIZE_REDUCTION(min:r)
#else
SIMDE_VECTORIZE
#endif
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
#if defined(SIMDE_FAST_NANS)
r = a_.values[i] < r ? a_.values[i] : r;
#else
r = (a_.values[i] < r) ? a_.values[i] : ((a_.values[i] >= r) ? r : ((a_.values[i] == a_.values[i]) ? r : a_.values[i]));
#endif
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vminvq_f32
#define vminvq_f32(v) simde_vminvq_f32(v)
#endif
/* vminvq_f64: horizontal minimum across both lanes.
 * Seeded with +INF; without SIMDE_FAST_NANS the nested ternary propagates
 * NaN, matching AArch64 FMINV behavior. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64_t
simde_vminvq_f64(simde_float64x2_t a) {
simde_float64_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vminvq_f64(a);
#else
simde_float64x2_private a_ = simde_float64x2_to_private(a);
r = SIMDE_MATH_INFINITY;
#if defined(SIMDE_FAST_NANS)
SIMDE_VECTORIZE_REDUCTION(min:r)
#else
SIMDE_VECTORIZE
#endif
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
#if defined(SIMDE_FAST_NANS)
r = a_.values[i] < r ? a_.values[i] : r;
#else
r = (a_.values[i] < r) ? a_.values[i] : ((a_.values[i] >= r) ? r : ((a_.values[i] == a_.values[i]) ? r : a_.values[i]));
#endif
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vminvq_f64
#define vminvq_f64(v) simde_vminvq_f64(v)
#endif
/* vminvq_s8: horizontal minimum of all sixteen lanes; seeded with INT8_MAX. */
SIMDE_FUNCTION_ATTRIBUTES
int8_t
simde_vminvq_s8(simde_int8x16_t a) {
int8_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vminvq_s8(a);
#else
simde_int8x16_private a_ = simde_int8x16_to_private(a);
r = INT8_MAX;
SIMDE_VECTORIZE_REDUCTION(min:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r = a_.values[i] < r ? a_.values[i] : r;
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vminvq_s8
#define vminvq_s8(v) simde_vminvq_s8(v)
#endif
/* vminvq_s16: horizontal minimum of all eight lanes; seeded with INT16_MAX. */
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_vminvq_s16(simde_int16x8_t a) {
int16_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vminvq_s16(a);
#else
simde_int16x8_private a_ = simde_int16x8_to_private(a);
r = INT16_MAX;
SIMDE_VECTORIZE_REDUCTION(min:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r = a_.values[i] < r ? a_.values[i] : r;
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vminvq_s16
#define vminvq_s16(v) simde_vminvq_s16(v)
#endif
/* vminvq_s32: horizontal minimum of all four lanes; seeded with INT32_MAX. */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_vminvq_s32(simde_int32x4_t a) {
int32_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vminvq_s32(a);
#else
simde_int32x4_private a_ = simde_int32x4_to_private(a);
r = INT32_MAX;
SIMDE_VECTORIZE_REDUCTION(min:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r = a_.values[i] < r ? a_.values[i] : r;
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vminvq_s32
#define vminvq_s32(v) simde_vminvq_s32(v)
#endif
/* vminvq_u8: horizontal minimum of all sixteen lanes; seeded with UINT8_MAX. */
SIMDE_FUNCTION_ATTRIBUTES
uint8_t
simde_vminvq_u8(simde_uint8x16_t a) {
uint8_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vminvq_u8(a);
#else
simde_uint8x16_private a_ = simde_uint8x16_to_private(a);
r = UINT8_MAX;
SIMDE_VECTORIZE_REDUCTION(min:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r = a_.values[i] < r ? a_.values[i] : r;
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vminvq_u8
#define vminvq_u8(v) simde_vminvq_u8(v)
#endif
/* vminvq_u16: horizontal minimum of all eight lanes; seeded with UINT16_MAX. */
SIMDE_FUNCTION_ATTRIBUTES
uint16_t
simde_vminvq_u16(simde_uint16x8_t a) {
uint16_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vminvq_u16(a);
#else
simde_uint16x8_private a_ = simde_uint16x8_to_private(a);
r = UINT16_MAX;
SIMDE_VECTORIZE_REDUCTION(min:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r = a_.values[i] < r ? a_.values[i] : r;
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vminvq_u16
#define vminvq_u16(v) simde_vminvq_u16(v)
#endif
/* vminvq_u32: horizontal minimum of all four lanes; seeded with UINT32_MAX. */
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_vminvq_u32(simde_uint32x4_t a) {
uint32_t r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vminvq_u32(a);
#else
simde_uint32x4_private a_ = simde_uint32x4_to_private(a);
r = UINT32_MAX;
SIMDE_VECTORIZE_REDUCTION(min:r)
for (size_t i = 0 ; i < (sizeof(a_.values) / sizeof(a_.values[0])) ; i++) {
r = a_.values[i] < r ? a_.values[i] : r;
}
#endif
return r;
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vminvq_u32
#define vminvq_u32(v) simde_vminvq_u32(v)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_MINV_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/subl_high.h | .h | 4,068 | 127 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Décio Luiz Gazzoni Filho <decio@decpp.net>
*/
#if !defined(SIMDE_ARM_NEON_SUBL_HIGH_H)
#define SIMDE_ARM_NEON_SUBL_HIGH_H
#include "sub.h"
#include "movl.h"
#include "movl_high.h"
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vsubl_high_s8: widening subtract of the high halves —
 * sign-extend the top 8 lanes of each input to 16 bits, then subtract. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vsubl_high_s8(simde_int8x16_t a, simde_int8x16_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vsubl_high_s8(a, b);
#else
return simde_vsubq_s16(simde_vmovl_high_s8(a), simde_vmovl_high_s8(b));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vsubl_high_s8
#define vsubl_high_s8(a, b) simde_vsubl_high_s8((a), (b))
#endif
/* vsubl_high_s16: widening subtract of the high halves (s16 -> s32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vsubl_high_s16(simde_int16x8_t a, simde_int16x8_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vsubl_high_s16(a, b);
#else
return simde_vsubq_s32(simde_vmovl_high_s16(a), simde_vmovl_high_s16(b));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vsubl_high_s16
#define vsubl_high_s16(a, b) simde_vsubl_high_s16((a), (b))
#endif
/* vsubl_high_s32: widening subtract of the high halves (s32 -> s64). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vsubl_high_s32(simde_int32x4_t a, simde_int32x4_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vsubl_high_s32(a, b);
#else
return simde_vsubq_s64(simde_vmovl_high_s32(a), simde_vmovl_high_s32(b));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vsubl_high_s32
#define vsubl_high_s32(a, b) simde_vsubl_high_s32((a), (b))
#endif
/* vsubl_high_u8: widening subtract of the high halves (u8 -> u16). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vsubl_high_u8(simde_uint8x16_t a, simde_uint8x16_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vsubl_high_u8(a, b);
#else
return simde_vsubq_u16(simde_vmovl_high_u8(a), simde_vmovl_high_u8(b));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vsubl_high_u8
#define vsubl_high_u8(a, b) simde_vsubl_high_u8((a), (b))
#endif
/* vsubl_high_u16: widening subtract of the high halves (u16 -> u32). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vsubl_high_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vsubl_high_u16(a, b);
#else
return simde_vsubq_u32(simde_vmovl_high_u16(a), simde_vmovl_high_u16(b));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vsubl_high_u16
#define vsubl_high_u16(a, b) simde_vsubl_high_u16((a), (b))
#endif
/* vsubl_high_u32: widening subtract of the high halves (u32 -> u64). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vsubl_high_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vsubl_high_u32(a, b);
#else
return simde_vsubq_u64(simde_vmovl_high_u32(a), simde_vmovl_high_u32(b));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vsubl_high_u32
#define vsubl_high_u32(a, b) simde_vsubl_high_u32((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_SUBL_HIGH_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/qshlu_n.h | .h | 17,502 | 438 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Atharva Nimbalkar <atharvakn@gmail.com>
*/
#if !defined(SIMDE_ARM_NEON_QSHLU_N_H)
#define SIMDE_ARM_NEON_QSHLU_N_H
#include "types.h"
#if defined(SIMDE_WASM_SIMD128_NATIVE)
#include "reinterpret.h"
#include "movl.h"
#include "movn.h"
#include "combine.h"
#include "get_low.h"
#endif
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vqshlub_n_s8: saturating unsigned left shift of a signed byte —
 * negative inputs clamp to 0, overflow clamps to UINT8_MAX. */
SIMDE_FUNCTION_ATTRIBUTES
uint8_t
simde_vqshlub_n_s8(int8_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 7) {
if (a < 0) {
return 0;
}
uint8_t shifted = HEDLEY_STATIC_CAST(uint8_t, a << n);
/* If shifting back does not recover the input, bits were lost: saturate. */
if ((shifted >> n) != HEDLEY_STATIC_CAST(uint8_t, a)) {
shifted = UINT8_MAX;
}
return shifted;
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vqshlub_n_s8(a, n) HEDLEY_STATIC_CAST(uint8_t, vqshlub_n_s8(a, n))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqshlub_n_s8
#define vqshlub_n_s8(a, n) simde_vqshlub_n_s8((a), (n))
#endif
/* vqshlus_n_s32: saturating unsigned left shift of a signed 32-bit value —
 * negative inputs clamp to 0, overflow clamps to UINT32_MAX. */
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_vqshlus_n_s32(int32_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 31) {
if (a < 0) {
return 0;
}
uint32_t shifted = HEDLEY_STATIC_CAST(uint32_t, a << n);
/* If shifting back does not recover the input, bits were lost: saturate. */
if ((shifted >> n) != HEDLEY_STATIC_CAST(uint32_t, a)) {
shifted = UINT32_MAX;
}
return shifted;
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vqshlus_n_s32(a, n) HEDLEY_STATIC_CAST(uint32_t, vqshlus_n_s32(a, n))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqshlus_n_s32
#define vqshlus_n_s32(a, n) simde_vqshlus_n_s32((a), (n))
#endif
/* vqshlud_n_s64: saturating unsigned left shift of a signed 64-bit value —
 * negative inputs clamp to 0, overflow clamps to UINT64_MAX.
 * BUG FIX: the computation previously used uint32_t/UINT32_MAX, silently
 * truncating the 64-bit input and returning wrong results (and saturating
 * to the wrong maximum) for any value that does not fit in 32 bits; the
 * full 64-bit width is used now, matching AArch64 SQSHLU semantics. */
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vqshlud_n_s64(int64_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 63) {
uint64_t r = HEDLEY_STATIC_CAST(uint64_t, a << n);
r |= (((r >> n) != HEDLEY_STATIC_CAST(uint64_t, a)) ? UINT64_MAX : 0);
return (a < 0) ? 0 : r;
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vqshlud_n_s64(a, n) HEDLEY_STATIC_CAST(uint64_t, vqshlud_n_s64(a, n))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqshlud_n_s64
#define vqshlud_n_s64(a, n) simde_vqshlud_n_s64((a), (n))
#endif
/* vqshlu_n_s8: per-lane saturating unsigned left shift —
 * negative lanes clamp to 0, overflow clamps to UINT8_MAX. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vqshlu_n_s8(simde_int8x8_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 7) {
#if defined(SIMDE_WASM_SIMD128_NATIVE)
/* Widen to 16 bits so the shift cannot wrap, then clamp to [0, UINT8_MAX]
 * and narrow back. */
simde_int16x8_private
R_,
A_ = simde_int16x8_to_private(simde_vmovl_s8(a));
const v128_t shifted = wasm_i16x8_shl(A_.v128, HEDLEY_STATIC_CAST(uint32_t, n));
R_.v128 = wasm_i16x8_min(shifted, wasm_i16x8_const_splat(UINT8_MAX));
R_.v128 = wasm_i16x8_max(R_.v128, wasm_i16x8_const_splat(0));
return simde_vmovn_u16(simde_vreinterpretq_u16_s16( simde_int16x8_from_private(R_)));
#else
simde_int8x8_private a_ = simde_int8x8_to_private(a);
simde_uint8x8_private r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762)
/* Branch-free form: 'overflow' is all-ones per overflowed lane, so OR-ing
 * it in saturates; the final AND zeroes lanes that were negative. */
__typeof__(r_.values) shifted = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values) << n;
__typeof__(r_.values) overflow = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (shifted >> n) != HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values));
r_.values = (shifted & ~overflow) | overflow;
r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values >= 0));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, a_.values[i] << n);
r_.values[i] |= (((r_.values[i] >> n) != HEDLEY_STATIC_CAST(uint8_t, a_.values[i])) ? UINT8_MAX : 0);
r_.values[i] = (a_.values[i] < 0) ? 0 : r_.values[i];
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vqshlu_n_s8(a, n) vqshlu_n_s8(a, n)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqshlu_n_s8
#define vqshlu_n_s8(a, n) simde_vqshlu_n_s8((a), (n))
#endif
/* vqshlu_n_s16: per-lane saturating unsigned left shift —
 * negative lanes clamp to 0, overflow clamps to UINT16_MAX. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vqshlu_n_s16(simde_int16x4_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 15) {
#if defined(SIMDE_WASM_SIMD128_NATIVE)
/* Widen to 32 bits so the shift cannot wrap, clamp to [0, UINT16_MAX],
 * then narrow back. */
simde_int32x4_private
R_,
A_ = simde_int32x4_to_private(simde_vmovl_s16(a));
const v128_t shifted = wasm_i32x4_shl(A_.v128, HEDLEY_STATIC_CAST(uint32_t, n));
R_.v128 = wasm_i32x4_min(shifted, wasm_i32x4_const_splat(UINT16_MAX));
R_.v128 = wasm_i32x4_max(R_.v128, wasm_i32x4_const_splat(0));
return simde_vmovn_u32(simde_vreinterpretq_u32_s32( simde_int32x4_from_private(R_)));
#else
simde_int16x4_private a_ = simde_int16x4_to_private(a);
simde_uint16x4_private r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762)
/* Branch-free: OR in the all-ones overflow mask to saturate, then zero
 * lanes whose input was negative. */
__typeof__(r_.values) shifted = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values) << n;
__typeof__(r_.values) overflow = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (shifted >> n) != HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values));
r_.values = (shifted & ~overflow) | overflow;
r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values >= 0));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, a_.values[i] << n);
r_.values[i] |= (((r_.values[i] >> n) != HEDLEY_STATIC_CAST(uint16_t, a_.values[i])) ? UINT16_MAX : 0);
r_.values[i] = (a_.values[i] < 0) ? 0 : r_.values[i];
}
#endif
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vqshlu_n_s16(a, n) vqshlu_n_s16(a, n)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqshlu_n_s16
#define vqshlu_n_s16(a, n) simde_vqshlu_n_s16((a), (n))
#endif
/* vqshlu_n_s32: per-lane saturating unsigned left shift —
 * negative lanes clamp to 0, overflow clamps to UINT32_MAX. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vqshlu_n_s32(simde_int32x2_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 31) {
#if defined(SIMDE_WASM_SIMD128_NATIVE)
/* Widen to 64 bits; wasm has no i64x2 min/max, so clamp with bitselect
 * against UINT32_MAX and zero out non-positive lanes via a > 0 mask. */
simde_int64x2_private
R_,
A_ = simde_int64x2_to_private(simde_vmovl_s32(a));
const v128_t max = wasm_i64x2_const_splat(UINT32_MAX);
const v128_t shifted = wasm_i64x2_shl(A_.v128, HEDLEY_STATIC_CAST(uint32_t, n));
R_.v128 = wasm_v128_bitselect(shifted, max, wasm_i64x2_gt(max, shifted));
R_.v128 = wasm_v128_and(R_.v128, wasm_i64x2_gt(R_.v128, wasm_i64x2_const_splat(0)));
return simde_vmovn_u64(simde_vreinterpretq_u64_s64( simde_int64x2_from_private(R_)));
#else
simde_int32x2_private a_ = simde_int32x2_to_private(a);
simde_uint32x2_private r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762)
/* Branch-free: OR in the all-ones overflow mask to saturate, then zero
 * lanes whose input was negative. */
__typeof__(r_.values) shifted = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values) << n;
__typeof__(r_.values) overflow = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (shifted >> n) != HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values));
r_.values = (shifted & ~overflow) | overflow;
r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values >= 0));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(uint32_t, a_.values[i] << n);
r_.values[i] |= (((r_.values[i] >> n) != HEDLEY_STATIC_CAST(uint32_t, a_.values[i])) ? UINT32_MAX : 0);
r_.values[i] = (a_.values[i] < 0) ? 0 : r_.values[i];
}
#endif
return simde_uint32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vqshlu_n_s32(a, n) vqshlu_n_s32(a, n)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqshlu_n_s32
#define vqshlu_n_s32(a, n) simde_vqshlu_n_s32((a), (n))
#endif
/* vqshlu_n_s64: per-lane saturating unsigned left shift —
 * negative lanes clamp to 0, overflow clamps to UINT64_MAX.
 * No wider type exists, so overflow is detected by shifting back. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vqshlu_n_s64(simde_int64x1_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 63) {
#if defined(SIMDE_WASM_SIMD128_NATIVE)
/* Duplicate the single lane into a 128-bit vector; the arithmetic >> 63
 * produces an all-ones mask for negative lanes which andnot clears. */
simde_uint64x2_private
R_,
A_ = simde_uint64x2_to_private(simde_vreinterpretq_u64_s64(simde_vcombine_s64(a, a)))
R_.v128 = wasm_i64x2_shl(A_.v128, HEDLEY_STATIC_CAST(uint32_t, n));
const v128_t overflow = wasm_i64x2_ne(A_.v128, wasm_u64x2_shr(R_.v128, HEDLEY_STATIC_CAST(uint32_t, n)));
R_.v128 = wasm_v128_or(R_.v128, overflow);
R_.v128 = wasm_v128_andnot(R_.v128, wasm_i64x2_shr(A_.v128, 63));
return simde_vget_low_u64(simde_uint64x2_from_private(R_));
#else
simde_int64x1_private a_ = simde_int64x1_to_private(a);
simde_uint64x1_private r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
/* Branch-free: OR in the all-ones overflow mask to saturate, then zero
 * lanes whose input was negative. */
__typeof__(r_.values) shifted = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values) << n;
__typeof__(r_.values) overflow = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (shifted >> n) != HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values));
r_.values = (shifted & ~overflow) | overflow;
r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values >= 0));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(uint64_t, a_.values[i] << n);
r_.values[i] |= (((r_.values[i] >> n) != HEDLEY_STATIC_CAST(uint64_t, a_.values[i])) ? UINT64_MAX : 0);
r_.values[i] = (a_.values[i] < 0) ? 0 : r_.values[i];
}
#endif
return simde_uint64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vqshlu_n_s64(a, n) vqshlu_n_s64(a, n)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqshlu_n_s64
#define vqshlu_n_s64(a, n) simde_vqshlu_n_s64((a), (n))
#endif
/* vqshluq_n_s8: 128-bit per-lane saturating unsigned left shift —
 * negative lanes clamp to 0, overflow clamps to UINT8_MAX. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vqshluq_n_s8(simde_int8x16_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 7) {
simde_int8x16_private a_ = simde_int8x16_to_private(a);
simde_uint8x16_private r_;
#if defined(SIMDE_WASM_SIMD128_NATIVE)
/* OR in the per-lane overflow mask to saturate; arithmetic >> 7 yields an
 * all-ones mask for negative lanes, which andnot clears to zero. */
r_.v128 = wasm_i8x16_shl(a_.v128, HEDLEY_STATIC_CAST(uint32_t, n));
const v128_t overflow = wasm_i8x16_ne(a_.v128, wasm_u8x16_shr(r_.v128, HEDLEY_STATIC_CAST(uint32_t, n)));
r_.v128 = wasm_v128_or(r_.v128, overflow);
r_.v128 = wasm_v128_andnot(r_.v128, wasm_i8x16_shr(a_.v128, 7));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
__typeof__(r_.values) shifted = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values) << n;
__typeof__(r_.values) overflow = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (shifted >> n) != HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values));
r_.values = (shifted & ~overflow) | overflow;
r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values >= 0));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, a_.values[i] << n);
r_.values[i] |= (((r_.values[i] >> n) != HEDLEY_STATIC_CAST(uint8_t, a_.values[i])) ? UINT8_MAX : 0);
r_.values[i] = (a_.values[i] < 0) ? 0 : r_.values[i];
}
#endif
return simde_uint8x16_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vqshluq_n_s8(a, n) vqshluq_n_s8(a, n)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqshluq_n_s8
#define vqshluq_n_s8(a, n) simde_vqshluq_n_s8((a), (n))
#endif
/* vqshluq_n_s16: 128-bit per-lane saturating unsigned left shift —
 * negative lanes clamp to 0, overflow clamps to UINT16_MAX. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vqshluq_n_s16(simde_int16x8_t a, const int n)
SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 15) {
simde_int16x8_private a_ = simde_int16x8_to_private(a);
simde_uint16x8_private r_;
#if defined(SIMDE_WASM_SIMD128_NATIVE)
/* OR in the per-lane overflow mask to saturate; arithmetic >> 15 yields an
 * all-ones mask for negative lanes, which andnot clears to zero. */
r_.v128 = wasm_i16x8_shl(a_.v128, HEDLEY_STATIC_CAST(uint32_t, n));
const v128_t overflow = wasm_i16x8_ne(a_.v128, wasm_u16x8_shr(r_.v128, HEDLEY_STATIC_CAST(uint32_t, n)));
r_.v128 = wasm_v128_or(r_.v128, overflow);
r_.v128 = wasm_v128_andnot(r_.v128, wasm_i16x8_shr(a_.v128, 15));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
__typeof__(r_.values) shifted = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values) << n;
__typeof__(r_.values) overflow = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (shifted >> n) != HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values));
r_.values = (shifted & ~overflow) | overflow;
r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values >= 0));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, a_.values[i] << n);
r_.values[i] |= (((r_.values[i] >> n) != HEDLEY_STATIC_CAST(uint16_t, a_.values[i])) ? UINT16_MAX : 0);
r_.values[i] = (a_.values[i] < 0) ? 0 : r_.values[i];
}
#endif
return simde_uint16x8_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vqshluq_n_s16(a, n) vqshluq_n_s16(a, n)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqshluq_n_s16
#define vqshluq_n_s16(a, n) simde_vqshluq_n_s16((a), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vqshluq_n_s32(simde_int32x4_t a, const int n)
    SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 31) {
  /* Emulates ARM NEON vqshluq_n_s32: signed saturating shift left with an
   * unsigned result.  Lanes that overflow uint32 saturate to UINT32_MAX;
   * negative input lanes saturate to 0. */
  simde_int32x4_private a_ = simde_int32x4_to_private(a);
  simde_uint32x4_private r_;

  #if defined(SIMDE_WASM_SIMD128_NATIVE)
    r_.v128 = wasm_i32x4_shl(a_.v128, HEDLEY_STATIC_CAST(uint32_t, n));
    /* Overflow detected by round-trip: (a << n) >> n != a. */
    const v128_t overflow = wasm_i32x4_ne(a_.v128, wasm_u32x4_shr(r_.v128, HEDLEY_STATIC_CAST(uint32_t, n)));
    r_.v128 = wasm_v128_or(r_.v128, overflow);
    /* Sign broadcast (>> 31) + andnot clears negative-input lanes. */
    r_.v128 = wasm_v128_andnot(r_.v128, wasm_i32x4_shr(a_.v128, 31));
  #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
    __typeof__(r_.values) shifted = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values) << n;
    /* Per-lane all-ones mask for lanes whose shift overflowed. */
    __typeof__(r_.values) overflow = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (shifted >> n) != HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values));
    r_.values = (shifted & ~overflow) | overflow;
    /* Zero out lanes with negative inputs. */
    r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values >= 0));
  #else
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = HEDLEY_STATIC_CAST(uint32_t, a_.values[i] << n);
      /* Saturate overflowed lanes to UINT32_MAX, then negatives to 0. */
      r_.values[i] |= (((r_.values[i] >> n) != HEDLEY_STATIC_CAST(uint32_t, a_.values[i])) ? UINT32_MAX : 0);
      r_.values[i] = (a_.values[i] < 0) ? 0 : r_.values[i];
    }
  #endif

  return simde_uint32x4_from_private(r_);
}
/* Prefer the hardware instruction where it exists. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vqshluq_n_s32(a, n) vqshluq_n_s32(a, n)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqshluq_n_s32
  #define vqshluq_n_s32(a, n) simde_vqshluq_n_s32((a), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vqshluq_n_s64(simde_int64x2_t a, const int n)
    SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 63) {
  /* Emulates ARM NEON vqshluq_n_s64: signed saturating shift left with an
   * unsigned result.  Lanes that overflow uint64 saturate to UINT64_MAX;
   * negative input lanes saturate to 0. */
  simde_int64x2_private a_ = simde_int64x2_to_private(a);
  simde_uint64x2_private r_;

  #if defined(SIMDE_WASM_SIMD128_NATIVE)
    r_.v128 = wasm_i64x2_shl(a_.v128, HEDLEY_STATIC_CAST(uint32_t, n));
    /* Overflow detected by round-trip: (a << n) >> n != a. */
    const v128_t overflow = wasm_i64x2_ne(a_.v128, wasm_u64x2_shr(r_.v128, HEDLEY_STATIC_CAST(uint32_t, n)));
    r_.v128 = wasm_v128_or(r_.v128, overflow);
    /* Sign broadcast (>> 63) + andnot clears negative-input lanes. */
    r_.v128 = wasm_v128_andnot(r_.v128, wasm_i64x2_shr(a_.v128, 63));
  #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
    __typeof__(r_.values) shifted = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values) << n;
    /* Per-lane all-ones mask for lanes whose shift overflowed. */
    __typeof__(r_.values) overflow = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (shifted >> n) != HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values));
    r_.values = (shifted & ~overflow) | overflow;
    /* Zero out lanes with negative inputs. */
    r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values >= 0));
  #else
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = HEDLEY_STATIC_CAST(uint64_t, a_.values[i] << n);
      /* Saturate overflowed lanes to UINT64_MAX, then negatives to 0. */
      r_.values[i] |= (((r_.values[i] >> n) != HEDLEY_STATIC_CAST(uint64_t, a_.values[i])) ? UINT64_MAX : 0);
      r_.values[i] = (a_.values[i] < 0) ? 0 : r_.values[i];
    }
  #endif

  return simde_uint64x2_from_private(r_);
}
/* Prefer the hardware instruction where it exists. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vqshluq_n_s64(a, n) vqshluq_n_s64(a, n)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vqshluq_n_s64
  #define vqshluq_n_s64(a, n) simde_vqshluq_n_s64((a), (n))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_QSHLU_N_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Christopher Moore <moore@free.fr>
*/
#if !defined(SIMDE_ARM_NEON_SHLL_N_H)
#define SIMDE_ARM_NEON_SHLL_N_H
#include "types.h"
/*
* The constant range requirements for the shift amount *n* looks strange.
* The ARM Neon Intrinsics Reference states that for *_s8, 0 << n << 7. This
* does not match the actual instruction decoding in the ARM Reference manual,
* which states that the shift amount "must be equal to the source element width
* in bits" (ARM DDI 0487F.b C7-1959). So for *_s8 instructions, *n* must be 8,
* for *_s16, it must be 16, and *_s32 must be 32 (similarly for unsigned).
*/
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vshll_n_s8 (const simde_int8x8_t a, const int n)
    SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 8) {
  /* Widening shift left: promote each int8 lane to int16, then shift it
   * left by the constant n. */
  simde_int8x8_private src_ = simde_int8x8_to_private(a);
  simde_int16x8_private dst_;

  SIMDE_VECTORIZE
  for (size_t lane = 0 ; lane < (sizeof(dst_.values) / sizeof(dst_.values[0])) ; lane++) {
    dst_.values[lane] = HEDLEY_STATIC_CAST(int16_t, HEDLEY_STATIC_CAST(int16_t, src_.values[lane]) << n);
  }

  return simde_int16x8_from_private(dst_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vshll_n_s8(a, n) vshll_n_s8((a), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vshll_n_s8
  #define vshll_n_s8(a, n) simde_vshll_n_s8((a), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vshll_n_s16 (const simde_int16x4_t a, const int n)
    SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 16) {
  /* Widening shift left: promote each int16 lane to int32, then shift it
   * left by the constant n. */
  simde_int16x4_private src_ = simde_int16x4_to_private(a);
  simde_int32x4_private dst_;

  SIMDE_VECTORIZE
  for (size_t lane = 0 ; lane < (sizeof(dst_.values) / sizeof(dst_.values[0])) ; lane++) {
    dst_.values[lane] = HEDLEY_STATIC_CAST(int32_t, src_.values[lane]) << n;
  }

  return simde_int32x4_from_private(dst_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vshll_n_s16(a, n) vshll_n_s16((a), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vshll_n_s16
  #define vshll_n_s16(a, n) simde_vshll_n_s16((a), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vshll_n_s32 (const simde_int32x2_t a, const int n)
    SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 32) {
  /* Widening shift left: promote each int32 lane to int64, then shift it
   * left by the constant n. */
  simde_int32x2_private src_ = simde_int32x2_to_private(a);
  simde_int64x2_private dst_;

  SIMDE_VECTORIZE
  for (size_t lane = 0 ; lane < (sizeof(dst_.values) / sizeof(dst_.values[0])) ; lane++) {
    dst_.values[lane] = HEDLEY_STATIC_CAST(int64_t, src_.values[lane]) << n;
  }

  return simde_int64x2_from_private(dst_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vshll_n_s32(a, n) vshll_n_s32((a), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vshll_n_s32
  #define vshll_n_s32(a, n) simde_vshll_n_s32((a), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vshll_n_u8 (const simde_uint8x8_t a, const int n)
    SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 8) {
  /* Widening shift left: promote each uint8 lane to uint16, then shift it
   * left by the constant n. */
  simde_uint8x8_private src_ = simde_uint8x8_to_private(a);
  simde_uint16x8_private dst_;

  SIMDE_VECTORIZE
  for (size_t lane = 0 ; lane < (sizeof(dst_.values) / sizeof(dst_.values[0])) ; lane++) {
    dst_.values[lane] = HEDLEY_STATIC_CAST(uint16_t, HEDLEY_STATIC_CAST(uint16_t, src_.values[lane]) << n);
  }

  return simde_uint16x8_from_private(dst_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vshll_n_u8(a, n) vshll_n_u8((a), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vshll_n_u8
  #define vshll_n_u8(a, n) simde_vshll_n_u8((a), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vshll_n_u16 (const simde_uint16x4_t a, const int n)
    SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 16) {
  /* Widening shift left: promote each uint16 lane to uint32, then shift it
   * left by the constant n. */
  simde_uint16x4_private src_ = simde_uint16x4_to_private(a);
  simde_uint32x4_private dst_;

  SIMDE_VECTORIZE
  for (size_t lane = 0 ; lane < (sizeof(dst_.values) / sizeof(dst_.values[0])) ; lane++) {
    dst_.values[lane] = HEDLEY_STATIC_CAST(uint32_t, src_.values[lane]) << n;
  }

  return simde_uint32x4_from_private(dst_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vshll_n_u16(a, n) vshll_n_u16((a), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vshll_n_u16
  #define vshll_n_u16(a, n) simde_vshll_n_u16((a), (n))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vshll_n_u32 (const simde_uint32x2_t a, const int n)
    SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 32) {
  /* Widening shift left: promote each uint32 lane to uint64, then shift it
   * left by the constant n. */
  simde_uint32x2_private src_ = simde_uint32x2_to_private(a);
  simde_uint64x2_private dst_;

  SIMDE_VECTORIZE
  for (size_t lane = 0 ; lane < (sizeof(dst_.values) / sizeof(dst_.values[0])) ; lane++) {
    dst_.values[lane] = HEDLEY_STATIC_CAST(uint64_t, src_.values[lane]) << n;
  }

  return simde_uint64x2_from_private(dst_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vshll_n_u32(a, n) vshll_n_u32((a), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vshll_n_u32
  #define vshll_n_u32(a, n) simde_vshll_n_u32((a), (n))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_SHLL_N_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Christopher Moore <moore@free.fr>
*/
#if !defined(SIMDE_ARM_NEON_REV16_H)
#define SIMDE_ARM_NEON_REV16_H
#include "reinterpret.h"
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vrev16_s8(simde_int8x8_t a) {
  /* Emulates ARM NEON vrev16_s8: reverse the byte order inside each 16-bit
   * half of the 64-bit vector, i.e. swap every adjacent pair of bytes. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrev16_s8(a);
  #else
    simde_int8x8_private
      r_,
      a_ = simde_int8x8_to_private(a);

    #if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
      /* _mm_set_pi8 lists bytes high-to-low, so this is the same pair-swap
       * permutation as the shuffle below. */
      r_.m64 = _mm_shuffle_pi8(a_.m64, _mm_set_pi8(6, 7, 4, 5, 2, 3, 0, 1));
    #elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100762)
      r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, a_.values, a_.values, 1, 0, 3, 2, 5, 4, 7, 6);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        /* i ^ 1 swaps even/odd indices, pairing bytes 0<->1, 2<->3, ... */
        r_.values[i] = a_.values[i ^ 1];
      }
    #endif

    return simde_int8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrev16_s8
  #define vrev16_s8(a) simde_vrev16_s8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vrev16_u8(simde_uint8x8_t a) {
  /* Swap every adjacent pair of bytes (reverse bytes within 16-bit halves). */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrev16_u8(a);
  #else
    /* The byte swap is sign-agnostic, so defer to the signed variant. */
    simde_int8x8_t swapped = simde_vrev16_s8(simde_vreinterpret_s8_u8(a));
    return simde_vreinterpret_u8_s8(swapped);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrev16_u8
  #define vrev16_u8(a) simde_vrev16_u8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vrev16q_s8(simde_int8x16_t a) {
  /* Emulates ARM NEON vrev16q_s8: reverse the byte order inside each 16-bit
   * half of the 128-bit vector, i.e. swap every adjacent pair of bytes. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrev16q_s8(a);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
    /* vec_revb reverses bytes within each 16-bit element directly. */
    return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char),
      vec_revb(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed short), a)));
  #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
    /* No vec_revb on P7: reverse all bytes, then reverse 16-bit elements,
     * which nets out to swapping bytes within each element. */
    return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char),
      vec_reve(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed short), vec_reve(a))));
  #else
    simde_int8x16_private
      r_,
      a_ = simde_int8x16_to_private(a);

    #if defined(SIMDE_X86_SSSE3_NATIVE)
      /* _mm_set_epi8 lists bytes high-to-low: the pair-swap permutation. */
      r_.m128i = _mm_shuffle_epi8(a_.m128i, _mm_set_epi8(14, 15, 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1));
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_i8x16_shuffle(a_.v128, a_.v128, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
    #elif defined(SIMDE_SHUFFLE_VECTOR_)
      r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, a_.values, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        /* i ^ 1 swaps even/odd indices, pairing bytes 0<->1, 2<->3, ... */
        r_.values[i] = a_.values[i ^ 1];
      }
    #endif

    return simde_int8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrev16q_s8
  #define vrev16q_s8(a) simde_vrev16q_s8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vrev16q_u8(simde_uint8x16_t a) {
  /* Swap every adjacent pair of bytes (reverse bytes within 16-bit halves). */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrev16q_u8(a);
  #else
    /* The byte swap is sign-agnostic, so defer to the signed variant. */
    simde_int8x16_t swapped = simde_vrev16q_s8(simde_vreinterpretq_s8_u8(a));
    return simde_vreinterpretq_u8_s8(swapped);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrev16q_u8
  #define vrev16q_u8(a) simde_vrev16q_u8(a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_REV16_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_MULL_LANE_H)
#define SIMDE_ARM_NEON_MULL_LANE_H
#include "mull.h"
#include "dup_lane.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vmull_lane_{s16,s32,u16,u32}: widening multiply of `a` by lane `lane` of
 * the 64-bit vector `v`.  The fallback composes two existing simde ops:
 * broadcast the chosen lane with vdup_lane, then widening-multiply with
 * vmull. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vmull_lane_s16(a, v, lane) vmull_lane_s16((a), (v), (lane))
#else
  #define simde_vmull_lane_s16(a, v, lane) simde_vmull_s16((a), simde_vdup_lane_s16((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmull_lane_s16
  #define vmull_lane_s16(a, v, lane) simde_vmull_lane_s16((a), (v), (lane))
#endif

#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vmull_lane_s32(a, v, lane) vmull_lane_s32((a), (v), (lane))
#else
  #define simde_vmull_lane_s32(a, v, lane) simde_vmull_s32((a), simde_vdup_lane_s32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmull_lane_s32
  #define vmull_lane_s32(a, v, lane) simde_vmull_lane_s32((a), (v), (lane))
#endif

#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vmull_lane_u16(a, v, lane) vmull_lane_u16((a), (v), (lane))
#else
  #define simde_vmull_lane_u16(a, v, lane) simde_vmull_u16((a), simde_vdup_lane_u16((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmull_lane_u16
  #define vmull_lane_u16(a, v, lane) simde_vmull_lane_u16((a), (v), (lane))
#endif

#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vmull_lane_u32(a, v, lane) vmull_lane_u32((a), (v), (lane))
#else
  #define simde_vmull_lane_u32(a, v, lane) simde_vmull_u32((a), simde_vdup_lane_u32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmull_lane_u32
  #define vmull_lane_u32(a, v, lane) simde_vmull_lane_u32((a), (v), (lane))
#endif
/* vmull_laneq_{s16,s32,u16,u32}: same as vmull_lane_* but selecting the
 * lane from a 128-bit `v` (AArch64-only natively; emulated with
 * vdup_laneq + vmull elsewhere). */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vmull_laneq_s16(a, v, lane) vmull_laneq_s16((a), (v), (lane))
#else
  #define simde_vmull_laneq_s16(a, v, lane) simde_vmull_s16((a), simde_vdup_laneq_s16((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vmull_laneq_s16
  #define vmull_laneq_s16(a, v, lane) simde_vmull_laneq_s16((a), (v), (lane))
#endif

#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vmull_laneq_s32(a, v, lane) vmull_laneq_s32((a), (v), (lane))
#else
  #define simde_vmull_laneq_s32(a, v, lane) simde_vmull_s32((a), simde_vdup_laneq_s32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vmull_laneq_s32
  #define vmull_laneq_s32(a, v, lane) simde_vmull_laneq_s32((a), (v), (lane))
#endif

#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vmull_laneq_u16(a, v, lane) vmull_laneq_u16((a), (v), (lane))
#else
  #define simde_vmull_laneq_u16(a, v, lane) simde_vmull_u16((a), simde_vdup_laneq_u16((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vmull_laneq_u16
  #define vmull_laneq_u16(a, v, lane) simde_vmull_laneq_u16((a), (v), (lane))
#endif

#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vmull_laneq_u32(a, v, lane) vmull_laneq_u32((a), (v), (lane))
#else
  #define simde_vmull_laneq_u32(a, v, lane) simde_vmull_u32((a), simde_vdup_laneq_u32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vmull_laneq_u32
  #define vmull_laneq_u32(a, v, lane) simde_vmull_laneq_u32((a), (v), (lane))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_MULL_LANE_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Atharva Nimbalkar <atharvakn@gmail.com>
*/
#if !defined(SIMDE_ARM_NEON_CMLA_ROT180_H)
#define SIMDE_ARM_NEON_CMLA_ROT180_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vcmla_rot180_f32(simde_float32x2_t r, simde_float32x2_t a, simde_float32x2_t b) {
  /* Emulates ARM NEON vcmla_rot180_f32 (FCMLA #180).  Lanes are treated as
   * (real, imag) pairs; per pair this accumulates
   *   r[0] += -b[0] * a[0]
   *   r[1] += -b[1] * a[0]
   * i.e. a's real part times the negated b pair. */
  #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && SIMDE_ARCH_ARM_CHECK(8,3) && \
      (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(9,0,0)) && \
      (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0))
    return vcmla_rot180_f32(r, a, b);
  #else
    simde_float32x2_private
      r_ = simde_float32x2_to_private(r),
      a_ = simde_float32x2_to_private(a),
      b_ = simde_float32x2_to_private(b);

    #if defined(SIMDE_SHUFFLE_VECTOR_)
      /* Broadcast a's real lane across the pair, negate b, then fused
       * multiply-accumulate into r. */
      a_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, a_.values, 0, 0);
      b_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, -b_.values, -b_.values, 0, 1);
      r_.values += b_.values * a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / (2 * sizeof(r_.values[0]))) ; i++) {
        r_.values[2 * i]     += -(b_.values[2 * i])     * a_.values[2 * i];
        r_.values[2 * i + 1] += -(b_.values[2 * i + 1]) * a_.values[2 * i];
      }
    #endif

    return simde_float32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES)
  #undef vcmla_rot180_f32
  #define vcmla_rot180_f32(r, a, b) simde_vcmla_rot180_f32(r, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vcmlaq_rot180_f32(simde_float32x4_t r, simde_float32x4_t a, simde_float32x4_t b) {
  /* Emulates ARM NEON vcmlaq_rot180_f32 (FCMLA #180), 128-bit form.  For
   * each (real, imag) lane pair:
   *   r[2i]   += -b[2i]   * a[2i]
   *   r[2i+1] += -b[2i+1] * a[2i]  */
  #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && SIMDE_ARCH_ARM_CHECK(8,3) && \
      (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(9,0,0)) && \
      (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0))
    return vcmlaq_rot180_f32(r, a, b);
  #else
    simde_float32x4_private
      r_ = simde_float32x4_to_private(r),
      a_ = simde_float32x4_to_private(a),
      b_ = simde_float32x4_to_private(b);

    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      /* Duplicate a's even (real) lanes, negate b, multiply-accumulate. */
      a_.v128 = wasm_i32x4_shuffle(a_.v128, a_.v128, 0, 0, 2, 2);
      b_.v128 = wasm_i32x4_shuffle(wasm_f32x4_neg(b_.v128), wasm_f32x4_neg(b_.v128), 0, 1, 2, 3);
      r_.v128 = wasm_f32x4_add(r_.v128, wasm_f32x4_mul(b_.v128, a_.v128));
    #elif defined(SIMDE_SHUFFLE_VECTOR_)
      a_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, a_.values, 0, 0, 2, 2);
      b_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, -b_.values, -b_.values, 0, 1, 2, 3);
      r_.values += b_.values * a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / (2 * sizeof(r_.values[0]))) ; i++) {
        r_.values[2 * i]     += -(b_.values[2 * i])     * a_.values[2 * i];
        r_.values[2 * i + 1] += -(b_.values[2 * i + 1]) * a_.values[2 * i];
      }
    #endif

    return simde_float32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES)
  #undef vcmlaq_rot180_f32
  #define vcmlaq_rot180_f32(r, a, b) simde_vcmlaq_rot180_f32(r, a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vcmlaq_rot180_f64(simde_float64x2_t r, simde_float64x2_t a, simde_float64x2_t b) {
  /* Emulates ARM NEON vcmlaq_rot180_f64 (FCMLA #180), double-precision:
   * one (real, imag) pair per vector:
   *   r[0] += -b[0] * a[0]
   *   r[1] += -b[1] * a[0]  */
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && SIMDE_ARCH_ARM_CHECK(8,3) && \
      (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(9,0,0)) && \
      (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0))
    return vcmlaq_rot180_f64(r, a, b);
  #else
    simde_float64x2_private
      r_ = simde_float64x2_to_private(r),
      a_ = simde_float64x2_to_private(a),
      b_ = simde_float64x2_to_private(b);

    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      /* Broadcast a's real lane, negate b, multiply-accumulate. */
      a_.v128 = wasm_i64x2_shuffle(a_.v128, a_.v128, 0, 0)
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_CMLA_ROT180_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/cge.h | .h | 27,527 | 817 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Christopher Moore <moore@free.fr>
*/
#if !defined(SIMDE_ARM_NEON_CGE_H)
#define SIMDE_ARM_NEON_CGE_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
uint16_t
simde_vcgeh_f16(simde_float16_t a, simde_float16_t b){
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16)
return HEDLEY_STATIC_CAST(uint16_t, vcgeh_f16(a, b));
#else
return (simde_float16_to_float32(a) >= simde_float16_to_float32(b)) ? UINT16_MAX : 0;
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcgeh_f16
#define vcgeh_f16(a, b) simde_vcgeh_f16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vcgeq_f16(simde_float16x8_t a, simde_float16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16)
return vcgeq_f16(a, b);
#else
simde_float16x8_private
a_ = simde_float16x8_to_private(a),
b_ = simde_float16x8_to_private(b);
simde_uint16x8_private r_;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vcgeh_f16(a_.values[i], b_.values[i]);
}
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES)
#undef vcgeq_f16
#define vcgeq_f16(a, b) simde_vcgeq_f16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vcgeq_f32(simde_float32x4_t a, simde_float32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcgeq_f32(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmpge(a, b));
#else
simde_float32x4_private
a_ = simde_float32x4_to_private(a),
b_ = simde_float32x4_to_private(b);
simde_uint32x4_private r_;
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_castps_si128(_mm_cmpge_ps(a_.m128, b_.m128));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_f32x4_ge(a_.v128, b_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] >= b_.values[i]) ? UINT32_MAX : 0;
}
#endif
return simde_uint32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcgeq_f32
#define vcgeq_f32(a, b) simde_vcgeq_f32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vcgeq_f64(simde_float64x2_t a, simde_float64x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcgeq_f64(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmpge(a, b));
#else
simde_float64x2_private
a_ = simde_float64x2_to_private(a),
b_ = simde_float64x2_to_private(b);
simde_uint64x2_private r_;
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_castpd_si128(_mm_cmpge_pd(a_.m128d, b_.m128d));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_f64x2_ge(a_.v128, b_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] >= b_.values[i]) ? UINT64_MAX : 0;
}
#endif
return simde_uint64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcgeq_f64
#define vcgeq_f64(a, b) simde_vcgeq_f64((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vcgeq_s8(simde_int8x16_t a, simde_int8x16_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcgeq_s8(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_cmpge(a, b));
#else
simde_int8x16_private
a_ = simde_int8x16_to_private(a),
b_ = simde_int8x16_to_private(b);
simde_uint8x16_private r_;
#if defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_or_si128(_mm_cmpgt_epi8(a_.m128i, b_.m128i), _mm_cmpeq_epi8(a_.m128i, b_.m128i));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i8x16_ge(a_.v128, b_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] >= b_.values[i]) ? UINT8_MAX : 0;
}
#endif
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcgeq_s8
#define vcgeq_s8(a, b) simde_vcgeq_s8((a), (b))
#endif
/* simde_vcgeq_s16: portable implementation of the NEON intrinsic vcgeq_s16.
 * Lane-wise signed compare of two int16x8 vectors; each 16-bit result lane
 * is all-ones (UINT16_MAX) when a >= b, otherwise 0. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vcgeq_s16(simde_int16x8_t a, simde_int16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcgeq_s16(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), vec_cmpge(a, b));
#else
simde_int16x8_private
a_ = simde_int16x8_to_private(a),
b_ = simde_int16x8_to_private(b);
simde_uint16x8_private r_;
#if defined(SIMDE_X86_SSE2_NATIVE)
/* SSE2 has no ">=" compare; synthesize it as (a > b) | (a == b). */
r_.m128i = _mm_or_si128(_mm_cmpgt_epi16(a_.m128i, b_.m128i), _mm_cmpeq_epi16(a_.m128i, b_.m128i));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i16x8_ge(a_.v128, b_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
/* GCC/Clang vector extensions: lane-wise >= already yields 0/-1 masks. */
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values);
#else
/* Portable scalar fallback. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] >= b_.values[i]) ? UINT16_MAX : 0;
}
#endif
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcgeq_s16
#define vcgeq_s16(a, b) simde_vcgeq_s16((a), (b))
#endif
/* simde_vcgeq_s32: portable vcgeq_s32 -- lane-wise signed int32x4 ">=",
 * producing UINT32_MAX / 0 masks. Same branch structure as the s16 variant. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vcgeq_s32(simde_int32x4_t a, simde_int32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcgeq_s32(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmpge(a, b));
#else
simde_int32x4_private
a_ = simde_int32x4_to_private(a),
b_ = simde_int32x4_to_private(b);
simde_uint32x4_private r_;
#if defined(SIMDE_X86_SSE2_NATIVE)
/* (a > b) | (a == b) because SSE2 lacks a direct ">=" compare. */
r_.m128i = _mm_or_si128(_mm_cmpgt_epi32(a_.m128i, b_.m128i), _mm_cmpeq_epi32(a_.m128i, b_.m128i));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i32x4_ge(a_.v128, b_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] >= b_.values[i]) ? UINT32_MAX : 0;
}
#endif
return simde_uint32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcgeq_s32
#define vcgeq_s32(a, b) simde_vcgeq_s32((a), (b))
#endif
/* simde_vcgeq_s64: portable vcgeq_s64 -- lane-wise signed int64x2 ">=".
 * Only AArch64 has the native intrinsic; ARMv7 NEON has no 64-bit compare. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vcgeq_s64(simde_int64x2_t a, simde_int64x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcgeq_s64(a, b);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* ARMv7 emulation: saturating (a - b) keeps the correct sign even on
 * overflow; arithmetic shift by 63 broadcasts the sign bit to a full
 * all-ones/all-zeros mask (ones when a < b), and mvn inverts it so the
 * result is all-ones exactly when a >= b. */
return vreinterpretq_u64_s32(vmvnq_s32(vreinterpretq_s32_s64(vshrq_n_s64(vqsubq_s64(a, b), 63))));
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmpge(a, b));
#else
simde_int64x2_private
a_ = simde_int64x2_to_private(a),
b_ = simde_int64x2_to_private(b);
simde_uint64x2_private r_;
#if defined(SIMDE_X86_SSE4_2_NATIVE)
/* _mm_cmpgt_epi64 requires SSE4.2; combine with equality for ">=". */
r_.m128i = _mm_or_si128(_mm_cmpgt_epi64(a_.m128i, b_.m128i), _mm_cmpeq_epi64(a_.m128i, b_.m128i));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] >= b_.values[i]) ? UINT64_MAX : 0;
}
#endif
return simde_uint64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcgeq_s64
#define vcgeq_s64(a, b) simde_vcgeq_s64((a), (b))
#endif
/* simde_vcgeq_u8: portable vcgeq_u8 -- lane-wise unsigned uint8x16 ">=",
 * each result lane UINT8_MAX when a >= b, else 0. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vcgeq_u8(simde_uint8x16_t a, simde_uint8x16_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcgeq_u8(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_cmpge(a, b));
#else
simde_uint8x16_private
r_,
a_ = simde_uint8x16_to_private(a),
b_ = simde_uint8x16_to_private(b);
#if defined(SIMDE_X86_SSE2_NATIVE)
/* Unsigned ">=" via min: a >= b  <=>  min(b, a) == b. Avoids the
 * sign-bit-flip trick because SSE2 does have an unsigned byte min. */
r_.m128i =
_mm_cmpeq_epi8(
_mm_min_epu8(b_.m128i, a_.m128i),
b_.m128i
);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_u8x16_ge(a_.v128, b_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] >= b_.values[i]) ? UINT8_MAX : 0;
}
#endif
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcgeq_u8
#define vcgeq_u8(a, b) simde_vcgeq_u8((a), (b))
#endif
/* simde_vcgeq_u16: portable vcgeq_u16 -- lane-wise unsigned uint16x8 ">=". */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vcgeq_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcgeq_u16(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), vec_cmpge(a, b));
#else
simde_uint16x8_private
r_,
a_ = simde_uint16x8_to_private(a),
b_ = simde_uint16x8_to_private(b);
#if defined(SIMDE_X86_SSE4_1_NATIVE)
/* _mm_min_epu16 exists only from SSE4.1: a >= b  <=>  min(b, a) == b. */
r_.m128i =
_mm_cmpeq_epi16(
_mm_min_epu16(b_.m128i, a_.m128i),
b_.m128i
);
#elif defined(SIMDE_X86_SSE2_NATIVE)
/* SSE2 only has signed compares; XORing the sign bit into both operands
 * maps unsigned order onto signed order, then (a > b) | (a == b). */
__m128i sign_bits = _mm_set1_epi16(INT16_MIN);
r_.m128i = _mm_or_si128(_mm_cmpgt_epi16(_mm_xor_si128(a_.m128i, sign_bits), _mm_xor_si128(b_.m128i, sign_bits)), _mm_cmpeq_epi16(a_.m128i, b_.m128i));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_u16x8_ge(a_.v128, b_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] >= b_.values[i]) ? UINT16_MAX : 0;
}
#endif
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcgeq_u16
#define vcgeq_u16(a, b) simde_vcgeq_u16((a), (b))
#endif
/* simde_vcgeq_u32: portable vcgeq_u32 -- lane-wise unsigned uint32x4 ">=". */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vcgeq_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcgeq_u32(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmpge(a, b));
#else
simde_uint32x4_private
r_,
a_ = simde_uint32x4_to_private(a),
b_ = simde_uint32x4_to_private(b);
#if defined(SIMDE_X86_SSE4_1_NATIVE)
/* a >= b  <=>  min(b, a) == b (unsigned min available from SSE4.1). */
r_.m128i =
_mm_cmpeq_epi32(
_mm_min_epu32(b_.m128i, a_.m128i),
b_.m128i
);
#elif defined(SIMDE_X86_SSE2_NATIVE)
/* Sign-bit-flip trick: converts unsigned order to signed order for SSE2. */
__m128i sign_bits = _mm_set1_epi32(INT32_MIN);
r_.m128i = _mm_or_si128(_mm_cmpgt_epi32(_mm_xor_si128(a_.m128i, sign_bits), _mm_xor_si128(b_.m128i, sign_bits)), _mm_cmpeq_epi32(a_.m128i, b_.m128i));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_u32x4_ge(a_.v128, b_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] >= b_.values[i]) ? UINT32_MAX : 0;
}
#endif
return simde_uint32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcgeq_u32
#define vcgeq_u32(a, b) simde_vcgeq_u32((a), (b))
#endif
/* simde_vcgeq_u64: portable vcgeq_u64 -- lane-wise unsigned uint64x2 ">=".
 * Native intrinsic exists only on AArch64. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vcgeq_u64(simde_uint64x2_t a, simde_uint64x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcgeq_u64(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmpge(a, b));
#else
simde_uint64x2_private
r_,
a_ = simde_uint64x2_to_private(a),
b_ = simde_uint64x2_to_private(b);
#if defined(SIMDE_X86_AVX512VL_NATIVE)
/* AVX-512VL provides a 64-bit unsigned min: a >= b <=> min(b, a) == b. */
r_.m128i =
_mm_cmpeq_epi64(
_mm_min_epu64(b_.m128i, a_.m128i),
b_.m128i
);
#elif defined(SIMDE_X86_SSE4_2_NATIVE)
/* Sign-bit-flip trick over the SSE4.2 signed 64-bit compare. */
__m128i sign_bits = _mm_set1_epi64x(INT64_MIN);
r_.m128i = _mm_or_si128(_mm_cmpgt_epi64(_mm_xor_si128(a_.m128i, sign_bits), _mm_xor_si128(b_.m128i, sign_bits)), _mm_cmpeq_epi64(a_.m128i, b_.m128i));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] >= b_.values[i]) ? UINT64_MAX : 0;
}
#endif
return simde_uint64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcgeq_u64
#define vcgeq_u64(a, b) simde_vcgeq_u64((a), (b))
#endif
/* simde_vcge_f16: portable vcge_f16 -- lane-wise float16x4 ">=" producing
 * UINT16_MAX / 0 masks. The fallback delegates each lane to the scalar
 * helper simde_vcgeh_f16 (declared elsewhere in this header family). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vcge_f16(simde_float16x4_t a, simde_float16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16)
return vcge_f16(a, b);
#else
simde_float16x4_private
a_ = simde_float16x4_to_private(a),
b_ = simde_float16x4_to_private(b);
simde_uint16x4_private r_;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vcgeh_f16(a_.values[i], b_.values[i]);
}
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES)
#undef vcge_f16
#define vcge_f16(a, b) simde_vcge_f16((a), (b))
#endif
/* simde_vcge_f32: portable vcge_f32 -- lane-wise float32x2 ">=" producing
 * UINT32_MAX / 0 masks (NaN compares false, yielding 0, per IEEE ">="). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vcge_f32(simde_float32x2_t a, simde_float32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcge_f32(a, b);
#else
simde_float32x2_private
a_ = simde_float32x2_to_private(a),
b_ = simde_float32x2_to_private(b);
simde_uint32x2_private r_;
/* SIMDE_BUG_GCC_100762: GCC miscompiles 64-bit vector-extension compares
 * on some targets, hence the extra guard before the vector path. */
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] >= b_.values[i]) ? UINT32_MAX : 0;
}
#endif
return simde_uint32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcge_f32
#define vcge_f32(a, b) simde_vcge_f32((a), (b))
#endif
/* simde_vcge_f64: portable vcge_f64 -- single-lane float64x1 ">=" mask;
 * native only on AArch64. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vcge_f64(simde_float64x1_t a, simde_float64x1_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcge_f64(a, b);
#else
simde_float64x1_private
a_ = simde_float64x1_to_private(a),
b_ = simde_float64x1_to_private(b);
simde_uint64x1_private r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] >= b_.values[i]) ? UINT64_MAX : 0;
}
#endif
return simde_uint64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcge_f64
#define vcge_f64(a, b) simde_vcge_f64((a), (b))
#endif
/* simde_vcge_s8: portable vcge_s8 -- lane-wise signed int8x8 (64-bit vector)
 * ">=", each result lane UINT8_MAX when a >= b, else 0. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vcge_s8(simde_int8x8_t a, simde_int8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcge_s8(a, b);
#else
simde_int8x8_private
a_ = simde_int8x8_to_private(a),
b_ = simde_int8x8_to_private(b);
simde_uint8x8_private r_;
#if defined(SIMDE_X86_MMX_NATIVE)
/* MMX lacks ">="; synthesize as (a > b) | (a == b). */
r_.m64 = _mm_or_si64(_mm_cmpgt_pi8(a_.m64, b_.m64), _mm_cmpeq_pi8(a_.m64, b_.m64));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] >= b_.values[i]) ? UINT8_MAX : 0;
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcge_s8
#define vcge_s8(a, b) simde_vcge_s8((a), (b))
#endif
/* simde_vcge_s16: portable vcge_s16 -- lane-wise signed int16x4 ">=". */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vcge_s16(simde_int16x4_t a, simde_int16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcge_s16(a, b);
#else
simde_int16x4_private
a_ = simde_int16x4_to_private(a),
b_ = simde_int16x4_to_private(b);
simde_uint16x4_private r_;
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_or_si64(_mm_cmpgt_pi16(a_.m64, b_.m64), _mm_cmpeq_pi16(a_.m64, b_.m64));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] >= b_.values[i]) ? UINT16_MAX : 0;
}
#endif
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcge_s16
#define vcge_s16(a, b) simde_vcge_s16((a), (b))
#endif
/* simde_vcge_s32: portable vcge_s32 -- lane-wise signed int32x2 ">=". */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vcge_s32(simde_int32x2_t a, simde_int32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcge_s32(a, b);
#else
simde_int32x2_private
a_ = simde_int32x2_to_private(a),
b_ = simde_int32x2_to_private(b);
simde_uint32x2_private r_;
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_or_si64(_mm_cmpgt_pi32(a_.m64, b_.m64), _mm_cmpeq_pi32(a_.m64, b_.m64));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] >= b_.values[i]) ? UINT32_MAX : 0;
}
#endif
return simde_uint32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcge_s32
#define vcge_s32(a, b) simde_vcge_s32((a), (b))
#endif
/* simde_vcge_s64: portable vcge_s64 -- single-lane signed int64x1 ">=";
 * native only on AArch64, no MMX path (MMX has no 64-bit compare). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vcge_s64(simde_int64x1_t a, simde_int64x1_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcge_s64(a, b);
#else
simde_int64x1_private
a_ = simde_int64x1_to_private(a),
b_ = simde_int64x1_to_private(b);
simde_uint64x1_private r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] >= b_.values[i]) ? UINT64_MAX : 0;
}
#endif
return simde_uint64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcge_s64
#define vcge_s64(a, b) simde_vcge_s64((a), (b))
#endif
/* simde_vcge_u8: portable vcge_u8 -- lane-wise unsigned uint8x8 ">=". */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vcge_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcge_u8(a, b);
#else
simde_uint8x8_private
r_,
a_ = simde_uint8x8_to_private(a),
b_ = simde_uint8x8_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
/* MMX only has signed compares; XORing the sign bit into both operands
 * maps unsigned order onto signed order, then (a > b) | (a == b). */
__m64 sign_bits = _mm_set1_pi8(INT8_MIN);
r_.m64 = _mm_or_si64(_mm_cmpgt_pi8(_mm_xor_si64(a_.m64, sign_bits), _mm_xor_si64(b_.m64, sign_bits)), _mm_cmpeq_pi8(a_.m64, b_.m64));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] >= b_.values[i]) ? UINT8_MAX : 0;
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcge_u8
#define vcge_u8(a, b) simde_vcge_u8((a), (b))
#endif
/* simde_vcge_u16: portable vcge_u16 -- lane-wise unsigned uint16x4 ">=". */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vcge_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcge_u16(a, b);
#else
simde_uint16x4_private
r_,
a_ = simde_uint16x4_to_private(a),
b_ = simde_uint16x4_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
/* Sign-bit-flip trick for unsigned compare over signed MMX intrinsics. */
__m64 sign_bits = _mm_set1_pi16(INT16_MIN);
r_.m64 = _mm_or_si64(_mm_cmpgt_pi16(_mm_xor_si64(a_.m64, sign_bits), _mm_xor_si64(b_.m64, sign_bits)), _mm_cmpeq_pi16(a_.m64, b_.m64));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] >= b_.values[i]) ? UINT16_MAX : 0;
}
#endif
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcge_u16
#define vcge_u16(a, b) simde_vcge_u16((a), (b))
#endif
/* simde_vcge_u32: portable vcge_u32 -- lane-wise unsigned uint32x2 ">=". */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vcge_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vcge_u32(a, b);
#else
simde_uint32x2_private
r_,
a_ = simde_uint32x2_to_private(a),
b_ = simde_uint32x2_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
/* Sign-bit-flip trick for unsigned compare over signed MMX intrinsics. */
__m64 sign_bits = _mm_set1_pi32(INT32_MIN);
r_.m64 = _mm_or_si64(_mm_cmpgt_pi32(_mm_xor_si64(a_.m64, sign_bits), _mm_xor_si64(b_.m64, sign_bits)), _mm_cmpeq_pi32(a_.m64, b_.m64));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] >= b_.values[i]) ? UINT32_MAX : 0;
}
#endif
return simde_uint32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vcge_u32
#define vcge_u32(a, b) simde_vcge_u32((a), (b))
#endif
/* simde_vcge_u64: portable vcge_u64 -- single-lane unsigned uint64x1 ">=";
 * native only on AArch64, no MMX path (no 64-bit MMX compare). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vcge_u64(simde_uint64x1_t a, simde_uint64x1_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vcge_u64(a, b);
#else
simde_uint64x1_private
r_,
a_ = simde_uint64x1_to_private(a),
b_ = simde_uint64x1_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (a_.values[i] >= b_.values[i]) ? UINT64_MAX : 0;
}
#endif
return simde_uint64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcge_u64
#define vcge_u64(a, b) simde_vcge_u64((a), (b))
#endif
/* simde_vcged_f64: portable AArch64 scalar vcged_f64 -- returns UINT64_MAX
 * when a >= b, else 0 (NaN operands compare false). */
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vcged_f64(simde_float64_t a, simde_float64_t b){
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return HEDLEY_STATIC_CAST(uint64_t, vcged_f64(a, b));
#else
return (a >= b) ? UINT64_MAX : 0;
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcged_f64
#define vcged_f64(a, b) simde_vcged_f64((a), (b))
#endif
/* simde_vcged_s64: portable AArch64 scalar vcged_s64 -- signed 64-bit
 * compare returning an all-ones/zero mask. */
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vcged_s64(int64_t a, int64_t b){
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return HEDLEY_STATIC_CAST(uint64_t, vcged_s64(a, b));
#else
return (a >= b) ? UINT64_MAX : 0;
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcged_s64
#define vcged_s64(a, b) simde_vcged_s64((a), (b))
#endif
/* simde_vcged_u64: portable AArch64 scalar vcged_u64 -- unsigned 64-bit
 * compare returning an all-ones/zero mask. */
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vcged_u64(uint64_t a, uint64_t b){
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return HEDLEY_STATIC_CAST(uint64_t, vcged_u64(a, b));
#else
return (a >= b) ? UINT64_MAX : 0;
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcged_u64
#define vcged_u64(a, b) simde_vcged_u64((a), (b))
#endif
/* simde_vcges_f32: portable AArch64 scalar vcges_f32 -- returns UINT32_MAX
 * when a >= b, else 0 (NaN operands compare false). */
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_vcges_f32(simde_float32_t a, simde_float32_t b){
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return HEDLEY_STATIC_CAST(uint32_t, vcges_f32(a, b));
#else
return (a >= b) ? UINT32_MAX : 0;
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vcges_f32
#define vcges_f32(a, b) simde_vcges_f32((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_CGE_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Christopher Moore <moore@free.fr>
*/
#if !defined(SIMDE_ARM_NEON_MVN_H)
#define SIMDE_ARM_NEON_MVN_H
#include "combine.h"
#include "get_low.h"
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* simde_vmvnq_s8: portable vmvnq_s8 -- lane-wise bitwise NOT of an
 * int8x16 vector. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vmvnq_s8(simde_int8x16_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmvnq_s8(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
/* NOR with itself is NOT. */
return vec_nor(a, a);
#else
simde_int8x16_private
r_,
a_ = simde_int8x16_to_private(a);
#if defined(SIMDE_X86_AVX512VL_NATIVE)
/* Ternary-logic truth table 0x55 selects NOT of the first operand. */
r_.m128i = _mm_ternarylogic_epi32(a_.m128i, a_.m128i, a_.m128i, 0x55);
#elif defined(SIMDE_X86_SSE2_NATIVE)
/* cmpeq(a, a) yields all-ones; andnot(a, ones) == ~a. */
r_.m128i = _mm_andnot_si128(a_.m128i, _mm_cmpeq_epi8(a_.m128i, a_.m128i));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_v128_not(a_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = ~a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = ~(a_.values[i]);
}
#endif
return simde_int8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmvnq_s8
#define vmvnq_s8(a) simde_vmvnq_s8(a)
#endif
/* simde_vmvnq_s16: portable vmvnq_s16 -- lane-wise bitwise NOT (int16x8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vmvnq_s16(simde_int16x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmvnq_s16(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_nor(a, a);
#else
simde_int16x8_private
r_,
a_ = simde_int16x8_to_private(a);
#if defined(SIMDE_X86_AVX512VL_NATIVE)
r_.m128i = _mm_ternarylogic_epi32(a_.m128i, a_.m128i, a_.m128i, 0x55);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_andnot_si128(a_.m128i, _mm_cmpeq_epi16(a_.m128i, a_.m128i));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_v128_not(a_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = ~a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = ~(a_.values[i]);
}
#endif
return simde_int16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmvnq_s16
#define vmvnq_s16(a) simde_vmvnq_s16(a)
#endif
/* simde_vmvnq_s32: portable vmvnq_s32 -- lane-wise bitwise NOT (int32x4). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vmvnq_s32(simde_int32x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmvnq_s32(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_nor(a, a);
#else
simde_int32x4_private
r_,
a_ = simde_int32x4_to_private(a);
#if defined(SIMDE_X86_AVX512VL_NATIVE)
r_.m128i = _mm_ternarylogic_epi32(a_.m128i, a_.m128i, a_.m128i, 0x55);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_andnot_si128(a_.m128i, _mm_cmpeq_epi32(a_.m128i, a_.m128i));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_v128_not(a_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = ~a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = ~(a_.values[i]);
}
#endif
return simde_int32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmvnq_s32
#define vmvnq_s32(a) simde_vmvnq_s32(a)
#endif
/* simde_vmvnq_u8: portable vmvnq_u8 -- lane-wise bitwise NOT (uint8x16);
 * identical strategy to the signed variants, only the element type differs. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vmvnq_u8(simde_uint8x16_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmvnq_u8(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
/* NOR with itself is NOT. */
return vec_nor(a, a);
#else
simde_uint8x16_private
r_,
a_ = simde_uint8x16_to_private(a);
#if defined(SIMDE_X86_AVX512VL_NATIVE)
/* Ternary-logic truth table 0x55 == NOT of the first operand. */
r_.m128i = _mm_ternarylogic_epi32(a_.m128i, a_.m128i, a_.m128i, 0x55);
#elif defined(SIMDE_X86_SSE2_NATIVE)
/* cmpeq(a, a) is all-ones; andnot(a, ones) == ~a. */
r_.m128i = _mm_andnot_si128(a_.m128i, _mm_cmpeq_epi8(a_.m128i, a_.m128i));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_v128_not(a_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = ~a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = ~(a_.values[i]);
}
#endif
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmvnq_u8
#define vmvnq_u8(a) simde_vmvnq_u8(a)
#endif
/* simde_vmvnq_u16: portable vmvnq_u16 -- lane-wise bitwise NOT (uint16x8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vmvnq_u16(simde_uint16x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmvnq_u16(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_nor(a, a);
#else
simde_uint16x8_private
r_,
a_ = simde_uint16x8_to_private(a);
#if defined(SIMDE_X86_AVX512VL_NATIVE)
r_.m128i = _mm_ternarylogic_epi32(a_.m128i, a_.m128i, a_.m128i, 0x55);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_andnot_si128(a_.m128i, _mm_cmpeq_epi16(a_.m128i, a_.m128i));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_v128_not(a_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = ~a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = ~(a_.values[i]);
}
#endif
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmvnq_u16
#define vmvnq_u16(a) simde_vmvnq_u16(a)
#endif
/* simde_vmvnq_u32: portable vmvnq_u32 -- lane-wise bitwise NOT (uint32x4). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vmvnq_u32(simde_uint32x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmvnq_u32(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_nor(a, a);
#else
simde_uint32x4_private
r_,
a_ = simde_uint32x4_to_private(a);
#if defined(SIMDE_X86_AVX512VL_NATIVE)
r_.m128i = _mm_ternarylogic_epi32(a_.m128i, a_.m128i, a_.m128i, 0x55);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_andnot_si128(a_.m128i, _mm_cmpeq_epi32(a_.m128i, a_.m128i));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_v128_not(a_.v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = ~a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = ~(a_.values[i]);
}
#endif
return simde_uint32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmvnq_u32
#define vmvnq_u32(a) simde_vmvnq_u32(a)
#endif
/* simde_vmvn_s8: portable vmvn_s8 -- lane-wise bitwise NOT of a 64-bit
 * int8x8 vector (MMX-width variant of vmvnq_s8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vmvn_s8(simde_int8x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmvn_s8(a);
#else
simde_int8x8_private
r_,
a_ = simde_int8x8_to_private(a);
#if defined(SIMDE_X86_MMX_NATIVE)
/* cmpeq(a, a) yields all-ones; andnot(a, ones) == ~a. */
r_.m64 = _mm_andnot_si64(a_.m64, _mm_cmpeq_pi8(a_.m64, a_.m64));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = ~a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = ~(a_.values[i]);
}
#endif
return simde_int8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmvn_s8
#define vmvn_s8(a) simde_vmvn_s8(a)
#endif
/* simde_vmvn_s16: portable vmvn_s16 -- lane-wise bitwise NOT (int16x4). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vmvn_s16(simde_int16x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmvn_s16(a);
#else
simde_int16x4_private
r_,
a_ = simde_int16x4_to_private(a);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_andnot_si64(a_.m64, _mm_cmpeq_pi16(a_.m64, a_.m64));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = ~a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = ~(a_.values[i]);
}
#endif
return simde_int16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmvn_s16
#define vmvn_s16(a) simde_vmvn_s16(a)
#endif
/* simde_vmvn_s32: portable vmvn_s32 -- lane-wise bitwise NOT (int32x2). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vmvn_s32(simde_int32x2_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmvn_s32(a);
#else
simde_int32x2_private
r_,
a_ = simde_int32x2_to_private(a);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_andnot_si64(a_.m64, _mm_cmpeq_pi32(a_.m64, a_.m64));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = ~a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = ~(a_.values[i]);
}
#endif
return simde_int32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmvn_s32
#define vmvn_s32(a) simde_vmvn_s32(a)
#endif
/* simde_vmvn_u8: portable vmvn_u8 -- lane-wise bitwise NOT (uint8x8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vmvn_u8(simde_uint8x8_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmvn_u8(a);
#else
simde_uint8x8_private
r_,
a_ = simde_uint8x8_to_private(a);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_andnot_si64(a_.m64, _mm_cmpeq_pi8(a_.m64, a_.m64));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = ~a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = ~(a_.values[i]);
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmvn_u8
#define vmvn_u8(a) simde_vmvn_u8(a)
#endif
/* simde_vmvn_u16: portable vmvn_u16 -- lane-wise bitwise NOT (uint16x4). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vmvn_u16(simde_uint16x4_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmvn_u16(a);
#else
simde_uint16x4_private
r_,
a_ = simde_uint16x4_to_private(a);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_andnot_si64(a_.m64, _mm_cmpeq_pi16(a_.m64, a_.m64));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = ~a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = ~(a_.values[i]);
}
#endif
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmvn_u16
#define vmvn_u16(a) simde_vmvn_u16(a)
#endif
/* simde_vmvn_u32: portable vmvn_u32 -- lane-wise bitwise NOT (uint32x2). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vmvn_u32(simde_uint32x2_t a) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmvn_u32(a);
#else
simde_uint32x2_private
r_,
a_ = simde_uint32x2_to_private(a);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_andnot_si64(a_.m64, _mm_cmpeq_pi32(a_.m64, a_.m64));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = ~a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = ~(a_.values[i]);
}
#endif
return simde_uint32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmvn_u32
#define vmvn_u32(a) simde_vmvn_u32(a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_MVN_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_QRDMULH_LANE_H)
#define SIMDE_ARM_NEON_QRDMULH_LANE_H
#include "types.h"
#include "qrdmulh.h"
#include "dup_lane.h"
#include "get_lane.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* simde_vqrdmulhs_lane_s32: scalar saturating rounding doubling multiply-high
 * of a with one lane of a 64-bit vector. The clang < 11 path wraps the native
 * intrinsic to silence a spurious vector-conversion diagnostic. */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0)
#define simde_vqrdmulhs_lane_s32(a, v, lane) \
SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vqrdmulhs_lane_s32((a), (v), (lane)))
#else
#define simde_vqrdmulhs_lane_s32(a, v, lane) vqrdmulhs_lane_s32((a), (v), (lane))
#endif
#else
/* Fallback: extract the lane, then use the scalar qrdmulh helper. */
#define simde_vqrdmulhs_lane_s32(a, v, lane) simde_vqrdmulhs_s32((a), simde_vget_lane_s32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqrdmulhs_lane_s32
#define vqrdmulhs_lane_s32(a, v, lane) simde_vqrdmulhs_lane_s32((a), (v), (lane))
#endif
/* simde_vqrdmulhs_laneq_s32: as above but the lane comes from a 128-bit
 * ("q") vector. */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0)
#define simde_vqrdmulhs_laneq_s32(a, v, lane) \
SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vqrdmulhs_laneq_s32((a), (v), (lane)))
#else
#define simde_vqrdmulhs_laneq_s32(a, v, lane) vqrdmulhs_laneq_s32((a), (v), (lane))
#endif
#else
#define simde_vqrdmulhs_laneq_s32(a, v, lane) simde_vqrdmulhs_s32((a), simde_vgetq_lane_s32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqrdmulhs_laneq_s32
#define vqrdmulhs_laneq_s32(a, v, lane) simde_vqrdmulhs_laneq_s32((a), (v), (lane))
#endif
/* vqrdmulh_lane / vqrdmulhq_lane: vector-by-lane variants. The fallbacks
 * broadcast the selected lane with dup and reuse the full-vector qrdmulh. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vqrdmulh_lane_s16(a, v, lane) vqrdmulh_lane_s16((a), (v), (lane))
#else
#define simde_vqrdmulh_lane_s16(a, v, lane) simde_vqrdmulh_s16((a), simde_vdup_lane_s16((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqrdmulh_lane_s16
#define vqrdmulh_lane_s16(a, v, lane) simde_vqrdmulh_lane_s16((a), (v), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vqrdmulh_lane_s32(a, v, lane) vqrdmulh_lane_s32((a), (v), (lane))
#else
#define simde_vqrdmulh_lane_s32(a, v, lane) simde_vqrdmulh_s32((a), simde_vdup_lane_s32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqrdmulh_lane_s32
#define vqrdmulh_lane_s32(a, v, lane) simde_vqrdmulh_lane_s32((a), (v), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vqrdmulhq_lane_s16(a, v, lane) vqrdmulhq_lane_s16((a), (v), (lane))
#else
#define simde_vqrdmulhq_lane_s16(a, v, lane) simde_vqrdmulhq_s16((a), simde_vdupq_lane_s16((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqrdmulhq_lane_s16
#define vqrdmulhq_lane_s16(a, v, lane) simde_vqrdmulhq_lane_s16((a), (v), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vqrdmulhq_lane_s32(a, v, lane) vqrdmulhq_lane_s32((a), (v), (lane))
#else
#define simde_vqrdmulhq_lane_s32(a, v, lane) simde_vqrdmulhq_s32((a), simde_vdupq_lane_s32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqrdmulhq_lane_s32
#define vqrdmulhq_lane_s32(a, v, lane) simde_vqrdmulhq_lane_s32((a), (v), (lane))
#endif
/* vqrdmulh_laneq: lane taken from a 128-bit vector; native only on AArch64. */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vqrdmulh_laneq_s16(a, v, lane) vqrdmulh_laneq_s16((a), (v), (lane))
#else
#define simde_vqrdmulh_laneq_s16(a, v, lane) simde_vqrdmulh_s16((a), simde_vdup_laneq_s16((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqrdmulh_laneq_s16
#define vqrdmulh_laneq_s16(a, v, lane) simde_vqrdmulh_laneq_s16((a), (v), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vqrdmulh_laneq_s32(a, v, lane) vqrdmulh_laneq_s32((a), (v), (lane))
#else
#define simde_vqrdmulh_laneq_s32(a, v, lane) simde_vqrdmulh_s32((a), simde_vdup_laneq_s32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqrdmulh_laneq_s32
#define vqrdmulh_laneq_s32(a, v, lane) simde_vqrdmulh_laneq_s32((a), (v), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vqrdmulhq_laneq_s16(a, v, lane) vqrdmulhq_laneq_s16((a), (v), (lane))
#else
#define simde_vqrdmulhq_laneq_s16(a, v, lane) simde_vqrdmulhq_s16((a), simde_vdupq_laneq_s16((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqrdmulhq_laneq_s16
#define vqrdmulhq_laneq_s16(a, v, lane) simde_vqrdmulhq_laneq_s16((a), (v), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vqrdmulhq_laneq_s32(a, v, lane) vqrdmulhq_laneq_s32((a), (v), (lane))
#else
#define simde_vqrdmulhq_laneq_s32(a, v, lane) simde_vqrdmulhq_s32((a), simde_vdupq_laneq_s32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqrdmulhq_laneq_s32
#define vqrdmulhq_laneq_s32(a, v, lane) simde_vqrdmulhq_laneq_s32((a), (v), (lane))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_QRDMULH_LANE_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/mla.h | .h | 8,908 | 297 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_MLA_H)
#define SIMDE_ARM_NEON_MLA_H
#include "types.h"
#include "add.h"
#include "mul.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vmla family, 64-bit ("D register") vectors: multiply-accumulate,
 * r[i] = a[i] + b[i] * c[i] for every lane.
 * Each function calls the native NEON intrinsic when available
 * (SIMDE_ARM_NEON_A32V7_NATIVE for most, A64V8 for the f64 form) and
 * otherwise composes the result from the portable simde add/mul shims.
 * The *_ENABLE_NATIVE_ALIASES blocks re-export the plain NEON names. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vmla_f32(simde_float32x2_t a, simde_float32x2_t b, simde_float32x2_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmla_f32(a, b, c);
#else
/* Unfused fallback: multiply, then add (two roundings per lane). */
return simde_vadd_f32(simde_vmul_f32(b, c), a);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmla_f32
#define vmla_f32(a, b, c) simde_vmla_f32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vmla_f64(simde_float64x1_t a, simde_float64x1_t b, simde_float64x1_t c) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmla_f64(a, b, c);
#else
return simde_vadd_f64(simde_vmul_f64(b, c), a);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmla_f64
#define vmla_f64(a, b, c) simde_vmla_f64((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vmla_s8(simde_int8x8_t a, simde_int8x8_t b, simde_int8x8_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmla_s8(a, b, c);
#else
return simde_vadd_s8(simde_vmul_s8(b, c), a);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmla_s8
#define vmla_s8(a, b, c) simde_vmla_s8((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vmla_s16(simde_int16x4_t a, simde_int16x4_t b, simde_int16x4_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmla_s16(a, b, c);
#else
return simde_vadd_s16(simde_vmul_s16(b, c), a);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmla_s16
#define vmla_s16(a, b, c) simde_vmla_s16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vmla_s32(simde_int32x2_t a, simde_int32x2_t b, simde_int32x2_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmla_s32(a, b, c);
#else
return simde_vadd_s32(simde_vmul_s32(b, c), a);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmla_s32
#define vmla_s32(a, b, c) simde_vmla_s32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vmla_u8(simde_uint8x8_t a, simde_uint8x8_t b, simde_uint8x8_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmla_u8(a, b, c);
#else
return simde_vadd_u8(simde_vmul_u8(b, c), a);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmla_u8
#define vmla_u8(a, b, c) simde_vmla_u8((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vmla_u16(simde_uint16x4_t a, simde_uint16x4_t b, simde_uint16x4_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmla_u16(a, b, c);
#else
return simde_vadd_u16(simde_vmul_u16(b, c), a);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmla_u16
#define vmla_u16(a, b, c) simde_vmla_u16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vmla_u32(simde_uint32x2_t a, simde_uint32x2_t b, simde_uint32x2_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmla_u32(a, b, c);
#else
return simde_vadd_u32(simde_vmul_u32(b, c), a);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmla_u32
#define vmla_u32(a, b, c) simde_vmla_u32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vmlaq_f32(simde_float32x4_t a, simde_float32x4_t b, simde_float32x4_t c) {
/* Multiply-accumulate on 128-bit vectors: r[i] = a[i] + b[i] * c[i].
 * Dispatches to the best available backend; the final branch is the
 * portable composition of the simde mul/add shims.
 * NOTE(review): the x86 FMA path uses a fused multiply-add (single
 * rounding), so its low-order bits may differ from the unfused fallback;
 * this mirrors the backend choice already present in the original code. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmlaq_f32(a, b, c);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_madd(b, c, a);
#elif defined(SIMDE_X86_FMA_NATIVE)
/* Fix: the original nested a second "#if defined(SIMDE_X86_FMA_NATIVE)"
 * around the assignment below.  Inside this #elif branch that condition
 * is always true, and had it ever been false r_ would have been returned
 * uninitialized; the redundant guard is removed. */
simde_float32x4_private
r_,
a_ = simde_float32x4_to_private(a),
b_ = simde_float32x4_to_private(b),
c_ = simde_float32x4_to_private(c);
r_.m128 = _mm_fmadd_ps(b_.m128, c_.m128, a_.m128);
return simde_float32x4_from_private(r_);
#else
return simde_vaddq_f32(simde_vmulq_f32(b, c), a);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlaq_f32
#define vmlaq_f32(a, b, c) simde_vmlaq_f32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vmlaq_f64(simde_float64x2_t a, simde_float64x2_t b, simde_float64x2_t c) {
/* Multiply-accumulate on 128-bit double vectors: r[i] = a[i] + b[i] * c[i].
 * NOTE(review): the x86 FMA path is fused (single rounding) while the
 * fallback is mul-then-add; low-order-bit differences are possible, as in
 * the original backend selection. */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmlaq_f64(a, b, c);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
return vec_madd(b, c, a);
#elif defined(SIMDE_X86_FMA_NATIVE)
/* Fix: removed the redundant nested "#if defined(SIMDE_X86_FMA_NATIVE)"
 * guard around the assignment; it is always true inside this #elif, and
 * had it been false r_ would have been returned uninitialized. */
simde_float64x2_private
r_,
a_ = simde_float64x2_to_private(a),
b_ = simde_float64x2_to_private(b),
c_ = simde_float64x2_to_private(c);
r_.m128d = _mm_fmadd_pd(b_.m128d, c_.m128d, a_.m128d);
return simde_float64x2_from_private(r_);
#else
return simde_vaddq_f64(simde_vmulq_f64(b, c), a);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmlaq_f64
#define vmlaq_f64(a, b, c) simde_vmlaq_f64((a), (b), (c))
#endif
/* vmlaq integer family, 128-bit ("Q register") vectors:
 * r[i] = a[i] + b[i] * c[i] per lane, with wrap-around integer semantics.
 * Native NEON when SIMDE_ARM_NEON_A32V7_NATIVE; otherwise composed from
 * the portable simde addq/mulq shims. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vmlaq_s8(simde_int8x16_t a, simde_int8x16_t b, simde_int8x16_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmlaq_s8(a, b, c);
#else
return simde_vaddq_s8(simde_vmulq_s8(b, c), a);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlaq_s8
#define vmlaq_s8(a, b, c) simde_vmlaq_s8((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vmlaq_s16(simde_int16x8_t a, simde_int16x8_t b, simde_int16x8_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmlaq_s16(a, b, c);
#else
return simde_vaddq_s16(simde_vmulq_s16(b, c), a);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlaq_s16
#define vmlaq_s16(a, b, c) simde_vmlaq_s16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vmlaq_s32(simde_int32x4_t a, simde_int32x4_t b, simde_int32x4_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmlaq_s32(a, b, c);
#else
return simde_vaddq_s32(simde_vmulq_s32(b, c), a);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlaq_s32
#define vmlaq_s32(a, b, c) simde_vmlaq_s32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vmlaq_u8(simde_uint8x16_t a, simde_uint8x16_t b, simde_uint8x16_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmlaq_u8(a, b, c);
#else
return simde_vaddq_u8(simde_vmulq_u8(b, c), a);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlaq_u8
#define vmlaq_u8(a, b, c) simde_vmlaq_u8((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vmlaq_u16(simde_uint16x8_t a, simde_uint16x8_t b, simde_uint16x8_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmlaq_u16(a, b, c);
#else
return simde_vaddq_u16(simde_vmulq_u16(b, c), a);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlaq_u16
#define vmlaq_u16(a, b, c) simde_vmlaq_u16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vmlaq_u32(simde_uint32x4_t a, simde_uint32x4_t b, simde_uint32x4_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmlaq_u32(a, b, c);
#else
return simde_vaddq_u32(simde_vmulq_u32(b, c), a);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlaq_u32
#define vmlaq_u32(a, b, c) simde_vmlaq_u32((a), (b), (c))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_MLA_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/fma_lane.h | .h | 8,714 | 226 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Atharva Nimbalkar <atharvakn@gmail.com>
*/
#if !defined(SIMDE_ARM_NEON_FMA_LANE_H)
#define SIMDE_ARM_NEON_FMA_LANE_H
#include "add.h"
#include "dup_n.h"
#include "get_lane.h"
#include "mul.h"
#include "mul_lane.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Scalar fused multiply-add "by lane": result = a + b * v[lane], where a
 * and b are scalars and the multiplier is drawn from a lane of a vector.
 * Native AArch64+FMA targets use the real intrinsic (fused, single
 * rounding).  The portable fallback broadcasts both terms into 1-element
 * vectors, adds them, and extracts lane 0; because it multiplies and adds
 * separately it is NOT fused, so the last bit may differ from hardware. */
/* simde_vfmad_lane_f64 */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA)
/* clang < 11 emits a spurious vector-conversion diagnostic; silence it. */
#if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0)
#define simde_vfmad_lane_f64(a, b, v, lane) \
SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vfmad_lane_f64(a, b, v, lane))
#else
#define simde_vfmad_lane_f64(a, b, v, lane) vfmad_lane_f64((a), (b), (v), (lane))
#endif
#else
#define simde_vfmad_lane_f64(a, b, v, lane) \
simde_vget_lane_f64( \
simde_vadd_f64( \
simde_vdup_n_f64(a), \
simde_vdup_n_f64(simde_vmuld_lane_f64(b, v, lane)) \
), \
0 \
)
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vfmad_lane_f64
#define vfmad_lane_f64(a, b, v, lane) simde_vfmad_lane_f64(a, b, v, lane)
#endif
/* simde_vfmad_laneq_f64 */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA)
#if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0)
#define simde_vfmad_laneq_f64(a, b, v, lane) \
SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vfmad_laneq_f64(a, b, v, lane))
#else
#define simde_vfmad_laneq_f64(a, b, v, lane) vfmad_laneq_f64((a), (b), (v), (lane))
#endif
#else
#define simde_vfmad_laneq_f64(a, b, v, lane) \
simde_vget_lane_f64( \
simde_vadd_f64( \
simde_vdup_n_f64(a), \
simde_vdup_n_f64(simde_vmuld_laneq_f64(b, v, lane)) \
), \
0 \
)
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vfmad_laneq_f64
#define vfmad_laneq_f64(a, b, v, lane) simde_vfmad_laneq_f64(a, b, v, lane)
#endif
/* simde_vfmas_lane_f32 */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA)
#if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0)
#define simde_vfmas_lane_f32(a, b, v, lane) \
SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vfmas_lane_f32(a, b, v, lane))
#else
#define simde_vfmas_lane_f32(a, b, v, lane) vfmas_lane_f32((a), (b), (v), (lane))
#endif
#else
#define simde_vfmas_lane_f32(a, b, v, lane) \
simde_vget_lane_f32( \
simde_vadd_f32( \
simde_vdup_n_f32(a), \
simde_vdup_n_f32(simde_vmuls_lane_f32(b, v, lane)) \
), \
0 \
)
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vfmas_lane_f32
#define vfmas_lane_f32(a, b, v, lane) simde_vfmas_lane_f32(a, b, v, lane)
#endif
/* simde_vfmas_laneq_f32 */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA)
#if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0)
#define simde_vfmas_laneq_f32(a, b, v, lane) \
SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vfmas_laneq_f32(a, b, v, lane))
#else
#define simde_vfmas_laneq_f32(a, b, v, lane) vfmas_laneq_f32((a), (b), (v), (lane))
#endif
#else
#define simde_vfmas_laneq_f32(a, b, v, lane) \
simde_vget_lane_f32( \
simde_vadd_f32( \
simde_vdup_n_f32(a), \
simde_vdup_n_f32(simde_vmuls_laneq_f32(b, v, lane)) \
), \
0 \
)
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vfmas_laneq_f32
#define vfmas_laneq_f32(a, b, v, lane) simde_vfmas_laneq_f32(a, b, v, lane)
#endif
/* Vector fused multiply-add "by lane": r[i] = a[i] + b[i] * v[lane].
 * "lane" variants index a 64-bit vector, "laneq" variants a 128-bit one;
 * "q" in the function name means the accumulated vectors are 128-bit.
 * Native AArch64+FMA uses the fused intrinsic; the fallback composes
 * separate mul-by-lane and add shims (two roundings, not fused). */
/* simde_vfma_lane_f32 */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA)
#define simde_vfma_lane_f32(a, b, v, lane) vfma_lane_f32(a, b, v, lane)
#else
#define simde_vfma_lane_f32(a, b, v, lane) simde_vadd_f32(a, simde_vmul_lane_f32(b, v, lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vfma_lane_f32
#define vfma_lane_f32(a, b, v, lane) simde_vfma_lane_f32(a, b, v, lane)
#endif
/* simde_vfma_lane_f64 */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA)
#define simde_vfma_lane_f64(a, b, v, lane) vfma_lane_f64((a), (b), (v), (lane))
#else
#define simde_vfma_lane_f64(a, b, v, lane) simde_vadd_f64(a, simde_vmul_lane_f64(b, v, lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vfma_lane_f64
#define vfma_lane_f64(a, b, v, lane) simde_vfma_lane_f64(a, b, v, lane)
#endif
/* simde_vfma_laneq_f32 */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA)
#define simde_vfma_laneq_f32(a, b, v, lane) vfma_laneq_f32((a), (b), (v), (lane))
#else
#define simde_vfma_laneq_f32(a, b, v, lane) simde_vadd_f32(a, simde_vmul_laneq_f32(b, v, lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vfma_laneq_f32
#define vfma_laneq_f32(a, b, v, lane) simde_vfma_laneq_f32(a, b, v, lane)
#endif
/* simde_vfma_laneq_f64 */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA)
#define simde_vfma_laneq_f64(a, b, v, lane) vfma_laneq_f64((a), (b), (v), (lane))
#else
#define simde_vfma_laneq_f64(a, b, v, lane) simde_vadd_f64(a, simde_vmul_laneq_f64(b, v, lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vfma_laneq_f64
#define vfma_laneq_f64(a, b, v, lane) simde_vfma_laneq_f64(a, b, v, lane)
#endif
/* simde_vfmaq_lane_f64 */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA)
#define simde_vfmaq_lane_f64(a, b, v, lane) vfmaq_lane_f64((a), (b), (v), (lane))
#else
#define simde_vfmaq_lane_f64(a, b, v, lane) simde_vaddq_f64(a, simde_vmulq_lane_f64(b, v, lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vfmaq_lane_f64
#define vfmaq_lane_f64(a, b, v, lane) simde_vfmaq_lane_f64(a, b, v, lane)
#endif
/* simde_vfmaq_lane_f32 */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA)
#define simde_vfmaq_lane_f32(a, b, v, lane) vfmaq_lane_f32((a), (b), (v), (lane))
#else
#define simde_vfmaq_lane_f32(a, b, v, lane) simde_vaddq_f32(a, simde_vmulq_lane_f32(b, v, lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vfmaq_lane_f32
#define vfmaq_lane_f32(a, b, v, lane) simde_vfmaq_lane_f32(a, b, v, lane)
#endif
/* simde_vfmaq_laneq_f32 */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA)
#define simde_vfmaq_laneq_f32(a, b, v, lane) vfmaq_laneq_f32((a), (b), (v), (lane))
#else
#define simde_vfmaq_laneq_f32(a, b, v, lane) \
simde_vaddq_f32(a, simde_vmulq_laneq_f32(b, v, lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vfmaq_laneq_f32
#define vfmaq_laneq_f32(a, b, v, lane) simde_vfmaq_laneq_f32(a, b, v, lane)
#endif
/* simde_vfmaq_laneq_f64 */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA)
#define simde_vfmaq_laneq_f64(a, b, v, lane) vfmaq_laneq_f64((a), (b), (v), (lane))
#else
#define simde_vfmaq_laneq_f64(a, b, v, lane) \
simde_vaddq_f64(a, simde_vmulq_laneq_f64(b, v, lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vfmaq_laneq_f64
#define vfmaq_laneq_f64(a, b, v, lane) simde_vfmaq_laneq_f64(a, b, v, lane)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_FMA_LANE_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/ld1.h | .h | 12,207 | 437 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2021 Zhi An Ng <zhin@google.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_LD1_H)
#define SIMDE_ARM_NEON_LD1_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vld1 family, 64-bit vectors: load N consecutive elements from ptr into
 * a D-register-sized vector (N = 8 bytes / element size).  Native NEON
 * when available (A32V7 for most; A64V8 for f64; f16 additionally needs
 * SIMDE_ARM_NEON_FP16); otherwise a plain memcpy into the private
 * representation, which also keeps the load alignment-agnostic. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float16x4_t
simde_vld1_f16(simde_float16 const ptr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_ARM_NEON_FP16)
return vld1_f16(ptr);
#else
simde_float16x4_private r_;
simde_memcpy(&r_, ptr, sizeof(r_));
return simde_float16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_f16
#define vld1_f16(a) simde_vld1_f16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vld1_f32(simde_float32 const ptr[HEDLEY_ARRAY_PARAM(2)]) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1_f32(ptr);
#else
simde_float32x2_private r_;
simde_memcpy(&r_, ptr, sizeof(r_));
return simde_float32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_f32
#define vld1_f32(a) simde_vld1_f32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vld1_f64(simde_float64 const ptr[HEDLEY_ARRAY_PARAM(1)]) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vld1_f64(ptr);
#else
simde_float64x1_private r_;
simde_memcpy(&r_, ptr, sizeof(r_));
return simde_float64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vld1_f64
#define vld1_f64(a) simde_vld1_f64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vld1_s8(int8_t const ptr[HEDLEY_ARRAY_PARAM(8)]) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1_s8(ptr);
#else
simde_int8x8_private r_;
simde_memcpy(&r_, ptr, sizeof(r_));
return simde_int8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_s8
#define vld1_s8(a) simde_vld1_s8((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vld1_s16(int16_t const ptr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1_s16(ptr);
#else
simde_int16x4_private r_;
simde_memcpy(&r_, ptr, sizeof(r_));
return simde_int16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_s16
#define vld1_s16(a) simde_vld1_s16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vld1_s32(int32_t const ptr[HEDLEY_ARRAY_PARAM(2)]) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1_s32(ptr);
#else
simde_int32x2_private r_;
simde_memcpy(&r_, ptr, sizeof(r_));
return simde_int32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_s32
#define vld1_s32(a) simde_vld1_s32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vld1_s64(int64_t const ptr[HEDLEY_ARRAY_PARAM(1)]) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1_s64(ptr);
#else
simde_int64x1_private r_;
simde_memcpy(&r_, ptr, sizeof(r_));
return simde_int64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_s64
#define vld1_s64(a) simde_vld1_s64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vld1_u8(uint8_t const ptr[HEDLEY_ARRAY_PARAM(8)]) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1_u8(ptr);
#else
simde_uint8x8_private r_;
simde_memcpy(&r_, ptr, sizeof(r_));
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_u8
#define vld1_u8(a) simde_vld1_u8((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vld1_u16(uint16_t const ptr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1_u16(ptr);
#else
simde_uint16x4_private r_;
simde_memcpy(&r_, ptr, sizeof(r_));
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_u16
#define vld1_u16(a) simde_vld1_u16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vld1_u32(uint32_t const ptr[HEDLEY_ARRAY_PARAM(2)]) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1_u32(ptr);
#else
simde_uint32x2_private r_;
simde_memcpy(&r_, ptr, sizeof(r_));
return simde_uint32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_u32
#define vld1_u32(a) simde_vld1_u32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vld1_u64(uint64_t const ptr[HEDLEY_ARRAY_PARAM(1)]) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1_u64(ptr);
#else
simde_uint64x1_private r_;
simde_memcpy(&r_, ptr, sizeof(r_));
return simde_uint64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1_u64
#define vld1_u64(a) simde_vld1_u64((a))
#endif
/* vld1q family, 128-bit vectors: load N consecutive elements from ptr
 * into a Q-register-sized vector (N = 16 bytes / element size).  Native
 * NEON when available; on WebAssembly SIMD128 a single wasm_v128_load is
 * used; otherwise a plain memcpy into the private representation. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float16x8_t
simde_vld1q_f16(simde_float16 const ptr[HEDLEY_ARRAY_PARAM(8)]) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_ARM_NEON_FP16)
return vld1q_f16(ptr);
#else
simde_float16x8_private r_;
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_v128_load(ptr);
#else
simde_memcpy(&r_, ptr, sizeof(r_));
#endif
return simde_float16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_f16
#define vld1q_f16(a) simde_vld1q_f16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vld1q_f32(simde_float32 const ptr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1q_f32(ptr);
#else
simde_float32x4_private r_;
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_v128_load(ptr);
#else
simde_memcpy(&r_, ptr, sizeof(r_));
#endif
return simde_float32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_f32
#define vld1q_f32(a) simde_vld1q_f32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vld1q_f64(simde_float64 const ptr[HEDLEY_ARRAY_PARAM(2)]) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vld1q_f64(ptr);
#else
simde_float64x2_private r_;
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_v128_load(ptr);
#else
simde_memcpy(&r_, ptr, sizeof(r_));
#endif
return simde_float64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vld1q_f64
#define vld1q_f64(a) simde_vld1q_f64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vld1q_s8(int8_t const ptr[HEDLEY_ARRAY_PARAM(16)]) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1q_s8(ptr);
#else
simde_int8x16_private r_;
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_v128_load(ptr);
#else
simde_memcpy(&r_, ptr, sizeof(r_));
#endif
return simde_int8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_s8
#define vld1q_s8(a) simde_vld1q_s8((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vld1q_s16(int16_t const ptr[HEDLEY_ARRAY_PARAM(8)]) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1q_s16(ptr);
#else
simde_int16x8_private r_;
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_v128_load(ptr);
#else
simde_memcpy(&r_, ptr, sizeof(r_));
#endif
return simde_int16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_s16
#define vld1q_s16(a) simde_vld1q_s16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vld1q_s32(int32_t const ptr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1q_s32(ptr);
#else
simde_int32x4_private r_;
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_v128_load(ptr);
#else
simde_memcpy(&r_, ptr, sizeof(r_));
#endif
return simde_int32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_s32
#define vld1q_s32(a) simde_vld1q_s32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vld1q_s64(int64_t const ptr[HEDLEY_ARRAY_PARAM(2)]) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1q_s64(ptr);
#else
simde_int64x2_private r_;
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_v128_load(ptr);
#else
simde_memcpy(&r_, ptr, sizeof(r_));
#endif
return simde_int64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_s64
#define vld1q_s64(a) simde_vld1q_s64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vld1q_u8(uint8_t const ptr[HEDLEY_ARRAY_PARAM(16)]) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1q_u8(ptr);
#else
simde_uint8x16_private r_;
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_v128_load(ptr);
#else
simde_memcpy(&r_, ptr, sizeof(r_));
#endif
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_u8
#define vld1q_u8(a) simde_vld1q_u8((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vld1q_u16(uint16_t const ptr[HEDLEY_ARRAY_PARAM(8)]) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1q_u16(ptr);
#else
simde_uint16x8_private r_;
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_v128_load(ptr);
#else
simde_memcpy(&r_, ptr, sizeof(r_));
#endif
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_u16
#define vld1q_u16(a) simde_vld1q_u16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vld1q_u32(uint32_t const ptr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1q_u32(ptr);
#else
simde_uint32x4_private r_;
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_v128_load(ptr);
#else
simde_memcpy(&r_, ptr, sizeof(r_));
#endif
return simde_uint32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_u32
#define vld1q_u32(a) simde_vld1q_u32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vld1q_u64(uint64_t const ptr[HEDLEY_ARRAY_PARAM(2)]) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vld1q_u64(ptr);
#else
simde_uint64x2_private r_;
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_v128_load(ptr);
#else
simde_memcpy(&r_, ptr, sizeof(r_));
#endif
return simde_uint64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_u64
#define vld1q_u64(a) simde_vld1q_u64((a))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_LD1_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Christopher Moore <moore@free.fr>
*/
#if !defined(SIMDE_ARM_NEON_QTBL_H)
#define SIMDE_ARM_NEON_QTBL_H
#include "reinterpret.h"
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Portable vqtbl1_u8: 8-lane byte table lookup into a 16-byte table.
 * Each result lane is t[idx[i]] when idx[i] < 16, otherwise 0. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vqtbl1_u8(simde_uint8x16_t t, simde_uint8x8_t idx) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqtbl1_u8(t, idx);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* ARMv7 tables are 64-bit: view the 128-bit table as a pair of d-regs. */
uint8x8x2_t split;
simde_memcpy(&split, &t, sizeof(split));
return vtbl2_u8(split, idx);
#else
simde_uint8x16_private t_ = simde_uint8x16_to_private(t);
simde_uint8x8_private
r_,
idx_ = simde_uint8x8_to_private(idx);
#if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
__m128i idx128 = _mm_set1_epi64(idx_.m64);
/* _mm_shuffle_epi8 zeroes lanes whose index byte has the high bit set;
 * the signed compare > 15 forces that bit on for out-of-range indices
 * 16..127 (indices >= 128 already have it set). */
__m128i r128 = _mm_shuffle_epi8(t_.m128i, _mm_or_si128(idx128, _mm_cmpgt_epi8(idx128, _mm_set1_epi8(15))));
r_.m64 = _mm_movepi64_pi64(r128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (idx_.values[i] < 16) ? t_.values[idx_.values[i]] : 0;
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqtbl1_u8
#define vqtbl1_u8(t, idx) simde_vqtbl1_u8((t), (idx))
#endif
/* Portable vqtbl1_s8: signed variant, implemented by reinterpreting to the
 * unsigned lookup (bit pattern is what matters for a table lookup). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vqtbl1_s8(simde_int8x16_t t, simde_uint8x8_t idx) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqtbl1_s8(t, idx);
#else
return simde_vreinterpret_s8_u8(simde_vqtbl1_u8(simde_vreinterpretq_u8_s8(t), idx));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqtbl1_s8
#define vqtbl1_s8(t, idx) simde_vqtbl1_s8((t), (idx))
#endif
#if !defined(SIMDE_BUG_INTEL_857088)
/* Portable vqtbl2_u8: 8-lane lookup into a 32-byte table (two 16-byte
 * vectors). Out-of-range indices (>= 32) yield 0. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vqtbl2_u8(simde_uint8x16x2_t t, simde_uint8x8_t idx) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqtbl2_u8(t, idx);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* View the two q-regs as four d-regs and use the 4-table ARMv7 lookup. */
uint8x8x4_t split;
simde_memcpy(&split, &t, sizeof(split));
return vtbl4_u8(split, idx);
#else
simde_uint8x16_private t_[2] = { simde_uint8x16_to_private(t.val[0]), simde_uint8x16_to_private(t.val[1]) };
simde_uint8x8_private
r_,
idx_ = simde_uint8x8_to_private(idx);
#if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
__m128i idx128 = _mm_set1_epi64(idx_.m64);
/* Set the shuffle "zero" bit for indices >= 32 (see vqtbl1_u8). */
idx128 = _mm_or_si128(idx128, _mm_cmpgt_epi8(idx128, _mm_set1_epi8(31)));
__m128i r128_0 = _mm_shuffle_epi8(t_[0].m128i, idx128);
__m128i r128_1 = _mm_shuffle_epi8(t_[1].m128i, idx128);
/* Shifting left by 3 moves index bit 4 (the 16s bit) into bit 7, which
 * _mm_blendv_epi8 uses to select between the two table halves. */
__m128i r128 = _mm_blendv_epi8(r128_0, r128_1, _mm_slli_epi32(idx128, 3));
r_.m64 = _mm_movepi64_pi64(r128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (idx_.values[i] < 32) ? t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : 0;
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqtbl2_u8
#define vqtbl2_u8(t, idx) simde_vqtbl2_u8((t), (idx))
#endif
/* Portable vqtbl2_s8: signed variant; memcpy reinterprets the signed table
 * pair as unsigned (no per-element conversion needed). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vqtbl2_s8(simde_int8x16x2_t t, simde_uint8x8_t idx) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqtbl2_s8(t, idx);
#else
simde_uint8x16x2_t t_;
simde_memcpy(&t_, &t, sizeof(t_));
return simde_vreinterpret_s8_u8(simde_vqtbl2_u8(t_, idx));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqtbl2_s8
#define vqtbl2_s8(t, idx) simde_vqtbl2_s8((t), (idx))
#endif
/* Portable vqtbl3_u8: 8-lane lookup into a 48-byte table (three 16-byte
 * vectors). Out-of-range indices (>= 48) yield 0. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vqtbl3_u8(simde_uint8x16x3_t t, simde_uint8x8_t idx) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqtbl3_u8(t, idx);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Split into bytes 0..31 (vtbl4) and 32..47 (vtbl2, with idx rebased by
 * -32). Each vtbl* returns 0 for its own out-of-range lanes, so the two
 * partial results can simply be ORed together. */
uint8x8_t idx_hi = vsub_u8(idx, vdup_n_u8(32));
uint8x8x4_t split_lo;
uint8x8x2_t split_hi;
simde_memcpy(&split_lo, &t.val[0], sizeof(split_lo));
simde_memcpy(&split_hi, &t.val[2], sizeof(split_hi));
uint8x8_t lo = vtbl4_u8(split_lo, idx);
uint8x8_t hi = vtbl2_u8(split_hi, idx_hi);
return vorr_u8(lo, hi);
#else
simde_uint8x16_private t_[3] = { simde_uint8x16_to_private(t.val[0]), simde_uint8x16_to_private(t.val[1]),
simde_uint8x16_to_private(t.val[2]) };
simde_uint8x8_private
r_,
idx_ = simde_uint8x8_to_private(idx);
#if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
__m128i idx128 = _mm_set1_epi64(idx_.m64);
/* Set the shuffle "zero" bit for indices >= 48, then select among the
 * three shuffled tables using index bits 4 (<<3) and 5 (<<2) moved into
 * the blend-control sign bit. */
idx128 = _mm_or_si128(idx128, _mm_cmpgt_epi8(idx128, _mm_set1_epi8(47)));
__m128i r128_0 = _mm_shuffle_epi8(t_[0].m128i, idx128);
__m128i r128_1 = _mm_shuffle_epi8(t_[1].m128i, idx128);
__m128i r128_01 = _mm_blendv_epi8(r128_0, r128_1, _mm_slli_epi32(idx128, 3));
__m128i r128_2 = _mm_shuffle_epi8(t_[2].m128i, idx128);
__m128i r128 = _mm_blendv_epi8(r128_01, r128_2, _mm_slli_epi32(idx128, 2));
r_.m64 = _mm_movepi64_pi64(r128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (idx_.values[i] < 48) ? t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : 0;
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqtbl3_u8
#define vqtbl3_u8(t, idx) simde_vqtbl3_u8((t), (idx))
#endif
/* Portable vqtbl3_s8: signed variant; memcpy reinterprets the signed table
 * triple as unsigned and defers to simde_vqtbl3_u8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vqtbl3_s8(simde_int8x16x3_t t, simde_uint8x8_t idx) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqtbl3_s8(t, idx);
#else
simde_uint8x16x3_t t_;
simde_memcpy(&t_, &t, sizeof(t_));
return simde_vreinterpret_s8_u8(simde_vqtbl3_u8(t_, idx));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqtbl3_s8
#define vqtbl3_s8(t, idx) simde_vqtbl3_s8((t), (idx))
#endif
/* Portable vqtbl4_u8: 8-lane lookup into a 64-byte table (four 16-byte
 * vectors). Out-of-range indices (>= 64) yield 0. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vqtbl4_u8(simde_uint8x16x4_t t, simde_uint8x8_t idx) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqtbl4_u8(t, idx);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Two vtbl4 lookups over bytes 0..31 and 32..63 (idx rebased by -32);
 * each returns 0 out of range, so the partial results are ORed. */
uint8x8_t idx_hi = vsub_u8(idx, vdup_n_u8(32));
uint8x8x4_t split_lo;
uint8x8x4_t split_hi;
simde_memcpy(&split_lo, &t.val[0], sizeof(split_lo));
simde_memcpy(&split_hi, &t.val[2], sizeof(split_hi));
uint8x8_t lo = vtbl4_u8(split_lo, idx);
uint8x8_t hi = vtbl4_u8(split_hi, idx_hi);
return vorr_u8(lo, hi);
#else
simde_uint8x16_private t_[4] = { simde_uint8x16_to_private(t.val[0]), simde_uint8x16_to_private(t.val[1]),
simde_uint8x16_to_private(t.val[2]), simde_uint8x16_to_private(t.val[3]) };
simde_uint8x8_private
r_,
idx_ = simde_uint8x8_to_private(idx);
#if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
__m128i idx128 = _mm_set1_epi64(idx_.m64);
/* Zero-bit for indices >= 64; then a 2-level blend tree selects among
 * the four shuffled tables via index bits 4 (<<3) and 5 (<<2). */
idx128 = _mm_or_si128(idx128, _mm_cmpgt_epi8(idx128, _mm_set1_epi8(63)));
__m128i idx128_shl3 = _mm_slli_epi32(idx128, 3);
__m128i r128_0 = _mm_shuffle_epi8(t_[0].m128i, idx128);
__m128i r128_1 = _mm_shuffle_epi8(t_[1].m128i, idx128);
__m128i r128_01 = _mm_blendv_epi8(r128_0, r128_1, idx128_shl3);
__m128i r128_2 = _mm_shuffle_epi8(t_[2].m128i, idx128);
__m128i r128_3 = _mm_shuffle_epi8(t_[3].m128i, idx128);
__m128i r128_23 = _mm_blendv_epi8(r128_2, r128_3, idx128_shl3);
__m128i r128 = _mm_blendv_epi8(r128_01, r128_23, _mm_slli_epi32(idx128, 2));
r_.m64 = _mm_movepi64_pi64(r128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (idx_.values[i] < 64) ? t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : 0;
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqtbl4_u8
#define vqtbl4_u8(t, idx) simde_vqtbl4_u8((t), (idx))
#endif
/* Portable vqtbl4_s8: signed variant; memcpy reinterprets the signed table
 * quad as unsigned and defers to simde_vqtbl4_u8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vqtbl4_s8(simde_int8x16x4_t t, simde_uint8x8_t idx) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqtbl4_s8(t, idx);
#else
simde_uint8x16x4_t t_;
simde_memcpy(&t_, &t, sizeof(t_));
return simde_vreinterpret_s8_u8(simde_vqtbl4_u8(t_, idx));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqtbl4_s8
#define vqtbl4_s8(t, idx) simde_vqtbl4_s8((t), (idx))
#endif
#endif /* !defined(SIMDE_BUG_INTEL_857088) */
/* Portable vqtbl1q_u8: 16-lane byte table lookup into a 16-byte table.
 * Each result lane is t[idx[i]] when idx[i] < 16, otherwise 0. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vqtbl1q_u8(simde_uint8x16_t t, simde_uint8x16_t idx) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqtbl1q_u8(t, idx);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Do two 64-bit ARMv7 lookups (low/high halves of idx) and recombine. */
uint8x8x2_t split;
simde_memcpy(&split, &t, sizeof(split));
uint8x8_t lo = vtbl2_u8(split, vget_low_u8(idx));
uint8x8_t hi = vtbl2_u8(split, vget_high_u8(idx));
return vcombine_u8(lo, hi);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
/* vec_perm indexes modulo 32, so mask lanes whose index is >= 16. */
return vec_and(vec_perm(t, t, idx), vec_cmplt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 16))));
#else
simde_uint8x16_private t_ = simde_uint8x16_to_private(t);
simde_uint8x16_private
r_,
idx_ = simde_uint8x16_to_private(idx);
#if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
/* Force the shuffle "zero" bit on for out-of-range indices (see the
 * non-q vqtbl1_u8 for details). */
r_.m128i = _mm_shuffle_epi8(t_.m128i, _mm_or_si128(idx_.m128i, _mm_cmpgt_epi8(idx_.m128i, _mm_set1_epi8(15))))
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
/* wasm swizzle already returns 0 for out-of-range indices. */
r_.v128 = wasm_i8x16_swizzle(t_.v128, idx_.v128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (idx_.values[i] < 16) ? t_.values[idx_.values[i]] : 0;
}
#endif
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqtbl1q_u8
#define vqtbl1q_u8(t, idx) simde_vqtbl1q_u8((t), (idx))
#endif
/* Portable vqtbl1q_s8: signed variant of vqtbl1q_u8 via reinterpret casts. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vqtbl1q_s8(simde_int8x16_t t, simde_uint8x16_t idx) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqtbl1q_s8(t, idx);
#else
return simde_vreinterpretq_s8_u8(simde_vqtbl1q_u8(simde_vreinterpretq_u8_s8(t), idx));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqtbl1q_s8
#define vqtbl1q_s8(t, idx) simde_vqtbl1q_s8((t), (idx))
#endif
#if !defined(SIMDE_BUG_INTEL_857088)
/* Portable vqtbl2q_u8: 16-lane lookup into a 32-byte table (two 16-byte
 * vectors). Out-of-range indices (>= 32) yield 0. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vqtbl2q_u8(simde_uint8x16x2_t t, simde_uint8x16_t idx) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqtbl2q_u8(t, idx);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Two vtbl4 lookups over the four d-reg halves, one per idx half. */
uint8x8x4_t split;
simde_memcpy(&split, &t, sizeof(split));
uint8x8_t lo = vtbl4_u8(split, vget_low_u8(idx))
uint8x8_t hi = vtbl4_u8(split, vget_high_u8(idx));
return vcombine_u8(lo, hi);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
/* vec_perm spans both table vectors (32 bytes); mask indices >= 32. */
return vec_and(vec_perm(t.val[0], t.val[1], idx),
vec_cmplt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 32))));
#else
simde_uint8x16_private t_[2] = { simde_uint8x16_to_private(t.val[0]), simde_uint8x16_to_private(t.val[1]) };
simde_uint8x16_private
r_,
idx_ = simde_uint8x16_to_private(idx);
#if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
/* Zero-bit for indices >= 32; blend on index bit 4 moved to bit 7. */
idx_.m128i = _mm_or_si128(idx_.m128i, _mm_cmpgt_epi8(idx_.m128i, _mm_set1_epi8(31)));
__m128i r_0 = _mm_shuffle_epi8(t_[0].m128i, idx_.m128i);
__m128i r_1 = _mm_shuffle_epi8(t_[1].m128i, idx_.m128i);
r_.m128i = _mm_blendv_epi8(r_0, r_1, _mm_slli_epi32(idx_.m128i, 3));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
/* Each swizzle zeroes its own out-of-range lanes, so OR the results. */
r_.v128 = wasm_v128_or(wasm_i8x16_swizzle(t_[0].v128, idx_.v128),
wasm_i8x16_swizzle(t_[1].v128, wasm_i8x16_sub(idx_.v128, wasm_i8x16_splat(16))));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (idx_.values[i] < 32) ? t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : 0;
}
#endif
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqtbl2q_u8
#define vqtbl2q_u8(t, idx) simde_vqtbl2q_u8((t), (idx))
#endif
/* Portable vqtbl2q_s8: signed variant; memcpy reinterprets the signed table
 * pair as unsigned and defers to simde_vqtbl2q_u8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vqtbl2q_s8(simde_int8x16x2_t t, simde_uint8x16_t idx) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqtbl2q_s8(t, idx);
#else
simde_uint8x16x2_t t_;
simde_memcpy(&t_, &t, sizeof(t_));
return simde_vreinterpretq_s8_u8(simde_vqtbl2q_u8(t_, idx));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqtbl2q_s8
#define vqtbl2q_s8(t, idx) simde_vqtbl2q_s8((t), (idx))
#endif
/* Portable vqtbl3q_u8: 16-lane lookup into a 48-byte table (three 16-byte
 * vectors). Out-of-range indices (>= 48) yield 0. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vqtbl3q_u8(simde_uint8x16x3_t t, simde_uint8x16_t idx) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqtbl3q_u8(t, idx);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Look up bytes 32..47 first (vtbl2 with idx-32), then use vtbx4 so the
 * 0..31 lookup leaves those prior results intact where idx is in 0..31. */
uint8x16_t idx_hi = vsubq_u8(idx, vdupq_n_u8(32));
uint8x8x4_t split_lo;
uint8x8x2_t split_hi;
simde_memcpy(&split_lo, &t.val[0], sizeof(split_lo));
simde_memcpy(&split_hi, &t.val[2], sizeof(split_hi));
uint8x8_t hi_lo = vtbl2_u8(split_hi, vget_low_u8(idx_hi));
uint8x8_t hi_hi = vtbl2_u8(split_hi, vget_high_u8(idx_hi));
uint8x8_t lo = vtbx4_u8(hi_lo, split_lo, vget_low_u8(idx));
uint8x8_t hi = vtbx4_u8(hi_hi, split_lo, vget_high_u8(idx));
return vcombine_u8(lo, hi);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
/* vec_perm indexes modulo 32: select between the 0..31 and 32..47 perms
 * on idx > 31, then mask indices >= 48 to zero. */
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) r_01 = vec_perm(t.val[0], t.val[1], idx);
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) r_2 = vec_perm(t.val[2], t.val[2], idx);
return vec_and(vec_sel(r_01, r_2, vec_cmpgt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 31)))),
vec_cmplt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 48))));
#else
simde_uint8x16_private t_[3] = { simde_uint8x16_to_private(t.val[0]), simde_uint8x16_to_private(t.val[1]),
simde_uint8x16_to_private(t.val[2]) };
simde_uint8x16_private
r_,
idx_ = simde_uint8x16_to_private(idx);
#if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
/* Zero-bit for indices >= 48; select among three shuffled tables using
 * index bits 4 (<<3) and 5 (<<2) as blend controls. */
idx_.m128i = _mm_or_si128(idx_.m128i, _mm_cmpgt_epi8(idx_.m128i, _mm_set1_epi8(47)));
__m128i r_0 = _mm_shuffle_epi8(t_[0].m128i, idx_.m128i);
__m128i r_1 = _mm_shuffle_epi8(t_[1].m128i, idx_.m128i);
__m128i r_01 = _mm_blendv_epi8(r_0, r_1, _mm_slli_epi32(idx_.m128i, 3));
__m128i r_2 = _mm_shuffle_epi8(t_[2].m128i, idx_.m128i);
r_.m128i = _mm_blendv_epi8(r_01, r_2, _mm_slli_epi32(idx_.m128i, 2));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
/* Per-table swizzles with rebased indices; each zeroes its own
 * out-of-range lanes, so the results are ORed together. */
r_.v128 = wasm_v128_or(wasm_v128_or(wasm_i8x16_swizzle(t_[0].v128, idx_.v128),
wasm_i8x16_swizzle(t_[1].v128, wasm_i8x16_sub(idx_.v128, wasm_i8x16_splat(16)))),
wasm_i8x16_swizzle(t_[2].v128, wasm_i8x16_sub(idx_.v128, wasm_i8x16_splat(32))));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (idx_.values[i] < 48) ? t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : 0;
}
#endif
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqtbl3q_u8
#define vqtbl3q_u8(t, idx) simde_vqtbl3q_u8((t), (idx))
#endif
/* Portable vqtbl3q_s8: signed variant; memcpy reinterprets the signed table
 * triple as unsigned and defers to simde_vqtbl3q_u8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vqtbl3q_s8(simde_int8x16x3_t t, simde_uint8x16_t idx) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqtbl3q_s8(t, idx);
#else
simde_uint8x16x3_t t_;
simde_memcpy(&t_, &t, sizeof(t_));
return simde_vreinterpretq_s8_u8(simde_vqtbl3q_u8(t_, idx));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqtbl3q_s8
#define vqtbl3q_s8(t, idx) simde_vqtbl3q_s8((t), (idx))
#endif
/* Portable vqtbl4q_u8: 16-lane lookup into a 64-byte table (four 16-byte
 * vectors). Out-of-range indices (>= 64) yield 0. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vqtbl4q_u8(simde_uint8x16x4_t t, simde_uint8x16_t idx) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqtbl4q_u8(t, idx);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Look up bytes 0..31 first, then vtbx4 over bytes 32..63 (idx - 32)
 * fills only the lanes the first lookup left untouched. */
uint8x16_t idx_hi = vsubq_u8(idx, vdupq_n_u8(32));
uint8x8x4_t split_lo;
uint8x8x4_t split_hi;
simde_memcpy(&split_lo, &t.val[0], sizeof(split_lo));
simde_memcpy(&split_hi, &t.val[2], sizeof(split_hi));
uint8x8_t lo_lo = vtbl4_u8(split_lo, vget_low_u8(idx));
uint8x8_t lo_hi = vtbl4_u8(split_lo, vget_high_u8(idx));
uint8x8_t lo = vtbx4_u8(lo_lo, split_hi, vget_low_u8(idx_hi));
uint8x8_t hi = vtbx4_u8(lo_hi, split_hi, vget_high_u8(idx_hi));
return vcombine_u8(lo, hi);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
/* vec_perm indexes modulo 32: select between the low and high 32-byte
 * perms on idx > 31, then mask indices >= 64 to zero. */
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) r_01 = vec_perm(t.val[0], t.val[1], idx);
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) r_23 = vec_perm(t.val[2], t.val[3], idx);
return vec_and(vec_sel(r_01, r_23, vec_cmpgt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 31)))),
vec_cmplt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 64))));
#else
simde_uint8x16_private t_[4] = { simde_uint8x16_to_private(t.val[0]), simde_uint8x16_to_private(t.val[1]),
simde_uint8x16_to_private(t.val[2]), simde_uint8x16_to_private(t.val[3]) };
simde_uint8x16_private
r_,
idx_ = simde_uint8x16_to_private(idx);
#if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
/* Zero-bit for indices >= 64; 2-level blend tree on index bits 4 (<<3)
 * and 5 (<<2) selects among the four shuffled tables. */
idx_.m128i = _mm_or_si128(idx_.m128i, _mm_cmpgt_epi8(idx_.m128i, _mm_set1_epi8(63)));
__m128i idx_shl3 = _mm_slli_epi32(idx_.m128i, 3);
__m128i r_0 = _mm_shuffle_epi8(t_[0].m128i, idx_.m128i);
__m128i r_1 = _mm_shuffle_epi8(t_[1].m128i, idx_.m128i);
__m128i r_01 = _mm_blendv_epi8(r_0, r_1, idx_shl3);
__m128i r_2 = _mm_shuffle_epi8(t_[2].m128i, idx_.m128i);
__m128i r_3 = _mm_shuffle_epi8(t_[3].m128i, idx_.m128i);
__m128i r_23 = _mm_blendv_epi8(r_2, r_3, idx_shl3);
r_.m128i = _mm_blendv_epi8(r_01, r_23, _mm_slli_epi32(idx_.m128i, 2));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
/* Per-table swizzles with rebased indices; swizzle zeroes out-of-range
 * lanes, so partial results combine with OR. */
r_.v128 = wasm_v128_or(wasm_v128_or(wasm_i8x16_swizzle(t_[0].v128, idx_.v128),
wasm_i8x16_swizzle(t_[1].v128, wasm_i8x16_sub(idx_.v128, wasm_i8x16_splat(16)))),
wasm_v128_or(wasm_i8x16_swizzle(t_[2].v128, wasm_i8x16_sub(idx_.v128, wasm_i8x16_splat(32))),
wasm_i8x16_swizzle(t_[3].v128, wasm_i8x16_sub(idx_.v128, wasm_i8x16_splat(48)))));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (idx_.values[i] < 64) ? t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : 0;
}
#endif
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqtbl4q_u8
#define vqtbl4q_u8(t, idx) simde_vqtbl4q_u8((t), (idx))
#endif
/* Portable vqtbl4q_s8: signed variant; memcpy reinterprets the signed table
 * quad as unsigned and defers to simde_vqtbl4q_u8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vqtbl4q_s8(simde_int8x16x4_t t, simde_uint8x16_t idx) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vqtbl4q_s8(t, idx);
#else
simde_uint8x16x4_t t_;
simde_memcpy(&t_, &t, sizeof(t_));
return simde_vreinterpretq_s8_u8(simde_vqtbl4q_u8(t_, idx));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqtbl4q_s8
#define vqtbl4q_s8(t, idx) simde_vqtbl4q_s8((t), (idx))
#endif
#endif /* !defined(SIMDE_BUG_INTEL_857088) */
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_QTBL_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_ZIP2_H)
#define SIMDE_ARM_NEON_ZIP2_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Portable vzip2_f32: interleave the upper halves of a and b,
 * producing [a1, b1] for 2-lane float32 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vzip2_f32(simde_float32x2_t a, simde_float32x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip2_f32(a, b);
#else
simde_float32x2_private
r_,
a_ = simde_float32x2_to_private(a),
b_ = simde_float32x2_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_unpackhi_pi32(a_.m64, b_.m64);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, b_.values, 1, 3);
#else
/* Generic fallback: alternate upper-half elements of a and b. */
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[(2 * i) ] = a_.values[halfway_point + i];
r_.values[(2 * i) + 1] = b_.values[halfway_point + i];
}
#endif
return simde_float32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip2_f32
#define vzip2_f32(a, b) simde_vzip2_f32((a), (b))
#endif
/* Portable vzip2_s8: interleave the upper halves of a and b,
 * producing [a4, b4, a5, b5, a6, b6, a7, b7] for 8-lane int8 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vzip2_s8(simde_int8x8_t a, simde_int8x8_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip2_s8(a, b);
#else
simde_int8x8_private
r_,
a_ = simde_int8x8_to_private(a),
b_ = simde_int8x8_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_unpackhi_pi8(a_.m64, b_.m64);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, a_.values, b_.values, 4, 12, 5, 13, 6, 14, 7, 15);
#else
/* Generic fallback: alternate upper-half elements of a and b. */
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[(2 * i) ] = a_.values[halfway_point + i];
r_.values[(2 * i) + 1] = b_.values[halfway_point + i];
}
#endif
return simde_int8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip2_s8
#define vzip2_s8(a, b) simde_vzip2_s8((a), (b))
#endif
/* Portable vzip2_s16: interleave the upper halves of a and b,
 * producing [a2, b2, a3, b3] for 4-lane int16 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vzip2_s16(simde_int16x4_t a, simde_int16x4_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip2_s16(a, b);
#else
simde_int16x4_private
r_,
a_ = simde_int16x4_to_private(a),
b_ = simde_int16x4_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_unpackhi_pi16(a_.m64, b_.m64);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(16, 8, a_.values, b_.values, 2, 6, 3, 7);
#else
/* Generic fallback: alternate upper-half elements of a and b. */
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[(2 * i) ] = a_.values[halfway_point + i];
r_.values[(2 * i) + 1] = b_.values[halfway_point + i];
}
#endif
return simde_int16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip2_s16
#define vzip2_s16(a, b) simde_vzip2_s16((a), (b))
#endif
/* Portable vzip2_s32: interleave the upper halves of a and b,
 * producing [a1, b1] for 2-lane int32 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vzip2_s32(simde_int32x2_t a, simde_int32x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip2_s32(a, b);
#else
simde_int32x2_private
r_,
a_ = simde_int32x2_to_private(a),
b_ = simde_int32x2_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_unpackhi_pi32(a_.m64, b_.m64);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, b_.values, 1, 3);
#else
/* Generic fallback: alternate upper-half elements of a and b. */
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[(2 * i) ] = a_.values[halfway_point + i];
r_.values[(2 * i) + 1] = b_.values[halfway_point + i];
}
#endif
return simde_int32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip2_s32
#define vzip2_s32(a, b) simde_vzip2_s32((a), (b))
#endif
/* Portable vzip2_u8: interleave the upper halves of a and b,
 * producing [a4, b4, a5, b5, a6, b6, a7, b7] for 8-lane uint8 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vzip2_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip2_u8(a, b);
#else
simde_uint8x8_private
r_,
a_ = simde_uint8x8_to_private(a),
b_ = simde_uint8x8_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_unpackhi_pi8(a_.m64, b_.m64);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, a_.values, b_.values, 4, 12, 5, 13, 6, 14, 7, 15);
#else
/* Generic fallback: alternate upper-half elements of a and b. */
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[(2 * i) ] = a_.values[halfway_point + i];
r_.values[(2 * i) + 1] = b_.values[halfway_point + i];
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip2_u8
#define vzip2_u8(a, b) simde_vzip2_u8((a), (b))
#endif
/* Portable vzip2_u16: interleave the upper halves of a and b,
 * producing [a2, b2, a3, b3] for 4-lane uint16 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vzip2_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip2_u16(a, b);
#else
simde_uint16x4_private
r_,
a_ = simde_uint16x4_to_private(a),
b_ = simde_uint16x4_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_unpackhi_pi16(a_.m64, b_.m64);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(16, 8, a_.values, b_.values, 2, 6, 3, 7);
#else
/* Generic fallback: alternate upper-half elements of a and b. */
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[(2 * i) ] = a_.values[halfway_point + i];
r_.values[(2 * i) + 1] = b_.values[halfway_point + i];
}
#endif
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip2_u16
#define vzip2_u16(a, b) simde_vzip2_u16((a), (b))
#endif
/* Portable vzip2_u32: interleave the upper halves of a and b,
 * producing [a1, b1] for 2-lane uint32 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vzip2_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip2_u32(a, b);
#else
simde_uint32x2_private
r_,
a_ = simde_uint32x2_to_private(a),
b_ = simde_uint32x2_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_unpackhi_pi32(a_.m64, b_.m64);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, b_.values, 1, 3);
#else
/* Generic fallback: alternate upper-half elements of a and b. */
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[(2 * i) ] = a_.values[halfway_point + i];
r_.values[(2 * i) + 1] = b_.values[halfway_point + i];
}
#endif
return simde_uint32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip2_u32
#define vzip2_u32(a, b) simde_vzip2_u32((a), (b))
#endif
/* Portable vzip2q_f32: interleave the upper halves of a and b,
 * producing [a2, b2, a3, b3] for 4-lane float32 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vzip2q_f32(simde_float32x4_t a, simde_float32x4_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip2q_f32(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_mergel(a, b);
#else
simde_float32x4_private
r_,
a_ = simde_float32x4_to_private(a),
b_ = simde_float32x4_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i32x4_shuffle(a_.v128, b_.v128, 2, 6, 3, 7);
#elif defined(SIMDE_X86_SSE_NATIVE)
r_.m128 = _mm_unpackhi_ps(a_.m128, b_.m128);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 2, 6, 3, 7);
#else
/* Generic fallback: alternate upper-half elements of a and b. */
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[(2 * i) ] = a_.values[halfway_point + i];
r_.values[(2 * i) + 1] = b_.values[halfway_point + i];
}
#endif
return simde_float32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip2q_f32
#define vzip2q_f32(a, b) simde_vzip2q_f32((a), (b))
#endif
/* Portable vzip2q_f64: interleave the upper halves of a and b,
 * producing [a1, b1] for 2-lane float64 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vzip2q_f64(simde_float64x2_t a, simde_float64x2_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip2q_f64(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
return vec_mergel(a, b);
#else
simde_float64x2_private
r_,
a_ = simde_float64x2_to_private(a),
b_ = simde_float64x2_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i64x2_shuffle(a_.v128, b_.v128, 1, 3);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128d = _mm_unpackhi_pd(a_.m128d, b_.m128d);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 1, 3);
#else
/* Generic fallback: alternate upper-half elements of a and b. */
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[(2 * i) ] = a_.values[halfway_point + i];
r_.values[(2 * i) + 1] = b_.values[halfway_point + i];
}
#endif
return simde_float64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip2q_f64
#define vzip2q_f64(a, b) simde_vzip2q_f64((a), (b))
#endif
/* Portable vzip2q_s8: interleave the upper halves of a and b,
 * producing [a8, b8, a9, b9, ..., a15, b15] for 16-lane int8 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vzip2q_s8(simde_int8x16_t a, simde_int8x16_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip2q_s8(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_mergel(a, b);
#else
simde_int8x16_private
r_,
a_ = simde_int8x16_to_private(a),
b_ = simde_int8x16_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i8x16_shuffle(a_.v128, b_.v128, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_unpackhi_epi8(a_.m128i, b_.m128i);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, b_.values, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
#else
/* Generic fallback: alternate upper-half elements of a and b. */
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[(2 * i) ] = a_.values[halfway_point + i];
r_.values[(2 * i) + 1] = b_.values[halfway_point + i];
}
#endif
return simde_int8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip2q_s8
#define vzip2q_s8(a, b) simde_vzip2q_s8((a), (b))
#endif
/* Portable vzip2q_s16: interleave the upper halves of a and b,
 * producing [a4, b4, a5, b5, a6, b6, a7, b7] for 8-lane int16 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vzip2q_s16(simde_int16x8_t a, simde_int16x8_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip2q_s16(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_mergel(a, b);
#else
simde_int16x8_private
r_,
a_ = simde_int16x8_to_private(a),
b_ = simde_int16x8_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i16x8_shuffle(a_.v128, b_.v128, 4, 12, 5, 13, 6, 14, 7, 15);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_unpackhi_epi16(a_.m128i, b_.m128i);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.values, b_.values, 4, 12, 5, 13, 6, 14, 7, 15);
#else
/* Generic fallback: alternate upper-half elements of a and b. */
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[(2 * i) ] = a_.values[halfway_point + i];
r_.values[(2 * i) + 1] = b_.values[halfway_point + i];
}
#endif
return simde_int16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip2q_s16
#define vzip2q_s16(a, b) simde_vzip2q_s16((a), (b))
#endif
/* Portable vzip2q_s32: interleave the upper halves of a and b,
 * producing [a2, b2, a3, b3] for 4-lane int32 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vzip2q_s32(simde_int32x4_t a, simde_int32x4_t b) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vzip2q_s32(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_mergel(a, b);
#else
simde_int32x4_private
r_,
a_ = simde_int32x4_to_private(a),
b_ = simde_int32x4_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i32x4_shuffle(a_.v128, b_.v128, 2, 6, 3, 7);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_unpackhi_epi32(a_.m128i, b_.m128i);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 2, 6, 3, 7);
#else
/* Generic fallback: alternate upper-half elements of a and b. */
const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < halfway_point ; i++) {
r_.values[(2 * i) ] = a_.values[halfway_point + i];
r_.values[(2 * i) + 1] = b_.values[halfway_point + i];
}
#endif
return simde_int32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vzip2q_s32
#define vzip2q_s32(a, b) simde_vzip2q_s32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vzip2q_s64(simde_int64x2_t a, simde_int64x2_t b) {
  /* Interleave the high halves of a and b: { a1, b1 }.
   * 64-bit vec_mergel needs POWER7, unlike the narrower element types. */
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vzip2q_s64(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
    return vec_mergel(a, b);
  #else
    simde_int64x2_private va_ = simde_int64x2_to_private(a);
    simde_int64x2_private vb_ = simde_int64x2_to_private(b);
    simde_int64x2_private vr_;
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      vr_.v128 = wasm_i64x2_shuffle(va_.v128, vb_.v128, 1, 3);
    #elif defined(SIMDE_X86_SSE2_NATIVE)
      vr_.m128i = _mm_unpackhi_epi64(va_.m128i, vb_.m128i);
    #elif defined(SIMDE_SHUFFLE_VECTOR_)
      vr_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, va_.values, vb_.values, 1, 3);
    #else
      /* Portable fallback: write one (a, b) lane pair per iteration. */
      const size_t half_ = sizeof(vr_.values) / sizeof(vr_.values[0]) / 2;
      SIMDE_VECTORIZE
      for (size_t j_ = 0 ; j_ < half_ ; j_++) {
        vr_.values[2 * j_] = va_.values[half_ + j_];
        vr_.values[(2 * j_) + 1] = vb_.values[half_ + j_];
      }
    #endif
    return simde_int64x2_from_private(vr_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vzip2q_s64
  #define vzip2q_s64(a, b) simde_vzip2q_s64((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vzip2q_u8(simde_uint8x16_t a, simde_uint8x16_t b) {
  /* Interleave the high halves of a and b: { a8, b8, a9, b9, ..., a15, b15 }. */
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vzip2q_u8(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    return vec_mergel(a, b);
  #else
    simde_uint8x16_private va_ = simde_uint8x16_to_private(a);
    simde_uint8x16_private vb_ = simde_uint8x16_to_private(b);
    simde_uint8x16_private vr_;
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      vr_.v128 = wasm_i8x16_shuffle(va_.v128, vb_.v128, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
    #elif defined(SIMDE_X86_SSE2_NATIVE)
      vr_.m128i = _mm_unpackhi_epi8(va_.m128i, vb_.m128i);
    #elif defined(SIMDE_SHUFFLE_VECTOR_)
      vr_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, va_.values, vb_.values, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
    #else
      /* Portable fallback: write one (a, b) lane pair per iteration. */
      const size_t half_ = sizeof(vr_.values) / sizeof(vr_.values[0]) / 2;
      SIMDE_VECTORIZE
      for (size_t j_ = 0 ; j_ < half_ ; j_++) {
        vr_.values[2 * j_] = va_.values[half_ + j_];
        vr_.values[(2 * j_) + 1] = vb_.values[half_ + j_];
      }
    #endif
    return simde_uint8x16_from_private(vr_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vzip2q_u8
  #define vzip2q_u8(a, b) simde_vzip2q_u8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vzip2q_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
  /* Interleave the high halves of a and b: { a4, b4, a5, b5, a6, b6, a7, b7 }. */
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vzip2q_u16(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    return vec_mergel(a, b);
  #else
    simde_uint16x8_private va_ = simde_uint16x8_to_private(a);
    simde_uint16x8_private vb_ = simde_uint16x8_to_private(b);
    simde_uint16x8_private vr_;
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      vr_.v128 = wasm_i16x8_shuffle(va_.v128, vb_.v128, 4, 12, 5, 13, 6, 14, 7, 15);
    #elif defined(SIMDE_X86_SSE2_NATIVE)
      vr_.m128i = _mm_unpackhi_epi16(va_.m128i, vb_.m128i);
    #elif defined(SIMDE_SHUFFLE_VECTOR_)
      vr_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, va_.values, vb_.values, 4, 12, 5, 13, 6, 14, 7, 15);
    #else
      /* Portable fallback: write one (a, b) lane pair per iteration. */
      const size_t half_ = sizeof(vr_.values) / sizeof(vr_.values[0]) / 2;
      SIMDE_VECTORIZE
      for (size_t j_ = 0 ; j_ < half_ ; j_++) {
        vr_.values[2 * j_] = va_.values[half_ + j_];
        vr_.values[(2 * j_) + 1] = vb_.values[half_ + j_];
      }
    #endif
    return simde_uint16x8_from_private(vr_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vzip2q_u16
  #define vzip2q_u16(a, b) simde_vzip2q_u16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vzip2q_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
  /* Interleave the high halves of a and b: { a2, b2, a3, b3 }. */
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vzip2q_u32(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    return vec_mergel(a, b);
  #else
    simde_uint32x4_private va_ = simde_uint32x4_to_private(a);
    simde_uint32x4_private vb_ = simde_uint32x4_to_private(b);
    simde_uint32x4_private vr_;
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      vr_.v128 = wasm_i32x4_shuffle(va_.v128, vb_.v128, 2, 6, 3, 7);
    #elif defined(SIMDE_X86_SSE2_NATIVE)
      vr_.m128i = _mm_unpackhi_epi32(va_.m128i, vb_.m128i);
    #elif defined(SIMDE_SHUFFLE_VECTOR_)
      vr_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, va_.values, vb_.values, 2, 6, 3, 7);
    #else
      /* Portable fallback: write one (a, b) lane pair per iteration. */
      const size_t half_ = sizeof(vr_.values) / sizeof(vr_.values[0]) / 2;
      SIMDE_VECTORIZE
      for (size_t j_ = 0 ; j_ < half_ ; j_++) {
        vr_.values[2 * j_] = va_.values[half_ + j_];
        vr_.values[(2 * j_) + 1] = vb_.values[half_ + j_];
      }
    #endif
    return simde_uint32x4_from_private(vr_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vzip2q_u32
  #define vzip2q_u32(a, b) simde_vzip2q_u32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vzip2q_u64(simde_uint64x2_t a, simde_uint64x2_t b) {
  /* Interleave the high halves of a and b: { a1, b1 }.
   * 64-bit vec_mergel needs POWER7, unlike the narrower element types. */
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vzip2q_u64(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
    return vec_mergel(a, b);
  #else
    simde_uint64x2_private va_ = simde_uint64x2_to_private(a);
    simde_uint64x2_private vb_ = simde_uint64x2_to_private(b);
    simde_uint64x2_private vr_;
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      vr_.v128 = wasm_i64x2_shuffle(va_.v128, vb_.v128, 1, 3);
    #elif defined(SIMDE_X86_SSE2_NATIVE)
      vr_.m128i = _mm_unpackhi_epi64(va_.m128i, vb_.m128i);
    #elif defined(SIMDE_SHUFFLE_VECTOR_)
      vr_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, va_.values, vb_.values, 1, 3);
    #else
      /* Portable fallback: write one (a, b) lane pair per iteration. */
      const size_t half_ = sizeof(vr_.values) / sizeof(vr_.values[0]) / 2;
      SIMDE_VECTORIZE
      for (size_t j_ = 0 ; j_ < half_ ; j_++) {
        vr_.values[2 * j_] = va_.values[half_ + j_];
        vr_.values[(2 * j_) + 1] = vb_.values[half_ + j_];
      }
    #endif
    return simde_uint64x2_from_private(vr_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vzip2q_u64
  #define vzip2q_u64(a, b) simde_vzip2q_u64((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_ZIP2_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/and.h | .h | 16,286 | 553 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Christopher Moore <moore@free.fr>
*/
#if !defined(SIMDE_ARM_NEON_AND_H)
#define SIMDE_ARM_NEON_AND_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vand_s8(simde_int8x8_t a, simde_int8x8_t b) {
  /* Lane-wise bitwise AND of two 64-bit vectors of signed 8-bit lanes. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vand_s8(a, b);
  #else
    simde_int8x8_private va_ = simde_int8x8_to_private(a);
    simde_int8x8_private vb_ = simde_int8x8_to_private(b);
    simde_int8x8_private vr_;
    #if defined(SIMDE_X86_MMX_NATIVE)
      vr_.m64 = _mm_and_si64(va_.m64, vb_.m64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      vr_.values = va_.values & vb_.values;
    #else
      const size_t lane_count_ = sizeof(vr_.values) / sizeof(vr_.values[0]);
      SIMDE_VECTORIZE
      for (size_t lane_ = 0 ; lane_ < lane_count_ ; lane_++) {
        vr_.values[lane_] = va_.values[lane_] & vb_.values[lane_];
      }
    #endif
    return simde_int8x8_from_private(vr_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vand_s8
  #define vand_s8(a, b) simde_vand_s8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vand_s16(simde_int16x4_t a, simde_int16x4_t b) {
  /* Lane-wise bitwise AND of two 64-bit vectors of signed 16-bit lanes. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vand_s16(a, b);
  #else
    simde_int16x4_private va_ = simde_int16x4_to_private(a);
    simde_int16x4_private vb_ = simde_int16x4_to_private(b);
    simde_int16x4_private vr_;
    #if defined(SIMDE_X86_MMX_NATIVE)
      vr_.m64 = _mm_and_si64(va_.m64, vb_.m64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      vr_.values = va_.values & vb_.values;
    #else
      const size_t lane_count_ = sizeof(vr_.values) / sizeof(vr_.values[0]);
      SIMDE_VECTORIZE
      for (size_t lane_ = 0 ; lane_ < lane_count_ ; lane_++) {
        vr_.values[lane_] = va_.values[lane_] & vb_.values[lane_];
      }
    #endif
    return simde_int16x4_from_private(vr_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vand_s16
  #define vand_s16(a, b) simde_vand_s16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vand_s32(simde_int32x2_t a, simde_int32x2_t b) {
  /* Lane-wise bitwise AND of two 64-bit vectors of signed 32-bit lanes. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vand_s32(a, b);
  #else
    simde_int32x2_private va_ = simde_int32x2_to_private(a);
    simde_int32x2_private vb_ = simde_int32x2_to_private(b);
    simde_int32x2_private vr_;
    #if defined(SIMDE_X86_MMX_NATIVE)
      vr_.m64 = _mm_and_si64(va_.m64, vb_.m64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      vr_.values = va_.values & vb_.values;
    #else
      const size_t lane_count_ = sizeof(vr_.values) / sizeof(vr_.values[0]);
      SIMDE_VECTORIZE
      for (size_t lane_ = 0 ; lane_ < lane_count_ ; lane_++) {
        vr_.values[lane_] = va_.values[lane_] & vb_.values[lane_];
      }
    #endif
    return simde_int32x2_from_private(vr_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vand_s32
  #define vand_s32(a, b) simde_vand_s32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vand_s64(simde_int64x1_t a, simde_int64x1_t b) {
  /* Lane-wise bitwise AND of two one-lane vectors of signed 64-bit values. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vand_s64(a, b);
  #else
    simde_int64x1_private va_ = simde_int64x1_to_private(a);
    simde_int64x1_private vb_ = simde_int64x1_to_private(b);
    simde_int64x1_private vr_;
    #if defined(SIMDE_X86_MMX_NATIVE)
      vr_.m64 = _mm_and_si64(va_.m64, vb_.m64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      vr_.values = va_.values & vb_.values;
    #else
      const size_t lane_count_ = sizeof(vr_.values) / sizeof(vr_.values[0]);
      SIMDE_VECTORIZE
      for (size_t lane_ = 0 ; lane_ < lane_count_ ; lane_++) {
        vr_.values[lane_] = va_.values[lane_] & vb_.values[lane_];
      }
    #endif
    return simde_int64x1_from_private(vr_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vand_s64
  #define vand_s64(a, b) simde_vand_s64((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vand_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
  /* Lane-wise bitwise AND of two 64-bit vectors of unsigned 8-bit lanes. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vand_u8(a, b);
  #else
    simde_uint8x8_private va_ = simde_uint8x8_to_private(a);
    simde_uint8x8_private vb_ = simde_uint8x8_to_private(b);
    simde_uint8x8_private vr_;
    #if defined(SIMDE_X86_MMX_NATIVE)
      vr_.m64 = _mm_and_si64(va_.m64, vb_.m64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      vr_.values = va_.values & vb_.values;
    #else
      const size_t lane_count_ = sizeof(vr_.values) / sizeof(vr_.values[0]);
      SIMDE_VECTORIZE
      for (size_t lane_ = 0 ; lane_ < lane_count_ ; lane_++) {
        vr_.values[lane_] = va_.values[lane_] & vb_.values[lane_];
      }
    #endif
    return simde_uint8x8_from_private(vr_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vand_u8
  #define vand_u8(a, b) simde_vand_u8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vand_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
  /* Lane-wise bitwise AND of two 64-bit vectors of unsigned 16-bit lanes. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vand_u16(a, b);
  #else
    simde_uint16x4_private va_ = simde_uint16x4_to_private(a);
    simde_uint16x4_private vb_ = simde_uint16x4_to_private(b);
    simde_uint16x4_private vr_;
    #if defined(SIMDE_X86_MMX_NATIVE)
      vr_.m64 = _mm_and_si64(va_.m64, vb_.m64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      vr_.values = va_.values & vb_.values;
    #else
      const size_t lane_count_ = sizeof(vr_.values) / sizeof(vr_.values[0]);
      SIMDE_VECTORIZE
      for (size_t lane_ = 0 ; lane_ < lane_count_ ; lane_++) {
        vr_.values[lane_] = va_.values[lane_] & vb_.values[lane_];
      }
    #endif
    return simde_uint16x4_from_private(vr_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vand_u16
  #define vand_u16(a, b) simde_vand_u16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vand_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
  /* Lane-wise bitwise AND of two 64-bit vectors of unsigned 32-bit lanes. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vand_u32(a, b);
  #else
    simde_uint32x2_private va_ = simde_uint32x2_to_private(a);
    simde_uint32x2_private vb_ = simde_uint32x2_to_private(b);
    simde_uint32x2_private vr_;
    #if defined(SIMDE_X86_MMX_NATIVE)
      vr_.m64 = _mm_and_si64(va_.m64, vb_.m64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      vr_.values = va_.values & vb_.values;
    #else
      const size_t lane_count_ = sizeof(vr_.values) / sizeof(vr_.values[0]);
      SIMDE_VECTORIZE
      for (size_t lane_ = 0 ; lane_ < lane_count_ ; lane_++) {
        vr_.values[lane_] = va_.values[lane_] & vb_.values[lane_];
      }
    #endif
    return simde_uint32x2_from_private(vr_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vand_u32
  #define vand_u32(a, b) simde_vand_u32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vand_u64(simde_uint64x1_t a, simde_uint64x1_t b) {
  /* Lane-wise bitwise AND of two one-lane vectors of unsigned 64-bit values. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vand_u64(a, b);
  #else
    simde_uint64x1_private va_ = simde_uint64x1_to_private(a);
    simde_uint64x1_private vb_ = simde_uint64x1_to_private(b);
    simde_uint64x1_private vr_;
    #if defined(SIMDE_X86_MMX_NATIVE)
      vr_.m64 = _mm_and_si64(va_.m64, vb_.m64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      vr_.values = va_.values & vb_.values;
    #else
      const size_t lane_count_ = sizeof(vr_.values) / sizeof(vr_.values[0]);
      SIMDE_VECTORIZE
      for (size_t lane_ = 0 ; lane_ < lane_count_ ; lane_++) {
        vr_.values[lane_] = va_.values[lane_] & vb_.values[lane_];
      }
    #endif
    return simde_uint64x1_from_private(vr_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vand_u64
  #define vand_u64(a, b) simde_vand_u64((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vandq_s8(simde_int8x16_t a, simde_int8x16_t b) {
  /* Lane-wise bitwise AND of two 128-bit vectors of signed 8-bit lanes. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vandq_s8(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    return vec_and(a, b);
  #else
    simde_int8x16_private va_ = simde_int8x16_to_private(a);
    simde_int8x16_private vb_ = simde_int8x16_to_private(b);
    simde_int8x16_private vr_;
    #if defined(SIMDE_X86_SSE2_NATIVE)
      vr_.m128i = _mm_and_si128(va_.m128i, vb_.m128i);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      vr_.v128 = wasm_v128_and(va_.v128, vb_.v128);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      vr_.values = va_.values & vb_.values;
    #else
      const size_t lane_count_ = sizeof(vr_.values) / sizeof(vr_.values[0]);
      SIMDE_VECTORIZE
      for (size_t lane_ = 0 ; lane_ < lane_count_ ; lane_++) {
        vr_.values[lane_] = va_.values[lane_] & vb_.values[lane_];
      }
    #endif
    return simde_int8x16_from_private(vr_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vandq_s8
  #define vandq_s8(a, b) simde_vandq_s8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vandq_s16(simde_int16x8_t a, simde_int16x8_t b) {
  /* Lane-wise bitwise AND of two 128-bit vectors of signed 16-bit lanes. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vandq_s16(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    return vec_and(a, b);
  #else
    simde_int16x8_private va_ = simde_int16x8_to_private(a);
    simde_int16x8_private vb_ = simde_int16x8_to_private(b);
    simde_int16x8_private vr_;
    #if defined(SIMDE_X86_SSE2_NATIVE)
      vr_.m128i = _mm_and_si128(va_.m128i, vb_.m128i);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      vr_.v128 = wasm_v128_and(va_.v128, vb_.v128);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      vr_.values = va_.values & vb_.values;
    #else
      const size_t lane_count_ = sizeof(vr_.values) / sizeof(vr_.values[0]);
      SIMDE_VECTORIZE
      for (size_t lane_ = 0 ; lane_ < lane_count_ ; lane_++) {
        vr_.values[lane_] = va_.values[lane_] & vb_.values[lane_];
      }
    #endif
    return simde_int16x8_from_private(vr_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vandq_s16
  #define vandq_s16(a, b) simde_vandq_s16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vandq_s32(simde_int32x4_t a, simde_int32x4_t b) {
  /* Lane-wise bitwise AND of two 128-bit vectors of signed 32-bit lanes. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vandq_s32(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    return vec_and(a, b);
  #else
    simde_int32x4_private va_ = simde_int32x4_to_private(a);
    simde_int32x4_private vb_ = simde_int32x4_to_private(b);
    simde_int32x4_private vr_;
    #if defined(SIMDE_X86_SSE2_NATIVE)
      vr_.m128i = _mm_and_si128(va_.m128i, vb_.m128i);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      vr_.v128 = wasm_v128_and(va_.v128, vb_.v128);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      vr_.values = va_.values & vb_.values;
    #else
      const size_t lane_count_ = sizeof(vr_.values) / sizeof(vr_.values[0]);
      SIMDE_VECTORIZE
      for (size_t lane_ = 0 ; lane_ < lane_count_ ; lane_++) {
        vr_.values[lane_] = va_.values[lane_] & vb_.values[lane_];
      }
    #endif
    return simde_int32x4_from_private(vr_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vandq_s32
  #define vandq_s32(a, b) simde_vandq_s32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vandq_s64(simde_int64x2_t a, simde_int64x2_t b) {
  /* Lane-wise bitwise AND of two 128-bit vectors of signed 64-bit lanes.
   * 64-bit vec_and needs POWER7, unlike the narrower element types. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vandq_s64(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
    return vec_and(a, b);
  #else
    simde_int64x2_private va_ = simde_int64x2_to_private(a);
    simde_int64x2_private vb_ = simde_int64x2_to_private(b);
    simde_int64x2_private vr_;
    #if defined(SIMDE_X86_SSE2_NATIVE)
      vr_.m128i = _mm_and_si128(va_.m128i, vb_.m128i);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      vr_.v128 = wasm_v128_and(va_.v128, vb_.v128);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      vr_.values = va_.values & vb_.values;
    #else
      const size_t lane_count_ = sizeof(vr_.values) / sizeof(vr_.values[0]);
      SIMDE_VECTORIZE
      for (size_t lane_ = 0 ; lane_ < lane_count_ ; lane_++) {
        vr_.values[lane_] = va_.values[lane_] & vb_.values[lane_];
      }
    #endif
    return simde_int64x2_from_private(vr_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vandq_s64
  #define vandq_s64(a, b) simde_vandq_s64((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vandq_u8(simde_uint8x16_t a, simde_uint8x16_t b) {
  /* Lane-wise bitwise AND of two 128-bit vectors of unsigned 8-bit lanes. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vandq_u8(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    return vec_and(a, b);
  #else
    simde_uint8x16_private va_ = simde_uint8x16_to_private(a);
    simde_uint8x16_private vb_ = simde_uint8x16_to_private(b);
    simde_uint8x16_private vr_;
    #if defined(SIMDE_X86_SSE2_NATIVE)
      vr_.m128i = _mm_and_si128(va_.m128i, vb_.m128i);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      vr_.v128 = wasm_v128_and(va_.v128, vb_.v128);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      vr_.values = va_.values & vb_.values;
    #else
      const size_t lane_count_ = sizeof(vr_.values) / sizeof(vr_.values[0]);
      SIMDE_VECTORIZE
      for (size_t lane_ = 0 ; lane_ < lane_count_ ; lane_++) {
        vr_.values[lane_] = va_.values[lane_] & vb_.values[lane_];
      }
    #endif
    return simde_uint8x16_from_private(vr_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vandq_u8
  #define vandq_u8(a, b) simde_vandq_u8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vandq_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
  /* Lane-wise bitwise AND of two 128-bit vectors of unsigned 16-bit lanes. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vandq_u16(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    return vec_and(a, b);
  #else
    simde_uint16x8_private va_ = simde_uint16x8_to_private(a);
    simde_uint16x8_private vb_ = simde_uint16x8_to_private(b);
    simde_uint16x8_private vr_;
    #if defined(SIMDE_X86_SSE2_NATIVE)
      vr_.m128i = _mm_and_si128(va_.m128i, vb_.m128i);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      vr_.v128 = wasm_v128_and(va_.v128, vb_.v128);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      vr_.values = va_.values & vb_.values;
    #else
      const size_t lane_count_ = sizeof(vr_.values) / sizeof(vr_.values[0]);
      SIMDE_VECTORIZE
      for (size_t lane_ = 0 ; lane_ < lane_count_ ; lane_++) {
        vr_.values[lane_] = va_.values[lane_] & vb_.values[lane_];
      }
    #endif
    return simde_uint16x8_from_private(vr_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vandq_u16
  #define vandq_u16(a, b) simde_vandq_u16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vandq_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
  /* Lane-wise bitwise AND of two 128-bit vectors of unsigned 32-bit lanes. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vandq_u32(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    return vec_and(a, b);
  #else
    simde_uint32x4_private va_ = simde_uint32x4_to_private(a);
    simde_uint32x4_private vb_ = simde_uint32x4_to_private(b);
    simde_uint32x4_private vr_;
    #if defined(SIMDE_X86_SSE2_NATIVE)
      vr_.m128i = _mm_and_si128(va_.m128i, vb_.m128i);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      vr_.v128 = wasm_v128_and(va_.v128, vb_.v128);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      vr_.values = va_.values & vb_.values;
    #else
      const size_t lane_count_ = sizeof(vr_.values) / sizeof(vr_.values[0]);
      SIMDE_VECTORIZE
      for (size_t lane_ = 0 ; lane_ < lane_count_ ; lane_++) {
        vr_.values[lane_] = va_.values[lane_] & vb_.values[lane_];
      }
    #endif
    return simde_uint32x4_from_private(vr_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vandq_u32
  #define vandq_u32(a, b) simde_vandq_u32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vandq_u64(simde_uint64x2_t a, simde_uint64x2_t b) {
  /* Lane-wise bitwise AND of two 128-bit vectors of unsigned 64-bit lanes.
   * 64-bit vec_and needs POWER7, unlike the narrower element types. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vandq_u64(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
    return vec_and(a, b);
  #else
    simde_uint64x2_private va_ = simde_uint64x2_to_private(a);
    simde_uint64x2_private vb_ = simde_uint64x2_to_private(b);
    simde_uint64x2_private vr_;
    #if defined(SIMDE_X86_SSE2_NATIVE)
      vr_.m128i = _mm_and_si128(va_.m128i, vb_.m128i);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      vr_.v128 = wasm_v128_and(va_.v128, vb_.v128);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      vr_.values = va_.values & vb_.values;
    #else
      const size_t lane_count_ = sizeof(vr_.values) / sizeof(vr_.values[0]);
      SIMDE_VECTORIZE
      for (size_t lane_ = 0 ; lane_ < lane_count_ ; lane_++) {
        vr_.values[lane_] = va_.values[lane_] & vb_.values[lane_];
      }
    #endif
    return simde_uint64x2_from_private(vr_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vandq_u64
  #define vandq_u64(a, b) simde_vandq_u64((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_AND_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/cnt.h | .h | 5,869 | 171 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_CNT_H)
#define SIMDE_ARM_NEON_CNT_H
#include "types.h"
#include "reinterpret.h"
#include <limits.h>
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
uint8_t
simde_x_arm_neon_cntb(uint8_t v) {
  /* Population count of a single byte (Kernighan's method: each
   * iteration clears the lowest set bit). Behaviorally identical to the
   * SWAR version for all 256 inputs; the original's trailing cast and
   * shift by (sizeof(uint8_t) - 1) * CHAR_BIT was a shift by zero. */
  uint8_t count = 0;
  while (v != 0) {
    v = HEDLEY_STATIC_CAST(uint8_t, v & (v - 1));
    count++;
  }
  return count;
}
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vcnt_s8(simde_int8x8_t a) {
  /* Per-lane population count of a 64-bit vector of signed 8-bit lanes. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vcnt_s8(a);
  #else
    simde_int8x8_private va_ = simde_int8x8_to_private(a);
    simde_int8x8_private vr_;
    const size_t lane_count_ = sizeof(vr_.values) / sizeof(vr_.values[0]);
    SIMDE_VECTORIZE
    for (size_t lane_ = 0 ; lane_ < lane_count_ ; lane_++) {
      /* Count bits of the lane's unsigned representation. */
      vr_.values[lane_] = HEDLEY_STATIC_CAST(int8_t, simde_x_arm_neon_cntb(HEDLEY_STATIC_CAST(uint8_t, va_.values[lane_])));
    }
    return simde_int8x8_from_private(vr_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vcnt_s8
  #define vcnt_s8(a) simde_vcnt_s8((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vcnt_u8(simde_uint8x8_t a) {
  /* Per-lane population count of a 64-bit vector of unsigned 8-bit lanes. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vcnt_u8(a);
  #else
    simde_uint8x8_private va_ = simde_uint8x8_to_private(a);
    simde_uint8x8_private vr_;
    const size_t lane_count_ = sizeof(vr_.values) / sizeof(vr_.values[0]);
    SIMDE_VECTORIZE
    for (size_t lane_ = 0 ; lane_ < lane_count_ ; lane_++) {
      vr_.values[lane_] = simde_x_arm_neon_cntb(va_.values[lane_]);
    }
    return simde_uint8x8_from_private(vr_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vcnt_u8
  #define vcnt_u8(a) simde_vcnt_u8((a))
#endif
/* The x86 implementations are stolen from
 * https://github.com/WebAssembly/simd/pull/379.  They could be cleaned
 * up a bit if someone is bored; they're mostly just direct
 * translations from the assembly. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vcntq_s8(simde_int8x16_t a) {
  /* Per-lane population count of a 128-bit vector of signed 8-bit lanes. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vcntq_s8(a);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
    /* vec_popcnt operates on unsigned lanes; reinterpret in and out. */
    return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), vec_popcnt(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), a)));
  #else
    simde_int8x16_private
      r_,
      a_ = simde_int8x16_to_private(a);
    #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BITALG_NATIVE)
      /* AVX-512 BITALG provides a direct per-byte popcount. */
      r_.m128i = _mm_popcnt_epi8(a_.m128i);
    #elif defined(SIMDE_X86_AVX2_NATIVE)
      /* Nibble-LUT popcount: split each byte into low/high nibbles, look
       * both up in a 16-entry popcount table via pshufb, then add. */
      __m128i tmp0 = _mm_set1_epi8(0x0f);
      __m128i tmp1 = _mm_andnot_si128(tmp0, a_.m128i);   /* high nibbles (still shifted) */
      __m128i y = _mm_and_si128(tmp0, a_.m128i);         /* low nibbles */
      tmp0 = _mm_set_epi8(4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0); /* popcount LUT */
      tmp1 = _mm_srli_epi16(tmp1, 4);
      y = _mm_shuffle_epi8(tmp0, y);
      tmp1 = _mm_shuffle_epi8(tmp0, tmp1);
      r_.m128i = _mm_add_epi8(y, tmp1);
    #elif defined(SIMDE_X86_SSSE3_NATIVE)
      /* Same nibble-LUT approach, scheduled for SSSE3. */
      __m128i tmp0 = _mm_set1_epi8(0x0f);
      __m128i tmp1 = a_.m128i;
      tmp1 = _mm_and_si128(tmp1, tmp0);                  /* low nibbles */
      tmp0 = _mm_andnot_si128(tmp0, a_.m128i);           /* high nibbles (still shifted) */
      __m128i y = _mm_set_epi8(4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0);
      tmp0 = _mm_srli_epi16(tmp0, 4);
      y = _mm_shuffle_epi8(y, tmp1);
      tmp1 = _mm_set_epi8(4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0);
      tmp1 = _mm_shuffle_epi8(tmp1, tmp0);
      r_.m128i = _mm_add_epi8(y, tmp1);
    #elif defined(SIMDE_X86_SSE2_NATIVE)
      /* Classic SWAR popcount (pairs, then nibbles) done per byte; the
       * 16-bit shifts are safe because each step masks cross-byte bits. */
      __m128i tmp = _mm_and_si128(_mm_srli_epi16(a_.m128i, 1), _mm_set1_epi8(0x55));
      a_.m128i = _mm_sub_epi8(a_.m128i, tmp);
      tmp = a_.m128i;
      a_.m128i = _mm_and_si128(a_.m128i, _mm_set1_epi8(0x33));
      tmp = _mm_and_si128(_mm_srli_epi16(tmp, 2), _mm_set1_epi8(0x33));
      a_.m128i = _mm_add_epi8(a_.m128i, tmp);
      tmp = _mm_srli_epi16(a_.m128i, 4);
      a_.m128i = _mm_add_epi8(a_.m128i, tmp);
      r_.m128i = _mm_and_si128(a_.m128i, _mm_set1_epi8(0x0f));
    #else
      /* Portable scalar fallback. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = HEDLEY_STATIC_CAST(int8_t, simde_x_arm_neon_cntb(HEDLEY_STATIC_CAST(uint8_t, a_.values[i])));
      }
    #endif
    return simde_int8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vcntq_s8
  #define vcntq_s8(a) simde_vcntq_s8((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vcntq_u8(simde_uint8x16_t a) {
  /* Population count is sign-agnostic, so reuse the signed implementation. */
  simde_int8x16_t as_signed_ = simde_vreinterpretq_s8_u8(a);
  simde_int8x16_t counts_ = simde_vcntq_s8(as_signed_);
  return simde_vreinterpretq_u8_s8(counts_);
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vcntq_u8
  #define vcntq_u8(a) simde_vcntq_u8((a))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_CNT_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/rshl.h | .h | 45,253 | 956 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Christopher Moore <moore@free.fr>
*/
#if !defined(SIMDE_ARM_NEON_RSHL_H)
#define SIMDE_ARM_NEON_RSHL_H
#include "../../x86/avx.h"
#include "types.h"
/* Notes from the implementer (Christopher Moore aka rosbif)
*
* I have tried to exactly reproduce the documented behaviour of the
* ARM NEON rshl and rshlq intrinsics.
* This is complicated for the following reasons:-
*
* a) Negative shift counts shift right.
*
* b) Only the low byte of the shift count is used but the shift count
* is not limited to 8-bit values (-128 to 127).
*
* c) Overflow must be avoided when rounding, together with sign change
* warning/errors in the C versions.
*
* d) Intel SIMD is not nearly as complete as NEON and AltiVec.
* There were no intrisics with a vector shift count before AVX2 which
* only has 32 and 64-bit logical ones and only a 32-bit arithmetic
* one. The others need AVX512. There are no 8-bit shift intrinsics at
* all, even with a scalar shift count. It is surprising to use AVX2
* and even AVX512 to implement a 64-bit vector operation.
*
* e) Many shift implementations, and the C standard, do not treat a
* shift count >= the object's size in bits as one would expect.
* (Personally I feel that > is silly but == can be useful.)
*
* Note that even the C17/18 standard does not define the behaviour of
* a right shift of a negative value.
* However Evan and I agree that all compilers likely to be used
* implement this as an arithmetic right shift with sign extension.
* If this is not the case it could be replaced by a logical right shift
* if negative values are complemented before and after the shift.
*
* Some of the SIMD translations may be slower than the portable code,
* particularly those for vectors with only one or two elements.
* But I had fun writing them ;-)
*
*/
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_vrshld_s64(int64_t a, int64_t b) {
  /* Rounding shift of a by b bits: positive b shifts left, negative b
   * does a rounding arithmetic right shift. Only the low byte of the
   * shift count is honoured (ARM vrshld_s64 semantics). */
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vrshld_s64(a, b);
  #else
    b = HEDLEY_STATIC_CAST(int8_t, b);
    /* |b| >= 64 (for the truncated count this is b <= -64 || b >= 64)
     * always produces zero. */
    if (b <= -64 || b >= 64) {
      return 0;
    }
    if (b >= 0) {
      return a << b;
    }
    /* Negative count: add half of the divisor before shifting to round. */
    return (a + (INT64_C(1) << (-b - 1))) >> -b;
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vrshld_s64
  #define vrshld_s64(a, b) simde_vrshld_s64((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vrshld_u64(uint64_t a, int64_t b) {
  /* Rounding shift of a by b bits: positive b shifts left, negative b
   * does a rounding logical right shift. Only the low byte of the shift
   * count is honoured; counts outside [-64, 63] yield zero, and b == -64
   * contributes only the rounding bit (a >> 63). */
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vrshld_u64(a, HEDLEY_STATIC_CAST(int64_t, b));
  #else
    b = HEDLEY_STATIC_CAST(int8_t, b);
    if (b >= 64 || b < -64) {
      return 0;
    }
    if (b >= 0) {
      return a << b;
    }
    /* Rounding bit is the last bit shifted out. The ternary guards the
     * b == -64 case, where (a >> -b) would be an illegal 64-bit shift. */
    const uint64_t round_bit = (a >> (-b - 1)) & 1;
    return ((b == -64) ? 0 : (a >> -b)) + round_bit;
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vrshld_u64
  #define vrshld_u64(a, b) simde_vrshld_u64((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vrshl_s8 (const simde_int8x8_t a, const simde_int8x8_t b) {
  /* Per-lane rounding shift: positive counts shift left, negative counts
   * do a rounding arithmetic right shift; |count| >= 8 yields zero. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrshl_s8(a, b);
  #else
    simde_int8x8_private
      r_,
      a_ = simde_int8x8_to_private(a),
      b_ = simde_int8x8_to_private(b);
    #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
      /* Widen lanes to 16 bits so per-lane variable shifts exist.
       * b ^ ff == ~b == -b - 1, so a128_shr = a >> (-b - 1); subtracting
       * ff adds 1, and the final >> 1 completes the round-half-up. */
      const __m128i zero = _mm_setzero_si128();
      const __m128i ff = _mm_cmpeq_epi16(zero, zero);
      __m128i a128 = _mm_cvtepi8_epi16(_mm_movpi64_epi64(a_.m64));
      __m128i b128 = _mm_cvtepi8_epi16(_mm_movpi64_epi64(b_.m64));
      __m128i a128_shr = _mm_srav_epi16(a128, _mm_xor_si128(b128, ff));
      __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi16(a128, b128),
                                     _mm_srai_epi16(_mm_sub_epi16(a128_shr, ff), 1),
                                     _mm_cmpgt_epi16(zero, b128));
      r_.m64 = _mm_movepi64_pi64(_mm_cvtepi16_epi8(r128));
    #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
      /* Same trick, but widened to 32 bits (AVX2 has no 16-bit variable
       * shifts); the shuffle re-packs byte 0 of each 32-bit lane. */
      const __m256i zero = _mm256_setzero_si256();
      const __m256i ff = _mm256_cmpeq_epi32(zero, zero);
      __m256i a256 = _mm256_cvtepi8_epi32(_mm_movpi64_epi64(a_.m64));
      __m256i b256 = _mm256_cvtepi8_epi32(_mm_movpi64_epi64(b_.m64));
      __m256i a256_shr = _mm256_srav_epi32(a256, _mm256_xor_si256(b256, ff));
      __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi32(a256, b256),
                                        _mm256_srai_epi32(_mm256_sub_epi32(a256_shr, ff), 1),
                                        _mm256_cmpgt_epi32(zero, b256));
      r256 = _mm256_shuffle_epi8(r256, _mm256_set1_epi32(0x0C080400));
      r_.m64 = _mm_set_pi32(simde_mm256_extract_epi32(r256, 4), simde_mm256_extract_epi32(r256, 0));
    #else
      /* Portable fallback mirroring the documented NEON behaviour. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = HEDLEY_STATIC_CAST(int8_t,
          (simde_math_abs(b_.values[i]) >= 8) ? 0 :
          (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) :
          ((a_.values[i] + (1 << (-b_.values[i] - 1))) >> -b_.values[i]));
      }
    #endif
    return simde_int8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrshl_s8
  #define vrshl_s8(a, b) simde_vrshl_s8((a), (b))
#endif
/* Rounding shift left (NEON vrshl_s16): per-lane signed shift of int16
 * lanes.  Only the low byte of each count is significant (NEON semantics);
 * negative counts round via (a + (1 << (n - 1))) >> n, |n| >= 16 gives 0. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vrshl_s16 (const simde_int16x4_t a, const simde_int16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vrshl_s16(a, b);
#else
simde_int16x4_private
r_,
a_ = simde_int16x4_to_private(a),
b_ = simde_int16x4_to_private(b);
#if defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
const __m128i zero = _mm_setzero_si128();
const __m128i ff = _mm_cmpeq_epi32(zero, zero);
__m128i a128 = _mm_cvtepi16_epi32(_mm_movpi64_epi64(a_.m64));
__m128i b128 = _mm_cvtepi16_epi32(_mm_movpi64_epi64(b_.m64));
/* Keep only the low byte of each shift count, sign-extended. */
b128 = _mm_srai_epi32(_mm_slli_epi32(b128, 24), 24);
/* b ^ all-ones == -b - 1; subtracting -1 then shifting by 1 rounds. */
__m128i a128_shr = _mm_srav_epi32(a128, _mm_xor_si128(b128, ff));
__m128i r128 = _mm_blendv_epi8(_mm_sllv_epi32(a128, b128),
_mm_srai_epi32(_mm_sub_epi32(a128_shr, ff), 1),
_mm_cmpgt_epi32(zero, b128));
/* Narrow the four 32-bit lanes back to 16-bit via byte shuffle. */
r_.m64 = _mm_movepi64_pi64(_mm_shuffle_epi8(r128, _mm_set1_epi64x(0x0D0C090805040100)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
/* NEON uses only the low byte of the count. */
b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]);
r_.values[i] = HEDLEY_STATIC_CAST(int16_t,
(simde_math_abs(b_.values[i]) >= 16) ? 0 :
(b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) :
((a_.values[i] + (1 << (-b_.values[i] - 1))) >> -b_.values[i]));
}
#endif
return simde_int16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshl_s16
#define vrshl_s16(a, b) simde_vrshl_s16((a), (b))
#endif
/* Rounding shift left (NEON vrshl_s32): per-lane signed shift of int32
 * lanes; only the low byte of each count is used, negative counts do a
 * rounding right shift, |n| >= 32 gives 0. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vrshl_s32 (const simde_int32x2_t a, const simde_int32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vrshl_s32(a, b);
#else
simde_int32x2_private
r_,
a_ = simde_int32x2_to_private(a),
b_ = simde_int32x2_to_private(b);
#if defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
const __m128i zero = _mm_setzero_si128();
const __m128i ff = _mm_cmpeq_epi32(zero, zero);
__m128i a128 = _mm_movpi64_epi64(a_.m64);
__m128i b128 = _mm_movpi64_epi64(b_.m64);
/* Sign-extend the low byte of each count. */
b128 = _mm_srai_epi32(_mm_slli_epi32(b128, 24), 24);
/* a >> (b ^ -1) == a >> (-b - 1); "- (-1), >> 1" yields (x + 1) >> 1. */
__m128i a128_shr = _mm_srav_epi32(a128, _mm_xor_si128(b128, ff));
__m128i r128 = _mm_blendv_epi8(_mm_sllv_epi32(a128, b128),
_mm_srai_epi32(_mm_sub_epi32(a128_shr, ff), 1),
_mm_cmpgt_epi32(zero, b128));
r_.m64 = _mm_movepi64_pi64(r128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
/* Only the low byte of the count is significant. */
b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]);
r_.values[i] = HEDLEY_STATIC_CAST(int32_t,
(simde_math_abs(b_.values[i]) >= 32) ? 0 :
(b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) :
((a_.values[i] + (1 << (-b_.values[i] - 1))) >> -b_.values[i]));
}
#endif
return simde_int32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshl_s32
#define vrshl_s32(a, b) simde_vrshl_s32((a), (b))
#endif
/* Rounding shift left (NEON vrshl_s64): single int64 lane; scalar fallback
 * defers to simde_vrshld_s64, which implements the full NEON semantics. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vrshl_s64 (const simde_int64x1_t a, const simde_int64x1_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vrshl_s64(a, b);
#else
simde_int64x1_private
r_,
a_ = simde_int64x1_to_private(a),
b_ = simde_int64x1_to_private(b);
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
const __m128i zero = _mm_setzero_si128();
const __m128i ff = _mm_cmpeq_epi64(zero, zero);
__m128i a128 = _mm_movpi64_epi64(a_.m64);
__m128i b128 = _mm_movpi64_epi64(b_.m64);
/* Sign-extend the low byte of the count to 64 bits. */
b128 = _mm_srai_epi64(_mm_slli_epi64(b128, 56), 56);
/* b ^ -1 == -b - 1; subtract all-ones (add 1) then shift once to round. */
__m128i a128_shr = _mm_srav_epi64(a128, _mm_xor_si128(b128, ff));
__m128i r128 = _mm_blendv_epi8(_mm_sllv_epi64(a128, b128),
_mm_srai_epi64(_mm_sub_epi64(a128_shr, ff), 1),
_mm_cmpgt_epi64(zero, b128));
r_.m64 = _mm_movepi64_pi64(r128);
#elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
/* AVX2 has no 64-bit arithmetic variable shift: emulate it by XOR-ing
 * with the sign mask before and after a logical shift.  a128_rnd is bit
 * (n - 1) of `a`, the rounding increment; the b < 0 test looks at the
 * low byte's sign via the << 56. */
const __m128i zero = _mm_setzero_si128();
const __m128i ones = _mm_set1_epi64x(1);
__m128i a128 = _mm_movpi64_epi64(a_.m64);
__m128i b128 = _mm_movpi64_epi64(b_.m64);
__m128i maska = _mm_cmpgt_epi64(zero, a128);
__m128i b128_abs = _mm_and_si128(_mm_abs_epi8(b128), _mm_set1_epi64x(0xFF));
__m128i a128_rnd = _mm_and_si128(_mm_srlv_epi64(a128, _mm_sub_epi64(b128_abs, ones)), ones);
__m128i r128 = _mm_blendv_epi8(_mm_sllv_epi64(a128, b128_abs),
_mm_add_epi64(_mm_xor_si128(_mm_srlv_epi64(_mm_xor_si128(a128, maska), b128_abs), maska), a128_rnd),
_mm_cmpgt_epi64(zero, _mm_slli_epi64(b128, 56)));
r_.m64 = _mm_movepi64_pi64(r128)Q;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vrshld_s64(a_.values[i], b_.values[i]);
}
#endif
return simde_int64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshl_s64
#define vrshl_s64(a, b) simde_vrshl_s64((a), (b))
#endif
/* Rounding shift left, unsigned (NEON vrshl_u8): uint8 lanes shifted by
 * signed int8 counts.  n >= 8 gives 0; n == -8 keeps only the rounding
 * carry ((a >> 7) & 1); n < -8 gives 0. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vrshl_u8 (const simde_uint8x8_t a, const simde_int8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vrshl_u8(a, b);
#else
simde_uint8x8_private
r_,
a_ = simde_uint8x8_to_private(a);
simde_int8x8_private b_ = simde_int8x8_to_private(b);
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
/* Zero-extend `a`, sign-extend the counts, then use the same
 * "shift by -b - 1, add 1 via subtracting all-ones, shift once" rounding
 * trick as the signed variants, but with logical shifts. */
const __m128i zero = _mm_setzero_si128()Q;
const __m128i ff = _mm_cmpeq_epi16(zero, zero);
__m128i a128 = _mm_cvtepu8_epi16(_mm_movpi64_epi64(a_.m64));
__m128i b128 = _mm_cvtepi8_epi16(_mm_movpi64_epi64(b_.m64));
__m128i a128_shr = _mm_srlv_epi16(a128, _mm_xor_si128(b128, ff));
__m128i r128 = _mm_blendv_epi8(_mm_sllv_epi16(a128, b128),
_mm_srli_epi16(_mm_sub_epi16(a128_shr, ff), 1),
_mm_cmpgt_epi16(zero, b128));
r_.m64 = _mm_movepi64_pi64(_mm_cvtepi16_epi8(r128));
#elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
/* Widen to 32-bit lanes; shuffle + extracts narrow back to bytes. */
const __m256i zero = _mm256_setzero_si256();
const __m256i ff = _mm256_cmpeq_epi32(zero, zero);
__m256i a256 = _mm256_cvtepu8_epi32(_mm_movpi64_epi64(a_.m64));
__m256i b256 = _mm256_cvtepi8_epi32(_mm_movpi64_epi64(b_.m64));
__m256i a256_shr = _mm256_srlv_epi32(a256, _mm256_xor_si256(b256, ff));
__m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi32(a256, b256),
_mm256_srli_epi32(_mm256_sub_epi32(a256_shr, ff), 1),
_mm256_cmpgt_epi32(zero, b256));
r256 = _mm256_shuffle_epi8(r256, _mm256_set1_epi32(0x0C080400));
r_.m64 = _mm_set_pi32(simde_mm256_extract_epi32(r256, 4), simde_mm256_extract_epi32(r256, 0));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
/* n == -8 is special-cased: a >> 8 would be 0 anyway, but the rounding
 * bit (a >> 7) & 1 must still be added. */
r_.values[i] = HEDLEY_STATIC_CAST(uint8_t,
(b_.values[i] >= 8) ? 0 :
(b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) :
(b_.values[i] >= -8) ? (((b_.values[i] == -8) ? 0 : (a_.values[i] >> -b_.values[i])) + ((a_.values[i] >> (-b_.values[i] - 1)) & 1)) :
0);
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshl_u8
#define vrshl_u8(a, b) simde_vrshl_u8((a), (b))
#endif
/* Rounding shift left, unsigned (NEON vrshl_u16): uint16 lanes, signed
 * counts taken from the low byte of each int16 lane of `b`. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vrshl_u16 (const simde_uint16x4_t a, const simde_int16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vrshl_u16(a, b);
#else
simde_uint16x4_private
r_,
a_ = simde_uint16x4_to_private(a);
simde_int16x4_private b_ = simde_int16x4_to_private(b);
#if defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
const __m128i zero = _mm_setzero_si128();
const __m128i ff = _mm_cmpeq_epi32(zero, zero);
__m128i a128 = _mm_cvtepu16_epi32(_mm_movpi64_epi64(a_.m64));
__m128i b128 = _mm_cvtepi16_epi32(_mm_movpi64_epi64(b_.m64));
/* Sign-extend the low byte of each count. */
b128 = _mm_srai_epi32(_mm_slli_epi32(b128, 24), 24);
/* Logical shift by -b - 1, then +1 and one more shift to round. */
__m128i a128_shr = _mm_srlv_epi32(a128, _mm_xor_si128(b128, ff));
__m128i r128 = _mm_blendv_epi8(_mm_sllv_epi32(a128, b128),
_mm_srli_epi32(_mm_sub_epi32(a128_shr, ff), 1),
_mm_cmpgt_epi32(zero, b128));
/* Narrow 32-bit lanes back to 16-bit. */
r_.m64 = _mm_movepi64_pi64(_mm_shuffle_epi8(r128, _mm_set1_epi64x(0x0D0C090805040100)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]);
r_.values[i] = HEDLEY_STATIC_CAST(uint16_t,
(b_.values[i] >= 16) ? 0 :
(b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) :
(b_.values[i] >= -16) ? (((b_.values[i] == -16) ? 0 : (a_.values[i] >> -b_.values[i])) + ((a_.values[i] >> (-b_.values[i] - 1)) & 1)) :
0);
}
#endif
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshl_u16
#define vrshl_u16(a, b) simde_vrshl_u16((a), (b))
#endif
/* Rounding shift left, unsigned (NEON vrshl_u32): uint32 lanes, signed
 * counts from the low byte of each int32 lane of `b`. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vrshl_u32 (const simde_uint32x2_t a, const simde_int32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vrshl_u32(a, b);
#else
simde_uint32x2_private
r_,
a_ = simde_uint32x2_to_private(a);
simde_int32x2_private b_ = simde_int32x2_to_private(b);
#if defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
const __m128i zero = _mm_setzero_si128();
const __m128i ff = _mm_cmpeq_epi32(zero, zero);
__m128i a128 = _mm_movpi64_epi64(a_.m64);
__m128i b128 = _mm_movpi64_epi64(b_.m64);
/* Sign-extend the low byte of each count. */
b128 = _mm_srai_epi32(_mm_slli_epi32(b128, 24), 24);
/* Logical shift by -b - 1; subtracting all-ones adds 1 before rounding. */
__m128i a128_shr = _mm_srlv_epi32(a128, _mm_xor_si128(b128, ff));
__m128i r128 = _mm_blendv_epi8(_mm_sllv_epi32(a128, b128),
_mm_srli_epi32(_mm_sub_epi32(a128_shr, ff), 1),
_mm_cmpgt_epi32(zero, b128));
r_.m64 = _mm_movepi64_pi64(r128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]);
r_.values[i] =
(b_.values[i] >= 32) ? 0 :
(b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) :
(b_.values[i] >= -32) ? (((b_.values[i] == -32) ? 0 : (a_.values[i] >> -b_.values[i])) + ((a_.values[i] >> (-b_.values[i] - 1)) & 1)) :
0;
}
#endif
return simde_uint32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshl_u32
#define vrshl_u32(a, b) simde_vrshl_u32((a), (b))
#endif
/* Rounding shift left, unsigned (NEON vrshl_u64): single uint64 lane;
 * scalar fallback defers to simde_vrshld_u64. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vrshl_u64 (const simde_uint64x1_t a, const simde_int64x1_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vrshl_u64(a, b);
#else
simde_uint64x1_private
r_,
a_ = simde_uint64x1_to_private(a);
simde_int64x1_private b_ = simde_int64x1_to_private(b);
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
const __m128i zero = _mm_setzero_si128();
const __m128i ff = _mm_cmpeq_epi64(zero, zero);
__m128i a128 = _mm_movpi64_epi64(a_.m64);
__m128i b128 = _mm_movpi64_epi64(b_.m64);
/* Sign-extend the low byte of the count to 64 bits. */
b128 = _mm_srai_epi64(_mm_slli_epi64(b128, 56), 56);
__m128i a128_shr = _mm_srlv_epi64(a128, _mm_xor_si128(b128, ff));
__m128i r128 = _mm_blendv_epi8(_mm_sllv_epi64(a128, b128),
_mm_srli_epi64(_mm_sub_epi64(a128_shr, ff), 1),
_mm_cmpgt_epi64(zero, b128));
r_.m64 = _mm_movepi64_pi64(r128);
#elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
/* Unsigned needs no sign-mask trick: shift right by |b| - 1, add 1,
 * shift once more to round.  The b < 0 test inspects the low byte's
 * sign bit via the << 56. */
const __m128i ones = _mm_set1_epi64x(1);
const __m128i a128 = _mm_movpi64_epi64(a_.m64);
__m128i b128 = _mm_movpi64_epi64(b_.m64);
__m128i b128_abs = _mm_and_si128(_mm_abs_epi8(b128), _mm_set1_epi64x(0xFF));
__m128i a128_shr = _mm_srlv_epi64(a128, _mm_sub_epi64(b128_abs, ones));
__m128i r128 = _mm_blendv_epi8(_mm_sllv_epi64(a128, b128_abs),
_mm_srli_epi64(_mm_add_epi64(a128_shr, ones), 1),
_mm_cmpgt_epi64(_mm_setzero_si128(), _mm_slli_epi64(b128, 56)));
r_.m64 = _mm_movepi64_pi64(r128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vrshld_u64(a_.values[i], b_.values[i]);
}
#endif
return simde_uint64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshl_u64
#define vrshl_u64(a, b) simde_vrshl_u64((a), (b))
#endif
/* Rounding shift left, 128-bit (NEON vrshlq_s8): sixteen int8 lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vrshlq_s8 (const simde_int8x16_t a, const simde_int8x16_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vrshlq_s8(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
/* Altivec: a_shr = a >> (|b| - 1); rounded result is
 * (a_shr >> 1) + (a_shr & 1).  vec_sel picks the right-shift path for
 * b < 0 and the final vec_and zeroes lanes with |b| >= 8. */
const SIMDE_POWER_ALTIVEC_VECTOR( signed char) zero = vec_splats(HEDLEY_STATIC_CAST( signed char, 0));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) ones = vec_splats(HEDLEY_STATIC_CAST(unsigned char, 1));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) max = vec_splats(HEDLEY_STATIC_CAST(unsigned char, 8));
SIMDE_POWER_ALTIVEC_VECTOR(signed char) a_shr;
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) b_abs;
b_abs = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_abs(b));
a_shr = vec_sra(a, vec_sub(b_abs, ones));
return vec_and(vec_sel(vec_sl(a, b_abs),
vec_add(vec_sra(a_shr, ones), vec_and(a_shr, HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), ones))),
vec_cmplt(b, zero)),
vec_cmplt(b_abs, max));
#else
simde_int8x16_private
r_,
a_ = simde_int8x16_to_private(a),
b_ = simde_int8x16_to_private(b);
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
/* Widen to 16-bit lanes; b ^ all-ones == -b - 1, and subtracting
 * all-ones (i.e. adding 1) then shifting once gives the rounded value. */
const __m256i zero = _mm256_setzero_si256();
const __m256i ff = _mm256_cmpeq_epi16(zero, zero);
__m256i a256 = _mm256_cvtepi8_epi16(a_.m128i);
__m256i b256 = _mm256_cvtepi8_epi16(b_.m128i);
__m256i a256_shr = _mm256_srav_epi16(a256, _mm256_xor_si256(b256, ff));
__m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi16(a256, b256),
_mm256_srai_epi16(_mm256_sub_epi16(a256_shr, ff), 1),
_mm256_cmpgt_epi16(zero, b256));
r_.m128i = _mm256_cvtepi16_epi8(r256);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = HEDLEY_STATIC_CAST(int8_t,
(simde_math_abs(b_.values[i]) >= 8) ? 0 :
(b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) :
((a_.values[i] + (1 << (-b_.values[i] - 1))) >> -b_.values[i]));
}
#endif
return simde_int8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshlq_s8
#define vrshlq_s8(a, b) simde_vrshlq_s8((a), (b))
#endif
/* Rounding shift left, 128-bit (NEON vrshlq_s16): eight int16 lanes; only
 * the low byte of each count is significant. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vrshlq_s16 (const simde_int16x8_t a, const simde_int16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vrshlq_s16(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
/* Altivec: counts masked to the low byte; b < 0 is tested on the low
 * byte's sign via vec_sl(b, shift); lanes with |b| >= 16 are zeroed. */
const SIMDE_POWER_ALTIVEC_VECTOR( signed short) zero = vec_splats(HEDLEY_STATIC_CAST( signed short, 0));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) ones = vec_splats(HEDLEY_STATIC_CAST(unsigned short, 1));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) shift = vec_splats(HEDLEY_STATIC_CAST(unsigned short, 16 - 8));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) max = vec_splats(HEDLEY_STATIC_CAST(unsigned short, 16));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) ff = vec_splats(HEDLEY_STATIC_CAST(unsigned short, 0xFF));
SIMDE_POWER_ALTIVEC_VECTOR(signed short) a_shr;
SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) b_abs;
b_abs = vec_and(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short),
vec_abs(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), b))),
ff);
a_shr = vec_sra(a, vec_sub(b_abs, ones));
return vec_and(vec_sel(vec_sl(a, b_abs),
vec_add(vec_sra(a_shr, ones), vec_and(a_shr, HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed short), ones))),
vec_cmplt(vec_sl(b, shift), zero)),
vec_cmplt(b_abs, max));
#else
simde_int16x8_private
r_,
a_ = simde_int16x8_to_private(a),
b_ = simde_int16x8_to_private(b);
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
/* Native 16-bit variable shifts: B is the sign-extended low byte of b. */
const __m128i zero = _mm_setzero_si128();
const __m128i ff = _mm_cmpeq_epi16(zero, zero);
__m128i B = _mm_srai_epi16(_mm_slli_epi16(b_.m128i, 8), 8);
__m128i a_shr = _mm_srav_epi16(a_.m128i, _mm_xor_si128(B, ff));
r_.m128i = _mm_blendv_epi8(_mm_sllv_epi16(a_.m128i, B),
_mm_srai_epi16(_mm_sub_epi16(a_shr, ff), 1),
_mm_cmpgt_epi16(zero, B));
#elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_ARCH_AMD64)
/* Widen to 32-bit lanes, then narrow back with a byte shuffle and two
 * 64-bit extracts. */
const __m256i zero = _mm256_setzero_si256();
const __m256i ff = _mm256_cmpeq_epi32(zero, zero);
__m256i a256 = _mm256_cvtepi16_epi32(a_.m128i);
__m256i b256 = _mm256_cvtepi16_epi32(b_.m128i);
b256 = _mm256_srai_epi32(_mm256_slli_epi32(b256, 24), 24);
__m256i a256_shr = _mm256_srav_epi32(a256, _mm256_xor_si256(b256, ff));
__m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi32(a256, b256),
_mm256_srai_epi32(_mm256_sub_epi32(a256_shr, ff), 1),
_mm256_cmpgt_epi32(zero, b256));
r256 = _mm256_shuffle_epi8(r256, _mm256_set1_epi64x(0x0D0C090805040100));
r_.m128i = _mm_set_epi64x(simde_mm256_extract_epi64(r256, 2), simde_mm256_extract_epi64(r256, 0));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]);
r_.values[i] = HEDLEY_STATIC_CAST(int16_t,
(simde_math_abs(b_.values[i]) >= 16) ? 0 :
(b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) :
((a_.values[i] + (1 << (-b_.values[i] - 1))) >> -b_.values[i]));
}
#endif
return simde_int16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshlq_s16
#define vrshlq_s16(a, b) simde_vrshlq_s16((a), (b))
#endif
/* Rounding shift left, 128-bit (NEON vrshlq_s32): four int32 lanes; only
 * the low byte of each count is significant. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vrshlq_s32 (const simde_int32x4_t a, const simde_int32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vrshlq_s32(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
/* Altivec path: same structure as vrshlq_s16, with 32-bit lanes. */
const SIMDE_POWER_ALTIVEC_VECTOR( signed int) zero = vec_splats(HEDLEY_STATIC_CAST( signed int, 0));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) ones = vec_splats(HEDLEY_STATIC_CAST(unsigned int, 1));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) shift = vec_splats(HEDLEY_STATIC_CAST(unsigned int, 32 - 8));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) max = vec_splats(HEDLEY_STATIC_CAST(unsigned int, 32));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) ff = vec_splats(HEDLEY_STATIC_CAST(unsigned int, 0xFF));
SIMDE_POWER_ALTIVEC_VECTOR(signed int) a_shr;
SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) b_abs;
b_abs = vec_and(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int),
vec_abs(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), b))),
ff);
a_shr = vec_sra(a, vec_sub(b_abs, ones));
return vec_and(vec_sel(vec_sl(a, b_abs),
vec_add(vec_sra(a_shr, ones), vec_and(a_shr, HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), ones))),
vec_cmplt(vec_sl(b, shift), zero)),
vec_cmplt(b_abs, max));
#else
simde_int32x4_private
r_,
a_ = simde_int32x4_to_private(a),
b_ = simde_int32x4_to_private(b);
#if defined(SIMDE_X86_AVX2_NATIVE)
/* B is the sign-extended low byte of each count; a >> (B ^ -1) is
 * a >> (-B - 1), and "- (-1), >> 1" produces (x + 1) >> 1. */
const __m128i zero = _mm_setzero_si128();
const __m128i ff = _mm_cmpeq_epi32(zero, zero);
__m128i B = _mm_srai_epi32(_mm_slli_epi32(b_.m128i, 24), 24);
__m128i a_shr = _mm_srav_epi32(a_.m128i, _mm_xor_si128(B, ff));
r_.m128i = _mm_blendv_epi8(_mm_sllv_epi32(a_.m128i, B),
_mm_srai_epi32(_mm_sub_epi32(a_shr, ff), 1),
_mm_cmpgt_epi32(zero, B));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]);
r_.values[i] = HEDLEY_STATIC_CAST(int32_t,
(simde_math_abs(b_.values[i]) >= 32) ? 0 :
(b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) :
((a_.values[i] + (1 << (-b_.values[i] - 1))) >> -b_.values[i]));
}
#endif
return simde_int32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshlq_s32
#define vrshlq_s32(a, b) simde_vrshlq_s32((a), (b))
#endif
/* Rounding shift left, 128-bit (NEON vrshlq_s64): two int64 lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vrshlq_s64 (const simde_int64x2_t a, const simde_int64x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vrshlq_s64(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
/* POWER8 path (64-bit vector ops): same rounding structure as the
 * narrower Altivec variants. */
const SIMDE_POWER_ALTIVEC_VECTOR( signed long long) zero = vec_splats(HEDLEY_STATIC_CAST( signed long long, 0));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) ones = vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 1));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) shift = vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 64 - 8));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) max = vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 64));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) ff = vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 0xFF));
SIMDE_POWER_ALTIVEC_VECTOR(signed long long) a_shr;
SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) b_abs;
b_abs = vec_and(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long),
vec_abs(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), b))),
ff);
a_shr = vec_sra(a, vec_sub(b_abs, ones));
HEDLEY_DIAGNOSTIC_PUSH
#if defined(SIMDE_BUG_CLANG_46770)
SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_
#endif
return vec_and(vec_sel(vec_sl(a, b_abs),
vec_add(vec_sra(a_shr, ones), vec_and(a_shr, HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), ones))),
vec_cmplt(vec_sl(b, shift), zero)),
vec_cmplt(b_abs, max));
HEDLEY_DIAGNOSTIC_POP
#else
simde_int64x2_private
r_,
a_ = simde_int64x2_to_private(a),
b_ = simde_int64x2_to_private(b);
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
const __m128i zero = _mm_setzero_si128();
const __m128i ff = _mm_cmpeq_epi32(zero, zero);
__m128i B = _mm_srai_epi64(_mm_slli_epi64(b_.m128i, 56), 56);
__m128i a_shr = _mm_srav_epi64(a_.m128i, _mm_xor_si128(B, ff));
r_.m128i = _mm_blendv_epi8(_mm_sllv_epi64(a_.m128i, B),
_mm_srai_epi64(_mm_sub_epi64(a_shr, ff), 1),
_mm_cmpgt_epi64(zero, B));
#elif defined(SIMDE_X86_AVX2_NATIVE)
/* AVX2 has no 64-bit arithmetic variable shift: XOR with the sign mask
 * before/after a logical shift emulates it; a_rnd is bit (n - 1) of
 * `a`, the rounding increment. */
const __m128i zero = _mm_setzero_si128();
const __m128i ones = _mm_set1_epi64x(1);
__m128i maska = _mm_cmpgt_epi64(zero, a_.m128i);
__m128i b_abs = _mm_and_si128(_mm_abs_epi8(b_.m128i), _mm_set1_epi64x(0xFF));
__m128i a_rnd = _mm_and_si128(_mm_srlv_epi64(a_.m128i, _mm_sub_epi64(b_abs, ones)), ones);
r_.m128i = _mm_blendv_epi8(_mm_sllv_epi64(a_.m128i, b_abs),
_mm_add_epi64(_mm_xor_si128(_mm_srlv_epi64(_mm_xor_si128(a_.m128i, maska), b_abs), maska), a_rnd),
_mm_cmpgt_epi64(zero, _mm_slli_epi64(b_.m128i, 56)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vrshld_s64(a_.values[i], b_.values[i]);
}
#endif
return simde_int64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshlq_s64
#define vrshlq_s64(a, b) simde_vrshlq_s64((a), (b))
#endif
/* Rounding shift left, 128-bit unsigned (NEON vrshlq_u8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vrshlq_u8 (const simde_uint8x16_t a, const simde_int8x16_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vrshlq_u8(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
/* Altivec: a_shr = (a >> (|b| - 1)), masked to 0 when |b| - 1 >= 8;
 * the rounded right shift is then (a_shr + 1) >> 1. */
const SIMDE_POWER_ALTIVEC_VECTOR( signed char) zero = vec_splats(HEDLEY_STATIC_CAST( signed char, 0));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) ones = vec_splats(HEDLEY_STATIC_CAST(unsigned char, 1));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) max = vec_splats(HEDLEY_STATIC_CAST(unsigned char, 8));
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) b_abs, b_abs_dec, a_shr;
b_abs = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_abs(b));
b_abs_dec = vec_sub(b_abs, ones);
a_shr = vec_and(vec_sr(a, b_abs_dec), vec_cmplt(b_abs_dec, max));
return vec_sel(vec_and(vec_sl(a, b_abs), vec_cmplt(b_abs, max)),
vec_sr(vec_add(a_shr, ones), ones),
vec_cmplt(b, zero));
#else
simde_uint8x16_private
r_,
a_ = simde_uint8x16_to_private(a);
simde_int8x16_private b_ = simde_int8x16_to_private(b);
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
/* Zero-extend to 16-bit lanes; logical shift by -b - 1, then +1 and one
 * more shift rounds.  cvtepi16_epi8 truncates back to bytes. */
const __m256i zero = _mm256_setzero_si256();
const __m256i ff = _mm256_cmpeq_epi32(zero, zero);
__m256i a256 = _mm256_cvtepu8_epi16(a_.m128i);
__m256i b256 = _mm256_cvtepi8_epi16(b_.m128i);
__m256i a256_shr = _mm256_srlv_epi16(a256, _mm256_xor_si256(b256, ff));
__m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi16(a256, b256),
_mm256_srli_epi16(_mm256_sub_epi16(a256_shr, ff), 1),
_mm256_cmpgt_epi16(zero, b256));
r_.m128i = _mm256_cvtepi16_epi8(r256);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
/* n == -8: the shifted value is 0 but the rounding bit survives. */
r_.values[i] = HEDLEY_STATIC_CAST(uint8_t,
(b_.values[i] >= 8) ? 0 :
(b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) :
(b_.values[i] >= -8) ? (((b_.values[i] == -8) ? 0 : (a_.values[i] >> -b_.values[i])) + ((a_.values[i] >> (-b_.values[i] - 1)) & 1)) :
0);
}
#endif
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshlq_u8
#define vrshlq_u8(a, b) simde_vrshlq_u8((a), (b))
#endif
/* Rounding shift left, 128-bit unsigned (NEON vrshlq_u16): eight uint16
 * lanes, counts from the low byte of each int16 lane of `b`. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vrshlq_u16 (const simde_uint16x8_t a, const simde_int16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vrshlq_u16(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
/* Altivec: counts masked to the low byte; b < 0 tested on the low
 * byte's sign via vec_sl(b, shift). */
const SIMDE_POWER_ALTIVEC_VECTOR( signed short) zero = vec_splats(HEDLEY_STATIC_CAST( signed short, 0));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) ones = vec_splats(HEDLEY_STATIC_CAST(unsigned short, 1));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) shift = vec_splats(HEDLEY_STATIC_CAST(unsigned short, 16 - 8));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) max = vec_splats(HEDLEY_STATIC_CAST(unsigned short, 16));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) ff = vec_splats(HEDLEY_STATIC_CAST(unsigned short, 0xFF));
SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) b_abs, b_abs_dec, a_shr;
b_abs = vec_and(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short),
vec_abs(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), b))),
ff);
b_abs_dec = vec_sub(b_abs, ones);
a_shr = vec_and(vec_sr(a, b_abs_dec), vec_cmplt(b_abs_dec, max));
return vec_sel(vec_and(vec_sl(a, b_abs), vec_cmplt(b_abs, max)),
vec_sr(vec_add(a_shr, ones), ones),
vec_cmplt(vec_sl(b, shift), zero));
#else
simde_uint16x8_private
r_,
a_ = simde_uint16x8_to_private(a);
simde_int16x8_private b_ = simde_int16x8_to_private(b);
#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
/* Native 16-bit variable shifts: B is the sign-extended low byte. */
const __m128i zero = _mm_setzero_si128();
const __m128i ff = _mm_cmpeq_epi16(zero, zero);
__m128i B = _mm_srai_epi16(_mm_slli_epi16(b_.m128i, 8), 8);
__m128i a_shr = _mm_srlv_epi16(a_.m128i, _mm_xor_si128(B, ff));
r_.m128i = _mm_blendv_epi8(_mm_sllv_epi16(a_.m128i, B),
_mm_srli_epi16(_mm_sub_epi16(a_shr, ff), 1),
_mm_cmpgt_epi16(zero, B));
#elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_ARCH_AMD64)
/* Widen to 32-bit lanes, then narrow back with shuffle + extracts. */
const __m256i zero = _mm256_setzero_si256();
const __m256i ff = _mm256_cmpeq_epi32(zero, zero);
__m256i a256 = _mm256_cvtepu16_epi32(a_.m128i);
__m256i b256 = _mm256_cvtepi16_epi32(b_.m128i);
b256 = _mm256_srai_epi32(_mm256_slli_epi32(b256, 24), 24);
__m256i a256_shr = _mm256_srlv_epi32(a256, _mm256_xor_si256(b256, ff));
__m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi32(a256, b256),
_mm256_srli_epi32(_mm256_sub_epi32(a256_shr, ff), 1),
_mm256_cmpgt_epi32(zero, b256));
r256 = _mm256_shuffle_epi8(r256, _mm256_set1_epi64x(0x0D0C090805040100));
r_.m128i = _mm_set_epi64x(simde_mm256_extract_epi64(r256, 2), simde_mm256_extract_epi64(r256, 0));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]);
r_.values[i] = HEDLEY_STATIC_CAST(uint16_t,
(b_.values[i] >= 16) ? 0 :
(b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) :
(b_.values[i] >= -16) ? (((b_.values[i] == -16) ? 0 : (a_.values[i] >> -b_.values[i])) + ((a_.values[i] >> (-b_.values[i] - 1)) & 1)) :
0);
}
#endif
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshlq_u16
#define vrshlq_u16(a, b) simde_vrshlq_u16((a), (b))
#endif
/* Rounding shift left, 128-bit unsigned (NEON vrshlq_u32): four uint32
 * lanes, counts from the low byte of each int32 lane of `b`. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vrshlq_u32 (const simde_uint32x4_t a, const simde_int32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vrshlq_u32(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
/* Altivec path: same structure as vrshlq_u16, with 32-bit lanes. */
const SIMDE_POWER_ALTIVEC_VECTOR( signed int) zero = vec_splats(HEDLEY_STATIC_CAST( signed int, 0));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) ones = vec_splats(HEDLEY_STATIC_CAST(unsigned int, 1));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) shift = vec_splats(HEDLEY_STATIC_CAST(unsigned int, 32 - 8));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) max = vec_splats(HEDLEY_STATIC_CAST(unsigned int, 32));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) ff = vec_splats(HEDLEY_STATIC_CAST(unsigned int, 0xFF));
SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) b_abs, b_abs_dec, a_shr;
b_abs = vec_and(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int),
vec_abs(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), b))),
ff);
b_abs_dec = vec_sub(b_abs, ones);
a_shr = vec_and(vec_sr(a, b_abs_dec), vec_cmplt(b_abs_dec, max));
return vec_sel(vec_and(vec_sl(a, b_abs), vec_cmplt(b_abs, max)),
vec_sr(vec_add(a_shr, ones), ones),
vec_cmplt(vec_sl(b, shift), zero));
#else
simde_uint32x4_private
r_,
a_ = simde_uint32x4_to_private(a);
simde_int32x4_private b_ = simde_int32x4_to_private(b);
#if defined(SIMDE_X86_AVX2_NATIVE)
/* B is the sign-extended low byte; logical shift by -B - 1, then +1 and
 * one more shift rounds. */
const __m128i zero = _mm_setzero_si128();
const __m128i ff = _mm_cmpeq_epi32(zero, zero);
__m128i B = _mm_srai_epi32(_mm_slli_epi32(b_.m128i, 24), 24);
__m128i a_shr = _mm_srlv_epi32(a_.m128i, _mm_xor_si128(B, ff));
r_.m128i = _mm_blendv_epi8(_mm_sllv_epi32(a_.m128i, B),
_mm_srli_epi32(_mm_sub_epi32(a_shr, ff), 1),
_mm_cmpgt_epi32(zero, B));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]);
r_.values[i] =
(b_.values[i] >= 32) ? 0 :
(b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) :
(b_.values[i] >= -32) ? (((b_.values[i] == -32) ? 0 : (a_.values[i] >> -b_.values[i])) + ((a_.values[i] >> (-b_.values[i] - 1)) & 1)) :
0;
}
#endif
return simde_uint32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshlq_u32
#define vrshlq_u32(a, b) simde_vrshlq_u32((a), (b))
#endif
/* Rounding shift left, 128-bit unsigned (NEON vrshlq_u64): two uint64
 * lanes; scalar fallback defers to simde_vrshld_u64. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vrshlq_u64 (const simde_uint64x2_t a, const simde_int64x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vrshlq_u64(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
/* POWER8 path: same structure as the narrower unsigned variants. */
const SIMDE_POWER_ALTIVEC_VECTOR( signed long long) zero = vec_splats(HEDLEY_STATIC_CAST( signed long long, 0));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) ones = vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 1));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) shift = vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 64 - 8));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) max = vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 64));
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) ff = vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 0xFF));
SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) b_abs, b_abs_dec, a_shr;
b_abs = vec_and(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long),
vec_abs(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), b))),
ff);
b_abs_dec = vec_sub(b_abs, ones);
a_shr = vec_and(vec_sr(a, b_abs_dec), vec_cmplt(b_abs_dec, max));
HEDLEY_DIAGNOSTIC_PUSH
#if defined(SIMDE_BUG_CLANG_46770)
SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_
#endif
return vec_sel(vec_and(vec_sl(a, b_abs), vec_cmplt(b_abs, max)),
vec_sr(vec_add(a_shr, ones), ones),
vec_cmplt(vec_sl(b, shift), zero));
HEDLEY_DIAGNOSTIC_POP
#else
simde_uint64x2_private
r_,
a_ = simde_uint64x2_to_private(a);
simde_int64x2_private b_ = simde_int64x2_to_private(b);
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
const __m128i zero = _mm_setzero_si128();
const __m128i ff = _mm_cmpeq_epi64(zero, zero);
__m128i B = _mm_srai_epi64(_mm_slli_epi64(b_.m128i, 56), 56);
__m128i a_shr = _mm_srlv_epi64(a_.m128i, _mm_xor_si128(B, ff));
r_.m128i = _mm_blendv_epi8(_mm_sllv_epi64(a_.m128i, B),
_mm_srli_epi64(_mm_sub_epi64(a_shr, ff), 1),
_mm_cmpgt_epi64(zero, B));
#elif defined(SIMDE_X86_AVX2_NATIVE)
/* Shift right by |b| - 1, add 1, shift once more: rounded right shift.
 * The b < 0 test inspects the low byte's sign bit via the << 56. */
const __m128i ones = _mm_set1_epi64x(1);
__m128i b_abs = _mm_and_si128(_mm_abs_epi8(b_.m128i), _mm_set1_epi64x(0xFF));
__m128i a_shr = _mm_srlv_epi64(a_.m128i, _mm_sub_epi64(b_abs, ones));
r_.m128i = _mm_blendv_epi8(_mm_sllv_epi64(a_.m128i, b_abs),
_mm_srli_epi64(_mm_add_epi64(a_shr, ones), 1),
_mm_cmpgt_epi64(_mm_setzero_si128(), _mm_slli_epi64(b_.m128i, 56)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vrshld_u64(a_.values[i], b_.values[i]);
}
#endif
return simde_uint64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrshlq_u64
#define vrshlq_u64(a, b) simde_vrshlq_u64((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_RSHL_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/rhadd.h | .h | 17,720 | 433 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
/* Formula to average two unsigned integers without overflow is from Hacker's Delight (ISBN 978-0-321-84268-8).
* https://web.archive.org/web/20180831033349/http://hackersdelight.org/basics2.pdf#G525596
* avg_u = (x | y) - ((x ^ y) >> 1);
*
* Formula to average two signed integers (without widening):
* avg_s = (x >> 1) + (y >> 1) + ((x | y) & 1); // use arithmetic shifts
*
* If hardware has avg_u but not avg_s then rebase input to be unsigned.
* For example: s8 (-128..127) can be converted to u8 (0..255) by adding +128.
* Idea borrowed from Intel's ARM_NEON_2_x86_SSE project.
* https://github.com/intel/ARM_NEON_2_x86_SSE/blob/3c9879bf2dbef3274e0ed20f93cb8da3a2115ba1/NEON_2_SSE.h#L3171
* avg_s8 = avg_u8(a ^ 0x80, b ^ 0x80) ^ 0x80;
*/
#if !defined(SIMDE_ARM_NEON_RHADD_H)
#define SIMDE_ARM_NEON_RHADD_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vrhadd_s8: per-lane signed rounding halving add, r = (a + b + 1) >> 1,
 * computed without widening using avg_s = (x >> 1) + (y >> 1) + ((x | y) & 1)
 * (derivation in the file-header comment; relies on >> of a negative value
 * being an arithmetic shift, as on all supported compilers). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vrhadd_s8(simde_int8x8_t a, simde_int8x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    /* Real NEON available: use the hardware instruction directly. */
    return vrhadd_s8(a, b);
  #else
    simde_int8x8_private
      r_,
      a_ = simde_int8x8_to_private(a),
      b_ = simde_int8x8_to_private(b);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762)
      /* GCC/Clang vector extensions: one whole-vector expression. */
      r_.values = (((a_.values >> HEDLEY_STATIC_CAST(int8_t, 1)) + (b_.values >> HEDLEY_STATIC_CAST(int8_t, 1))) + ((a_.values | b_.values) & HEDLEY_STATIC_CAST(int8_t, 1)));
    #else
      /* Portable scalar fallback, one lane at a time. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (((a_.values[i] >> HEDLEY_STATIC_CAST(int8_t, 1)) + (b_.values[i] >> HEDLEY_STATIC_CAST(int8_t, 1))) + ((a_.values[i] | b_.values[i]) & HEDLEY_STATIC_CAST(int8_t, 1)));
      }
    #endif
    return simde_int8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrhadd_s8
  #define vrhadd_s8(a, b) simde_vrhadd_s8((a), (b))
#endif
/* vrhadd_s16: signed 16-bit rounding halving add, r = (a + b + 1) >> 1,
 * via avg_s = (x >> 1) + (y >> 1) + ((x | y) & 1) (see file header). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vrhadd_s16(simde_int16x4_t a, simde_int16x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrhadd_s16(a, b);
  #else
    simde_int16x4_private
      r_,
      a_ = simde_int16x4_to_private(a),
      b_ = simde_int16x4_to_private(b);
    #if defined(SIMDE_X86_MMX_NATIVE)
      /* MMX: ((a|b)&1) + (a >>a 1) + (b >>a 1) using arithmetic shifts. */
      r_.m64 = _mm_add_pi16(_m_pand(_m_por(a_.m64, b_.m64), _mm_set1_pi16(HEDLEY_STATIC_CAST(int16_t, 1))),
                            _mm_add_pi16(_m_psrawi(a_.m64, 1), _m_psrawi(b_.m64, 1)));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100760)
      /* GCC/Clang vector extensions: one whole-vector expression. */
      r_.values = (((a_.values >> HEDLEY_STATIC_CAST(int16_t, 1)) + (b_.values >> HEDLEY_STATIC_CAST(int16_t, 1))) + ((a_.values | b_.values) & HEDLEY_STATIC_CAST(int16_t, 1)));
    #else
      /* Portable scalar fallback. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (((a_.values[i] >> HEDLEY_STATIC_CAST(int16_t, 1)) + (b_.values[i] >> HEDLEY_STATIC_CAST(int16_t, 1))) + ((a_.values[i] | b_.values[i]) & HEDLEY_STATIC_CAST(int16_t, 1)));
      }
    #endif
    return simde_int16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrhadd_s16
  #define vrhadd_s16(a, b) simde_vrhadd_s16((a), (b))
#endif
/* vrhadd_s32: signed 32-bit rounding halving add, r = (a + b + 1) >> 1,
 * via avg_s = (x >> 1) + (y >> 1) + ((x | y) & 1) (see file header). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vrhadd_s32(simde_int32x2_t a, simde_int32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrhadd_s32(a, b);
  #else
    simde_int32x2_private
      r_,
      a_ = simde_int32x2_to_private(a),
      b_ = simde_int32x2_to_private(b);
    #if defined(SIMDE_X86_MMX_NATIVE)
      /* MMX: ((a|b)&1) + (a >>a 1) + (b >>a 1) using arithmetic shifts. */
      r_.m64 = _mm_add_pi32(_m_pand(_m_por(a_.m64, b_.m64), _mm_set1_pi32(HEDLEY_STATIC_CAST(int32_t, 1))),
                            _mm_add_pi32(_m_psradi(a_.m64, 1), _m_psradi(b_.m64, 1)));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100760)
      /* GCC/Clang vector extensions: one whole-vector expression. */
      r_.values = (((a_.values >> HEDLEY_STATIC_CAST(int32_t, 1)) + (b_.values >> HEDLEY_STATIC_CAST(int32_t, 1))) + ((a_.values | b_.values) & HEDLEY_STATIC_CAST(int32_t, 1)));
    #else
      /* Portable scalar fallback. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (((a_.values[i] >> HEDLEY_STATIC_CAST(int32_t, 1)) + (b_.values[i] >> HEDLEY_STATIC_CAST(int32_t, 1))) + ((a_.values[i] | b_.values[i]) & HEDLEY_STATIC_CAST(int32_t, 1)));
      }
    #endif
    return simde_int32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrhadd_s32
  #define vrhadd_s32(a, b) simde_vrhadd_s32((a), (b))
#endif
/* vrhadd_u8: unsigned 8-bit rounding halving add, r = (a + b + 1) >> 1.
 * Uses the same no-overflow formula as the signed variants; for unsigned
 * operands the shifts are logical, which is still exact. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vrhadd_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrhadd_u8(a, b);
  #else
    simde_uint8x8_private
      r_,
      a_ = simde_uint8x8_to_private(a),
      b_ = simde_uint8x8_to_private(b);
    #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762)
      /* GCC/Clang vector extensions: one whole-vector expression. */
      r_.values = (((a_.values >> HEDLEY_STATIC_CAST(uint8_t, 1)) + (b_.values >> HEDLEY_STATIC_CAST(uint8_t, 1))) + ((a_.values | b_.values) & HEDLEY_STATIC_CAST(uint8_t, 1)));
    #else
      /* Portable scalar fallback. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (((a_.values[i] >> HEDLEY_STATIC_CAST(uint8_t, 1)) + (b_.values[i] >> HEDLEY_STATIC_CAST(uint8_t, 1))) + ((a_.values[i] | b_.values[i]) & HEDLEY_STATIC_CAST(uint8_t, 1)));
      }
    #endif
    return simde_uint8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrhadd_u8
  #define vrhadd_u8(a, b) simde_vrhadd_u8((a), (b))
#endif
/* vrhadd_u16: unsigned 16-bit rounding halving add, r = (a + b + 1) >> 1. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vrhadd_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrhadd_u16(a, b);
  #else
    simde_uint16x4_private
      r_,
      a_ = simde_uint16x4_to_private(a),
      b_ = simde_uint16x4_to_private(b);
    #if defined(SIMDE_X86_MMX_NATIVE)
      /* MMX: ((a|b)&1) + (a >>l 1) + (b >>l 1) using logical shifts. */
      r_.m64 = _mm_add_pi16(_m_pand(_m_por(a_.m64, b_.m64), _mm_set1_pi16(HEDLEY_STATIC_CAST(int16_t, 1))),
                            _mm_add_pi16(_mm_srli_pi16(a_.m64, 1), _mm_srli_pi16(b_.m64, 1)));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100760)
      /* GCC/Clang vector extensions: one whole-vector expression. */
      r_.values = (((a_.values >> HEDLEY_STATIC_CAST(uint16_t, 1)) + (b_.values >> HEDLEY_STATIC_CAST(uint16_t, 1))) + ((a_.values | b_.values) & HEDLEY_STATIC_CAST(uint16_t, 1)));
    #else
      /* Portable scalar fallback. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (((a_.values[i] >> HEDLEY_STATIC_CAST(uint16_t, 1)) + (b_.values[i] >> HEDLEY_STATIC_CAST(uint16_t, 1))) + ((a_.values[i] | b_.values[i]) & HEDLEY_STATIC_CAST(uint16_t, 1)));
      }
    #endif
    return simde_uint16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrhadd_u16
  #define vrhadd_u16(a, b) simde_vrhadd_u16((a), (b))
#endif
/* vrhadd_u32: unsigned 32-bit rounding halving add, r = (a + b + 1) >> 1. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vrhadd_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrhadd_u32(a, b);
  #else
    simde_uint32x2_private
      r_,
      a_ = simde_uint32x2_to_private(a),
      b_ = simde_uint32x2_to_private(b);
    #if defined(SIMDE_X86_MMX_NATIVE)
      /* MMX: ((a|b)&1) + (a >>l 1) + (b >>l 1) using logical shifts. */
      r_.m64 = _mm_add_pi32(_m_pand(_m_por(a_.m64, b_.m64), _mm_set1_pi32(HEDLEY_STATIC_CAST(int32_t, 1))),
                            _mm_add_pi32(_mm_srli_pi32(a_.m64, 1), _mm_srli_pi32(b_.m64, 1)));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100760)
      /* GCC/Clang vector extensions: one whole-vector expression. */
      r_.values = (((a_.values >> HEDLEY_STATIC_CAST(uint32_t, 1)) + (b_.values >> HEDLEY_STATIC_CAST(uint32_t, 1))) + ((a_.values | b_.values) & HEDLEY_STATIC_CAST(uint32_t, 1)));
    #else
      /* Portable scalar fallback. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (((a_.values[i] >> HEDLEY_STATIC_CAST(uint32_t, 1)) + (b_.values[i] >> HEDLEY_STATIC_CAST(uint32_t, 1))) + ((a_.values[i] | b_.values[i]) & HEDLEY_STATIC_CAST(uint32_t, 1)));
      }
    #endif
    return simde_uint32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrhadd_u32
  #define vrhadd_u32(a, b) simde_vrhadd_u32((a), (b))
#endif
/* vrhaddq_s8: 128-bit signed 8-bit rounding halving add, r = (a + b + 1) >> 1.
 * x86/WASM have unsigned rounding-average instructions but no signed ones, so
 * inputs are rebased to unsigned by XORing the sign bit (0x80), averaged, and
 * rebased back (trick credited in the file header to ARM_NEON_2_x86_SSE). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vrhaddq_s8(simde_int8x16_t a, simde_int8x16_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrhaddq_s8(a, b);
  #else
    simde_int8x16_private
      r_,
      a_ = simde_int8x16_to_private(a),
      b_ = simde_int8x16_to_private(b);
    #if defined(SIMDE_X86_SSE2_NATIVE)
      const __m128i msb = _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, -128)); /* 0x80 */
      r_.m128i = _mm_xor_si128(_mm_avg_epu8(_mm_xor_si128(a_.m128i, msb), _mm_xor_si128(b_.m128i, msb)), msb);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      const v128_t msb = wasm_i8x16_splat(HEDLEY_STATIC_CAST(int8_t, -128)); /* 0x80 */
      r_.v128 = wasm_v128_xor(wasm_u8x16_avgr(wasm_v128_xor(a_.v128, msb), wasm_v128_xor(b_.v128, msb)), msb);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
      /* GCC/Clang vector extensions: avg_s = (a>>1) + (b>>1) + ((a|b)&1). */
      r_.values = (((a_.values >> HEDLEY_STATIC_CAST(int8_t, 1)) + (b_.values >> HEDLEY_STATIC_CAST(int8_t, 1))) + ((a_.values | b_.values) & HEDLEY_STATIC_CAST(int8_t, 1)));
    #else
      /* Portable scalar fallback. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (((a_.values[i] >> HEDLEY_STATIC_CAST(int8_t, 1)) + (b_.values[i] >> HEDLEY_STATIC_CAST(int8_t, 1))) + ((a_.values[i] | b_.values[i]) & HEDLEY_STATIC_CAST(int8_t, 1)));
      }
    #endif
    return simde_int8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrhaddq_s8
  #define vrhaddq_s8(a, b) simde_vrhaddq_s8((a), (b))
#endif
/* vrhaddq_s16: 128-bit signed 16-bit rounding halving add, r = (a + b + 1) >> 1.
 * Same sign-bit-rebasing trick as vrhaddq_s8, with 0x8000 as the bias. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vrhaddq_s16(simde_int16x8_t a, simde_int16x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrhaddq_s16(a, b);
  #else
    simde_int16x8_private
      r_,
      a_ = simde_int16x8_to_private(a),
      b_ = simde_int16x8_to_private(b);
    #if defined(SIMDE_X86_SSE2_NATIVE)
      const __m128i msb = _mm_set1_epi16(HEDLEY_STATIC_CAST(int16_t, -32768)); /* 0x8000 */
      r_.m128i = _mm_xor_si128(_mm_avg_epu16(_mm_xor_si128(a_.m128i, msb), _mm_xor_si128(b_.m128i, msb)), msb);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      const v128_t msb = wasm_i16x8_splat(HEDLEY_STATIC_CAST(int16_t, -32768)); /* 0x8000 */
      r_.v128 = wasm_v128_xor(wasm_u16x8_avgr(wasm_v128_xor(a_.v128, msb), wasm_v128_xor(b_.v128, msb)), msb);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
      /* GCC/Clang vector extensions: avg_s = (a>>1) + (b>>1) + ((a|b)&1). */
      r_.values = (((a_.values >> HEDLEY_STATIC_CAST(int16_t, 1)) + (b_.values >> HEDLEY_STATIC_CAST(int16_t, 1))) + ((a_.values | b_.values) & HEDLEY_STATIC_CAST(int16_t, 1)));
    #else
      /* Portable scalar fallback. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (((a_.values[i] >> HEDLEY_STATIC_CAST(int16_t, 1)) + (b_.values[i] >> HEDLEY_STATIC_CAST(int16_t, 1))) + ((a_.values[i] | b_.values[i]) & HEDLEY_STATIC_CAST(int16_t, 1)));
      }
    #endif
    return simde_int16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrhaddq_s16
  #define vrhaddq_s16(a, b) simde_vrhaddq_s16((a), (b))
#endif
/* vrhaddq_s32: 128-bit signed 32-bit rounding halving add, r = (a + b + 1) >> 1.
 * No 32-bit average instruction exists on SSE2/WASM, so the no-overflow
 * formula (a>>1) + (b>>1) + ((a|b)&1) is built from shifts and adds. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vrhaddq_s32(simde_int32x4_t a, simde_int32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrhaddq_s32(a, b);
  #else
    simde_int32x4_private
      r_,
      a_ = simde_int32x4_to_private(a),
      b_ = simde_int32x4_to_private(b);
    #if defined(SIMDE_X86_SSE2_NATIVE)
      /* srai = arithmetic shift, preserving the sign of each lane. */
      r_.m128i = _mm_add_epi32(_mm_and_si128(_mm_or_si128(a_.m128i, b_.m128i), _mm_set1_epi32(1)),
                               _mm_add_epi32(_mm_srai_epi32(a_.m128i, 1), _mm_srai_epi32(b_.m128i, 1)));
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      /* wasm_i32x4_shr is the arithmetic (signed) shift. */
      r_.v128 = wasm_i32x4_add(wasm_v128_and(wasm_v128_or(a_.v128, b_.v128), wasm_i32x4_splat(1)),
                               wasm_i32x4_add(wasm_i32x4_shr(a_.v128, 1), wasm_i32x4_shr(b_.v128, 1)));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
      r_.values = (((a_.values >> HEDLEY_STATIC_CAST(int32_t, 1)) + (b_.values >> HEDLEY_STATIC_CAST(int32_t, 1))) + ((a_.values | b_.values) & HEDLEY_STATIC_CAST(int32_t, 1)));
    #else
      /* Portable scalar fallback. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (((a_.values[i] >> HEDLEY_STATIC_CAST(int32_t, 1)) + (b_.values[i] >> HEDLEY_STATIC_CAST(int32_t, 1))) + ((a_.values[i] | b_.values[i]) & HEDLEY_STATIC_CAST(int32_t, 1)));
      }
    #endif
    return simde_int32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrhaddq_s32
  #define vrhaddq_s32(a, b) simde_vrhaddq_s32((a), (b))
#endif
/* vrhaddq_u8: 128-bit unsigned 8-bit rounding halving add, r = (a + b + 1) >> 1.
 * SSE2 PAVGB / WASM avgr already compute exactly this rounding average; the
 * portable path uses Hacker's Delight: (a | b) - ((a ^ b) >> 1). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vrhaddq_u8(simde_uint8x16_t a, simde_uint8x16_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrhaddq_u8(a, b);
  #else
    simde_uint8x16_private
      r_,
      a_ = simde_uint8x16_to_private(a),
      b_ = simde_uint8x16_to_private(b);
    #if defined(SIMDE_X86_SSE2_NATIVE)
      r_.m128i = _mm_avg_epu8(a_.m128i, b_.m128i);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_u8x16_avgr(a_.v128, b_.v128);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
      r_.values = (a_.values | b_.values) - ((a_.values ^ b_.values) >> HEDLEY_STATIC_CAST(uint8_t, 1));
    #else
      /* Portable scalar fallback. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (a_.values[i] | b_.values[i]) - ((a_.values[i] ^ b_.values[i]) >> HEDLEY_STATIC_CAST(uint8_t, 1));
      }
    #endif
    return simde_uint8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrhaddq_u8
  #define vrhaddq_u8(a, b) simde_vrhaddq_u8((a), (b))
#endif
/* vrhaddq_u16: 128-bit unsigned 16-bit rounding halving add, r = (a + b + 1) >> 1.
 * SSE2 PAVGW / WASM avgr match the required rounding; portable path uses
 * Hacker's Delight: (a | b) - ((a ^ b) >> 1). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vrhaddq_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrhaddq_u16(a, b);
  #else
    simde_uint16x8_private
      r_,
      a_ = simde_uint16x8_to_private(a),
      b_ = simde_uint16x8_to_private(b);
    #if defined(SIMDE_X86_SSE2_NATIVE)
      r_.m128i = _mm_avg_epu16(a_.m128i, b_.m128i);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_u16x8_avgr(a_.v128, b_.v128);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
      r_.values = (a_.values | b_.values) - ((a_.values ^ b_.values) >> HEDLEY_STATIC_CAST(uint16_t, 1));
    #else
      /* Portable scalar fallback. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (a_.values[i] | b_.values[i]) - ((a_.values[i] ^ b_.values[i]) >> HEDLEY_STATIC_CAST(uint16_t, 1));
      }
    #endif
    return simde_uint16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrhaddq_u16
  #define vrhaddq_u16(a, b) simde_vrhaddq_u16((a), (b))
#endif
/* vrhaddq_u32: 128-bit unsigned 32-bit rounding halving add, r = (a + b + 1) >> 1.
 * No 32-bit average instruction exists, so every path uses the Hacker's
 * Delight identity (a | b) - ((a ^ b) >> 1), which cannot overflow. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vrhaddq_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrhaddq_u32(a, b);
  #else
    simde_uint32x4_private
      r_,
      a_ = simde_uint32x4_to_private(a),
      b_ = simde_uint32x4_to_private(b);
    #if defined(SIMDE_X86_SSE2_NATIVE)
      r_.m128i = _mm_sub_epi32(_mm_or_si128(a_.m128i, b_.m128i), _mm_srli_epi32(_mm_xor_si128(a_.m128i, b_.m128i), 1));
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_i32x4_sub(wasm_v128_or(a_.v128, b_.v128), wasm_u32x4_shr(wasm_v128_xor(a_.v128, b_.v128), 1));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
      r_.values = (a_.values | b_.values) - ((a_.values ^ b_.values) >> HEDLEY_STATIC_CAST(uint32_t, 1));
    #else
      /* Portable scalar fallback. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = (a_.values[i] | b_.values[i]) - ((a_.values[i] ^ b_.values[i]) >> HEDLEY_STATIC_CAST(uint32_t, 1));
      }
    #endif
    return simde_uint32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrhaddq_u32
  #define vrhaddq_u32(a, b) simde_vrhaddq_u32((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_RHADD_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/shrn_n.h | .h | 5,227 | 154 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2021 Zhi An Ng <zhin@google.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_SHRN_N_H)
#define SIMDE_ARM_NEON_SHRN_N_H
#include "types.h"
#include "reinterpret.h"
#include "movn.h"
#include "shr_n.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vshrn_n_s16: shift each 16-bit lane right by n (compile-time constant,
 * 1..8) and narrow to the low 8 bits of the result (truncating narrow).
 * The & UINT8_MAX keeps only the low byte before the cast to int8_t. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vshrn_n_s16 (const simde_int16x8_t a, const int n)
    SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 8) {
  simde_int8x8_private r_;
  simde_int16x8_private a_ = simde_int16x8_to_private(a);
  SIMDE_VECTORIZE
  for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
    r_.values[i] = HEDLEY_STATIC_CAST(int8_t, (a_.values[i] >> n) & UINT8_MAX);
  }
  return simde_int8x8_from_private(r_);
}
/* Prefer the native instruction, else compose from full-width shift + narrow. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vshrn_n_s16(a, n) vshrn_n_s16((a), (n))
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
  #define simde_vshrn_n_s16(a, n) simde_vmovn_s16(simde_vshrq_n_s16((a), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vshrn_n_s16
  #define vshrn_n_s16(a, n) simde_vshrn_n_s16((a), (n))
#endif
/* vshrn_n_s32: shift each 32-bit lane right by n (1..16) and narrow to the
 * low 16 bits (truncating narrow). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vshrn_n_s32 (const simde_int32x4_t a, const int n)
    SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 16) {
  simde_int16x4_private r_;
  simde_int32x4_private a_ = simde_int32x4_to_private(a);
  SIMDE_VECTORIZE
  for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
    r_.values[i] = HEDLEY_STATIC_CAST(int16_t, (a_.values[i] >> n) & UINT16_MAX);
  }
  return simde_int16x4_from_private(r_);
}
/* Prefer the native instruction, else compose from full-width shift + narrow. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vshrn_n_s32(a, n) vshrn_n_s32((a), (n))
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
  #define simde_vshrn_n_s32(a, n) simde_vmovn_s32(simde_vshrq_n_s32((a), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vshrn_n_s32
  #define vshrn_n_s32(a, n) simde_vshrn_n_s32((a), (n))
#endif
/* vshrn_n_s64: shift each 64-bit lane right by n (1..32) and narrow to the
 * low 32 bits (truncating narrow). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vshrn_n_s64 (const simde_int64x2_t a, const int n)
    SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 32) {
  simde_int32x2_private r_;
  simde_int64x2_private a_ = simde_int64x2_to_private(a);
  SIMDE_VECTORIZE
  for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
    r_.values[i] = HEDLEY_STATIC_CAST(int32_t, (a_.values[i] >> n) & UINT32_MAX);
  }
  return simde_int32x2_from_private(r_);
}
/* Prefer the native instruction, else compose from full-width shift + narrow. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vshrn_n_s64(a, n) vshrn_n_s64((a), (n))
#elif SIMDE_NATURAL_VECTOR_SIZE > 0
  #define simde_vshrn_n_s64(a, n) simde_vmovn_s64(simde_vshrq_n_s64((a), (n)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vshrn_n_s64
  #define vshrn_n_s64(a, n) simde_vshrn_n_s64((a), (n))
#endif
/* Unsigned shift-and-narrow variants are built on the signed ones by
 * reinterpreting the bits: within the permitted shift range every kept low
 * bit comes from a real source bit, so arithmetic-vs-logical shift makes no
 * difference to the truncated result. */
#define simde_vshrn_n_u16(a, n) \
  simde_vreinterpret_u8_s8( \
    simde_vshrn_n_s16(simde_vreinterpretq_s16_u16(a), (n)))
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #undef simde_vshrn_n_u16
  #define simde_vshrn_n_u16(a, n) vshrn_n_u16((a), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vshrn_n_u16
  #define vshrn_n_u16(a, n) simde_vshrn_n_u16((a), (n))
#endif
#define simde_vshrn_n_u32(a, n) \
  simde_vreinterpret_u16_s16( \
    simde_vshrn_n_s32(simde_vreinterpretq_s32_u32(a), (n)))
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #undef simde_vshrn_n_u32
  #define simde_vshrn_n_u32(a, n) vshrn_n_u32((a), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vshrn_n_u32
  #define vshrn_n_u32(a, n) simde_vshrn_n_u32((a), (n))
#endif
#define simde_vshrn_n_u64(a, n) \
  simde_vreinterpret_u32_s32( \
    simde_vshrn_n_s64(simde_vreinterpretq_s64_u64(a), (n)))
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #undef simde_vshrn_n_u64
  #define simde_vshrn_n_u64(a, n) vshrn_n_u64((a), (n))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vshrn_n_u64
  #define vshrn_n_u64(a, n) simde_vshrn_n_u64((a), (n))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_SHRN_N_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/cmla_rot270.h | .h | 5,909 | 147 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Atharva Nimbalkar <atharvakn@gmail.com>
*/
#if !defined(SIMDE_ARM_NEON_CMLA_ROT270_H)
#define SIMDE_ARM_NEON_CMLA_ROT270_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vcmla_rot270_f32: complex multiply-accumulate with b rotated 270 degrees.
 * Treating lane pairs as complex numbers (even lane = real, odd = imaginary):
 *   r.real += a.imag * b.imag
 *   r.imag -= a.imag * b.real
 * Native FCMLA requires ARMv8.3 and recent GCC (>= 9) / Clang (>= 12). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vcmla_rot270_f32(simde_float32x2_t r, simde_float32x2_t a, simde_float32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && SIMDE_ARCH_ARM_CHECK(8,3) && \
      (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(9,0,0)) && \
      (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0))
    return vcmla_rot270_f32(r, a, b);
  #else
    simde_float32x2_private
      r_ = simde_float32x2_to_private(r),
      a_ = simde_float32x2_to_private(a),
      b_ = simde_float32x2_to_private(b);
    #if defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100760)
      /* a becomes {a.imag, a.imag}; b becomes {b.imag, -b.real}
       * (indices >= 2 select from the second, negated, operand copy). */
      a_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, a_.values, 1, 1);
      b_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, -b_.values, b_.values, 3, 0);
      r_.values += b_.values * a_.values;
    #else
      /* Portable scalar fallback, one complex pair per iteration. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / (2 * sizeof(r_.values[0]))) ; i++) {
        r_.values[2 * i] += b_.values[2 * i + 1] * a_.values[2 * i + 1];
        r_.values[2 * i + 1] += -(b_.values[2 * i]) * a_.values[2 * i + 1];
      }
    #endif
    return simde_float32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES)
  #undef vcmla_rot270_f32
  #define vcmla_rot270_f32(r, a, b) simde_vcmla_rot270_f32(r, a, b)
#endif
/* vcmlaq_rot270_f32: 128-bit variant of vcmla_rot270_f32; two complex
 * pairs per vector.  Per pair: r.real += a.imag * b.imag and
 * r.imag -= a.imag * b.real. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vcmlaq_rot270_f32(simde_float32x4_t r, simde_float32x4_t a, simde_float32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && SIMDE_ARCH_ARM_CHECK(8,3) && \
      (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(9,0,0)) && \
      (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0))
    return vcmlaq_rot270_f32(r, a, b);
  #else
    simde_float32x4_private
      r_ = simde_float32x4_to_private(r),
      a_ = simde_float32x4_to_private(a),
      b_ = simde_float32x4_to_private(b);
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      /* a becomes {a1,a1,a3,a3}; b becomes {b1,-b0,b3,-b2}
       * (indices >= 4 select from the negated first operand). */
      a_.v128 = wasm_i32x4_shuffle(a_.v128, a_.v128, 1, 1, 3, 3);
      b_.v128 = wasm_i32x4_shuffle(wasm_f32x4_neg(b_.v128), b_.v128, 5, 0, 7, 2);
      r_.v128 = wasm_f32x4_add(r_.v128, wasm_f32x4_mul(b_.v128, a_.v128));
    #elif defined(SIMDE_SHUFFLE_VECTOR_)
      /* Same duplication/negation trick via GCC/Clang shuffles. */
      a_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, a_.values, 1, 1, 3, 3);
      b_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, -b_.values, b_.values, 5, 0, 7, 2);
      r_.values += b_.values * a_.values;
    #else
      /* Portable scalar fallback, one complex pair per iteration. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / (2 * sizeof(r_.values[0]))) ; i++) {
        r_.values[2 * i] += b_.values[2 * i + 1] * a_.values[2 * i + 1];
        r_.values[2 * i + 1] += -(b_.values[2 * i]) * a_.values[2 * i + 1];
      }
    #endif
    return simde_float32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES)
  #undef vcmlaq_rot270_f32
  #define vcmlaq_rot270_f32(r, a, b) simde_vcmlaq_rot270_f32(r, a, b)
#endif
/* vcmlaq_rot270_f64: double-precision variant; one complex pair per vector.
 * r.real += a.imag * b.imag and r.imag -= a.imag * b.real.
 * Native path additionally requires AArch64 (A64V8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vcmlaq_rot270_f64(simde_float64x2_t r, simde_float64x2_t a, simde_float64x2_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && SIMDE_ARCH_ARM_CHECK(8,3) && \
      (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(9,0,0)) && \
      (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0))
    return vcmlaq_rot270_f64(r, a, b);
  #else
    simde_float64x2_private
      r_ = simde_float64x2_to_private(r),
      a_ = simde_float64x2_to_private(a),
      b_ = simde_float64x2_to_private(b);
    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      /* a becomes {a.imag, a.imag}; b becomes {b.imag, -b.real}. */
      a_.v128 = wasm_i64x2_shuffle(a_.v128, a_.v128, 1, 1);
      b_.v128 = wasm_i64x2_shuffle(wasm_f64x2_neg(b_.v128), b_.v128, 3, 0);
      r_.v128 = wasm_f64x2_add(r_.v128, wasm_f64x2_mul(b_.v128, a_.v128));
    #elif defined(SIMDE_SHUFFLE_VECTOR_)
      a_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, a_.values, 1, 1);
      b_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, -b_.values, b_.values, 3, 0);
      r_.values += b_.values * a_.values;
    #else
      /* Portable scalar fallback, one complex pair per iteration. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / (2 * sizeof(r_.values[0]))) ; i++) {
        r_.values[2 * i] += b_.values[2 * i + 1] * a_.values[2 * i + 1];
        r_.values[2 * i + 1] += -(b_.values[2 * i]) * a_.values[2 * i + 1];
      }
    #endif
    return simde_float64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES)
  #undef vcmlaq_rot270_f64
  #define vcmlaq_rot270_f64(r, a, b) simde_vcmlaq_rot270_f64(r, a, b)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_CMLA_ROT270_H) */
| Unknown |
3D | OpenMS/OpenMS | src/openms/extern/simde/simde/arm/neon/trn.h | .h | 7,435 | 253 | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_TRN_H) && !defined(SIMDE_BUG_INTEL_857088)
#define SIMDE_ARM_NEON_TRN_H
#include "types.h"
#include "trn1.h"
#include "trn2.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vtrn_f32: two-way transpose.  AArch32 NEON returns both halves at once;
 * on other targets build the pair from the trn1/trn2 primitives. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2x2_t
simde_vtrn_f32(simde_float32x2_t a, simde_float32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vtrn_f32(a, b);
  #else
    simde_float32x2x2_t result;
    result.val[0] = simde_vtrn1_f32(a, b);
    result.val[1] = simde_vtrn2_f32(a, b);
    return result;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vtrn_f32
  #define vtrn_f32(a, b) simde_vtrn_f32((a), (b))
#endif
/* vtrn_s8: two-way transpose of signed 8-bit vectors via trn1/trn2. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8x2_t
simde_vtrn_s8(simde_int8x8_t a, simde_int8x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vtrn_s8(a, b);
  #else
    simde_int8x8x2_t result;
    result.val[0] = simde_vtrn1_s8(a, b);
    result.val[1] = simde_vtrn2_s8(a, b);
    return result;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vtrn_s8
  #define vtrn_s8(a, b) simde_vtrn_s8((a), (b))
#endif
/* vtrn_s16: two-way transpose of signed 16-bit vectors via trn1/trn2. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4x2_t
simde_vtrn_s16(simde_int16x4_t a, simde_int16x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vtrn_s16(a, b);
  #else
    simde_int16x4x2_t result;
    result.val[0] = simde_vtrn1_s16(a, b);
    result.val[1] = simde_vtrn2_s16(a, b);
    return result;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vtrn_s16
  #define vtrn_s16(a, b) simde_vtrn_s16((a), (b))
#endif
/* vtrn_s32: two-way transpose of signed 32-bit vectors via trn1/trn2. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2x2_t
simde_vtrn_s32(simde_int32x2_t a, simde_int32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vtrn_s32(a, b);
  #else
    simde_int32x2x2_t result;
    result.val[0] = simde_vtrn1_s32(a, b);
    result.val[1] = simde_vtrn2_s32(a, b);
    return result;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vtrn_s32
  #define vtrn_s32(a, b) simde_vtrn_s32((a), (b))
#endif
/* vtrn_u8: two-way transpose of unsigned 8-bit vectors via trn1/trn2. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8x2_t
simde_vtrn_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vtrn_u8(a, b);
  #else
    simde_uint8x8x2_t result;
    result.val[0] = simde_vtrn1_u8(a, b);
    result.val[1] = simde_vtrn2_u8(a, b);
    return result;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vtrn_u8
  #define vtrn_u8(a, b) simde_vtrn_u8((a), (b))
#endif
/* vtrn_u16: two-way transpose of unsigned 16-bit vectors via trn1/trn2. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4x2_t
simde_vtrn_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vtrn_u16(a, b);
  #else
    simde_uint16x4x2_t result;
    result.val[0] = simde_vtrn1_u16(a, b);
    result.val[1] = simde_vtrn2_u16(a, b);
    return result;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vtrn_u16
  #define vtrn_u16(a, b) simde_vtrn_u16((a), (b))
#endif
/* vtrn_u32: two-way transpose of unsigned 32-bit vectors via trn1/trn2. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2x2_t
simde_vtrn_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vtrn_u32(a, b);
  #else
    simde_uint32x2x2_t result;
    result.val[0] = simde_vtrn1_u32(a, b);
    result.val[1] = simde_vtrn2_u32(a, b);
    return result;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vtrn_u32
  #define vtrn_u32(a, b) simde_vtrn_u32((a), (b))
#endif
/* vtrnq_f32: 128-bit two-way transpose of float32 vectors via trn1/trn2. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4x2_t
simde_vtrnq_f32(simde_float32x4_t a, simde_float32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vtrnq_f32(a, b);
  #else
    simde_float32x4x2_t result;
    result.val[0] = simde_vtrn1q_f32(a, b);
    result.val[1] = simde_vtrn2q_f32(a, b);
    return result;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vtrnq_f32
  #define vtrnq_f32(a, b) simde_vtrnq_f32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16x2_t
simde_vtrnq_s8(simde_int8x16_t a, simde_int8x16_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vtrnq_s8(a, b);
#else
simde_int8x16x2_t r = { { simde_vtrn1q_s8(a, b), simde_vtrn2q_s8(a, b) } };
return r;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vtrnq_s8
#define vtrnq_s8(a, b) simde_vtrnq_s8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8x2_t
simde_vtrnq_s16(simde_int16x8_t a, simde_int16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vtrnq_s16(a, b);
#else
simde_int16x8x2_t r = { { simde_vtrn1q_s16(a, b), simde_vtrn2q_s16(a, b) } };
return r;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vtrnq_s16
#define vtrnq_s16(a, b) simde_vtrnq_s16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4x2_t
simde_vtrnq_s32(simde_int32x4_t a, simde_int32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vtrnq_s32(a, b);
#else
simde_int32x4x2_t r = { { simde_vtrn1q_s32(a, b), simde_vtrn2q_s32(a, b) } };
return r;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vtrnq_s32
#define vtrnq_s32(a, b) simde_vtrnq_s32((a), (b))
#endif
/* simde_vtrnq_u8: 128-bit transpose of two uint8x16 vectors via trn1q/trn2q. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16x2_t
simde_vtrnq_u8(simde_uint8x16_t a, simde_uint8x16_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vtrnq_u8(a, b);
  #else
    simde_uint8x16x2_t r = { { simde_vtrn1q_u8(a, b), simde_vtrn2q_u8(a, b) } };
    return r;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vtrnq_u8
  #define vtrnq_u8(a, b) simde_vtrnq_u8((a), (b))
#endif
/* simde_vtrnq_u16: as above for uint16x8 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8x2_t
simde_vtrnq_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vtrnq_u16(a, b);
  #else
    simde_uint16x8x2_t r = { { simde_vtrn1q_u16(a, b), simde_vtrn2q_u16(a, b) } };
    return r;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vtrnq_u16
  #define vtrnq_u16(a, b) simde_vtrnq_u16((a), (b))
#endif
/* simde_vtrnq_u32: as above for uint32x4 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4x2_t
simde_vtrnq_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vtrnq_u32(a, b);
  #else
    simde_uint32x4x2_t r = { { simde_vtrn1q_u32(a, b), simde_vtrn2q_u32(a, b) } };
    return r;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vtrnq_u32
  #define vtrnq_u32(a, b) simde_vtrnq_u32((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_TRN_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Zhi An Ng <zhin@google.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_ST3_LANE_H)
#define SIMDE_ARM_NEON_ST3_LANE_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
#if !defined(SIMDE_BUG_INTEL_857088)
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst3_lane_s8(int8_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_int8x8x3_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
  /* Store element `lane` of each of the three int8x8 vectors in `val`
   * to ptr[0], ptr[1] and ptr[2] respectively. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    SIMDE_CONSTIFY_8_NO_RESULT_(vst3_lane_s8, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    for (size_t i = 0 ; i < 3 ; i++) {
      simde_int8x8_private v_ = simde_int8x8_to_private(val.val[i]);
      ptr[i] = v_.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst3_lane_s8
  #define vst3_lane_s8(a, b, c) simde_vst3_lane_s8((a), (b), (c))
#endif
/* simde_vst3_lane_s16: store element `lane` of each of the three int16x4
 * vectors in `val` to ptr[0], ptr[1] and ptr[2]. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst3_lane_s16(int16_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_int16x4x3_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    /* The native intrinsic needs a compile-time constant lane;
     * SIMDE_CONSTIFY_ expands a switch over all valid lane values. */
    SIMDE_CONSTIFY_4_NO_RESULT_(vst3_lane_s16, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_int16x4_private r;
    for (size_t i = 0 ; i < 3 ; i++) {
      r = simde_int16x4_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst3_lane_s16
  #define vst3_lane_s16(a, b, c) simde_vst3_lane_s16((a), (b), (c))
#endif
/* simde_vst3_lane_s32: as above for int32x2 vectors (lane is 0 or 1). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst3_lane_s32(int32_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_int32x2x3_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    SIMDE_CONSTIFY_2_NO_RESULT_(vst3_lane_s32, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_int32x2_private r;
    for (size_t i = 0 ; i < 3 ; i++) {
      r = simde_int32x2_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst3_lane_s32
  #define vst3_lane_s32(a, b, c) simde_vst3_lane_s32((a), (b), (c))
#endif
/* simde_vst3_lane_s64: int64x1 variant.  Only lane 0 exists, so the native
 * call hard-codes the constant; the intrinsic is AArch64-only (A64V8). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst3_lane_s64(int64_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_int64x1x3_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    HEDLEY_STATIC_CAST(void, lane);
    vst3_lane_s64(ptr, val, 0);
  #else
    simde_int64x1_private r;
    for (size_t i = 0 ; i < 3 ; i++) {
      r = simde_int64x1_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vst3_lane_s64
  #define vst3_lane_s64(a, b, c) simde_vst3_lane_s64((a), (b), (c))
#endif
/* simde_vst3_lane_u8: store element `lane` of each of the three uint8x8
 * vectors in `val` to ptr[0], ptr[1] and ptr[2]. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst3_lane_u8(uint8_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_uint8x8x3_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    SIMDE_CONSTIFY_8_NO_RESULT_(vst3_lane_u8, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_uint8x8_private r;
    for (size_t i = 0 ; i < 3 ; i++) {
      r = simde_uint8x8_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst3_lane_u8
  #define vst3_lane_u8(a, b, c) simde_vst3_lane_u8((a), (b), (c))
#endif
/* simde_vst3_lane_u16: as above for uint16x4 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst3_lane_u16(uint16_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_uint16x4x3_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    SIMDE_CONSTIFY_4_NO_RESULT_(vst3_lane_u16, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_uint16x4_private r;
    for (size_t i = 0 ; i < 3 ; i++) {
      r = simde_uint16x4_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst3_lane_u16
  #define vst3_lane_u16(a, b, c) simde_vst3_lane_u16((a), (b), (c))
#endif
/* simde_vst3_lane_u32: as above for uint32x2 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst3_lane_u32(uint32_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_uint32x2x3_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    SIMDE_CONSTIFY_2_NO_RESULT_(vst3_lane_u32, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_uint32x2_private r;
    for (size_t i = 0 ; i < 3 ; i++) {
      r = simde_uint32x2_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst3_lane_u32
  #define vst3_lane_u32(a, b, c) simde_vst3_lane_u32((a), (b), (c))
#endif
/* simde_vst3_lane_u64: uint64x1 variant; only lane 0 exists, so the native
 * call hard-codes the constant (AArch64-only intrinsic). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst3_lane_u64(uint64_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_uint64x1x3_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    HEDLEY_STATIC_CAST(void, lane);
    vst3_lane_u64(ptr, val, 0);
  #else
    simde_uint64x1_private r;
    for (size_t i = 0 ; i < 3 ; i++) {
      r = simde_uint64x1_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vst3_lane_u64
  #define vst3_lane_u64(a, b, c) simde_vst3_lane_u64((a), (b), (c))
#endif
/* simde_vst3_lane_f32: store element `lane` of each of the three float32x2
 * vectors in `val` to ptr[0], ptr[1] and ptr[2]. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst3_lane_f32(simde_float32_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_float32x2x3_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    SIMDE_CONSTIFY_2_NO_RESULT_(vst3_lane_f32, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_float32x2_private r;
    for (size_t i = 0 ; i < 3 ; i++) {
      r = simde_float32x2_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst3_lane_f32
  #define vst3_lane_f32(a, b, c) simde_vst3_lane_f32((a), (b), (c))
#endif
/* simde_vst3_lane_f64: float64x1 variant; only lane 0 exists, so the native
 * call hard-codes the constant (AArch64-only intrinsic). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst3_lane_f64(simde_float64_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_float64x1x3_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    HEDLEY_STATIC_CAST(void, lane);
    vst3_lane_f64(ptr, val, 0);
  #else
    simde_float64x1_private r;
    for (size_t i = 0 ; i < 3 ; i++) {
      r = simde_float64x1_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vst3_lane_f64
  #define vst3_lane_f64(a, b, c) simde_vst3_lane_f64((a), (b), (c))
#endif
/* simde_vst3q_lane_s8: 128-bit variant — store element `lane` of each of the
 * three int8x16 vectors in `val` to ptr[0], ptr[1] and ptr[2]. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst3q_lane_s8(int8_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_int8x16x3_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    SIMDE_CONSTIFY_16_NO_RESULT_(vst3q_lane_s8, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_int8x16_private r;
    for (size_t i = 0 ; i < 3 ; i++) {
      r = simde_int8x16_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vst3q_lane_s8
  #define vst3q_lane_s8(a, b, c) simde_vst3q_lane_s8((a), (b), (c))
#endif
/* simde_vst3q_lane_s16: as above for int16x8 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst3q_lane_s16(int16_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_int16x8x3_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    SIMDE_CONSTIFY_8_NO_RESULT_(vst3q_lane_s16, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_int16x8_private r;
    for (size_t i = 0 ; i < 3 ; i++) {
      r = simde_int16x8_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst3q_lane_s16
  #define vst3q_lane_s16(a, b, c) simde_vst3q_lane_s16((a), (b), (c))
#endif
/* simde_vst3q_lane_s32: as above for int32x4 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst3q_lane_s32(int32_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_int32x4x3_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    SIMDE_CONSTIFY_4_NO_RESULT_(vst3q_lane_s32, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_int32x4_private r;
    for (size_t i = 0 ; i < 3 ; i++) {
      r = simde_int32x4_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst3q_lane_s32
  #define vst3q_lane_s32(a, b, c) simde_vst3q_lane_s32((a), (b), (c))
#endif
/* simde_vst3q_lane_s64: as above for int64x2 vectors (AArch64-only intrinsic). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst3q_lane_s64(int64_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_int64x2x3_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    SIMDE_CONSTIFY_2_NO_RESULT_(vst3q_lane_s64, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_int64x2_private r;
    for (size_t i = 0 ; i < 3 ; i++) {
      r = simde_int64x2_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vst3q_lane_s64
  #define vst3q_lane_s64(a, b, c) simde_vst3q_lane_s64((a), (b), (c))
#endif
/* simde_vst3q_lane_u8: store element `lane` of each of the three uint8x16
 * vectors in `val` to ptr[0], ptr[1] and ptr[2] (AArch64-only intrinsic). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst3q_lane_u8(uint8_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_uint8x16x3_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    SIMDE_CONSTIFY_16_NO_RESULT_(vst3q_lane_u8, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_uint8x16_private r;
    for (size_t i = 0 ; i < 3 ; i++) {
      r = simde_uint8x16_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vst3q_lane_u8
  #define vst3q_lane_u8(a, b, c) simde_vst3q_lane_u8((a), (b), (c))
#endif
/* simde_vst3q_lane_u16: as above for uint16x8 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst3q_lane_u16(uint16_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_uint16x8x3_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    SIMDE_CONSTIFY_8_NO_RESULT_(vst3q_lane_u16, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_uint16x8_private r;
    for (size_t i = 0 ; i < 3 ; i++) {
      r = simde_uint16x8_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst3q_lane_u16
  #define vst3q_lane_u16(a, b, c) simde_vst3q_lane_u16((a), (b), (c))
#endif
/* simde_vst3q_lane_u32: as above for uint32x4 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst3q_lane_u32(uint32_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_uint32x4x3_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    SIMDE_CONSTIFY_4_NO_RESULT_(vst3q_lane_u32, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_uint32x4_private r;
    for (size_t i = 0 ; i < 3 ; i++) {
      r = simde_uint32x4_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst3q_lane_u32
  #define vst3q_lane_u32(a, b, c) simde_vst3q_lane_u32((a), (b), (c))
#endif
/* simde_vst3q_lane_u64: as above for uint64x2 vectors (AArch64-only intrinsic). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst3q_lane_u64(uint64_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_uint64x2x3_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    SIMDE_CONSTIFY_2_NO_RESULT_(vst3q_lane_u64, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_uint64x2_private r;
    for (size_t i = 0 ; i < 3 ; i++) {
      r = simde_uint64x2_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vst3q_lane_u64
  #define vst3q_lane_u64(a, b, c) simde_vst3q_lane_u64((a), (b), (c))
#endif
/* simde_vst3q_lane_f32: store element `lane` of each of the three float32x4
 * vectors in `val` to ptr[0], ptr[1] and ptr[2]. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst3q_lane_f32(simde_float32_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_float32x4x3_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    SIMDE_CONSTIFY_4_NO_RESULT_(vst3q_lane_f32, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_float32x4_private r;
    for (size_t i = 0 ; i < 3 ; i++) {
      r = simde_float32x4_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vst3q_lane_f32
  #define vst3q_lane_f32(a, b, c) simde_vst3q_lane_f32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_vst3q_lane_f64(simde_float64_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_float64x2x3_t val, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  /* Store element `lane` of each of the three float64x2 vectors in `val`
   * to ptr[0], ptr[1] and ptr[2] respectively.
   *
   * Fix: the SIMDE_REQUIRE_CONSTANT_RANGE annotation had been commented out
   * (with the opening brace moved onto the signature line), unlike every
   * sibling function in this file.  Restoring it re-enables the compile-time
   * diagnostic that `lane` must be a constant in [0, 1] on compilers that
   * support the annotation, and keeps the declaration style consistent. */
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    /* Native intrinsic needs a compile-time constant lane; SIMDE_CONSTIFY_
     * expands a switch over both valid lane values. */
    SIMDE_CONSTIFY_2_NO_RESULT_(vst3q_lane_f64, HEDLEY_UNREACHABLE(), lane, ptr, val);
  #else
    simde_float64x2_private r;
    for (size_t i = 0 ; i < 3 ; i++) {
      r = simde_float64x2_to_private(val.val[i]);
      ptr[i] = r.values[lane];
    }
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vst3q_lane_f64
  #define vst3q_lane_f64(a, b, c) simde_vst3q_lane_f64((a), (b), (c))
#endif
#endif /* !defined(SIMDE_BUG_INTEL_857088) */
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_ST3_LANE_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_MUL_N_H)
#define SIMDE_ARM_NEON_MUL_N_H
#include "types.h"
#include "mul.h"
#include "dup_n.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vmul_n_f32(simde_float32x2_t a, simde_float32 b) {
  /* Multiply every lane of a by the scalar b. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmul_n_f32(a, b);
  #else
    simde_float32x2_t b_vec = simde_vdup_n_f32(b);
    return simde_vmul_f32(a, b_vec);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmul_n_f32
  #define vmul_n_f32(a, b) simde_vmul_n_f32((a), (b))
#endif
/* simde_vmul_n_f64: multiply each lane of a float64x1 vector by scalar b
 * (fallback: broadcast b with dup, then vector multiply). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vmul_n_f64(simde_float64x1_t a, simde_float64 b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vmul_n_f64(a, b);
  #else
    return simde_vmul_f64(a, simde_vdup_n_f64(b));
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vmul_n_f64
  #define vmul_n_f64(a, b) simde_vmul_n_f64((a), (b))
#endif
/* simde_vmul_n_s16: as above for int16x4 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vmul_n_s16(simde_int16x4_t a, int16_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmul_n_s16(a, b);
  #else
    return simde_vmul_s16(a, simde_vdup_n_s16(b));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmul_n_s16
  #define vmul_n_s16(a, b) simde_vmul_n_s16((a), (b))
#endif
/* simde_vmul_n_s32: as above for int32x2 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vmul_n_s32(simde_int32x2_t a, int32_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmul_n_s32(a, b);
  #else
    return simde_vmul_s32(a, simde_vdup_n_s32(b));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmul_n_s32
  #define vmul_n_s32(a, b) simde_vmul_n_s32((a), (b))
#endif
/* simde_vmul_n_u16: as above for uint16x4 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vmul_n_u16(simde_uint16x4_t a, uint16_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmul_n_u16(a, b);
  #else
    return simde_vmul_u16(a, simde_vdup_n_u16(b));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmul_n_u16
  #define vmul_n_u16(a, b) simde_vmul_n_u16((a), (b))
#endif
/* simde_vmul_n_u32: as above for uint32x2 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vmul_n_u32(simde_uint32x2_t a, uint32_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmul_n_u32(a, b);
  #else
    return simde_vmul_u32(a, simde_vdup_n_u32(b));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmul_n_u32
  #define vmul_n_u32(a, b) simde_vmul_n_u32((a), (b))
#endif
/* simde_vmulq_n_f32: 128-bit variant — multiply each lane of a float32x4
 * vector by scalar b (fallback: broadcast with dupq, then vector multiply). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vmulq_n_f32(simde_float32x4_t a, simde_float32 b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmulq_n_f32(a, b);
  #else
    return simde_vmulq_f32(a, simde_vdupq_n_f32(b));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmulq_n_f32
  #define vmulq_n_f32(a, b) simde_vmulq_n_f32((a), (b))
#endif
/* simde_vmulq_n_f64: as above for float64x2 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vmulq_n_f64(simde_float64x2_t a, simde_float64 b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vmulq_n_f64(a, b);
  #else
    return simde_vmulq_f64(a, simde_vdupq_n_f64(b));
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vmulq_n_f64
  #define vmulq_n_f64(a, b) simde_vmulq_n_f64((a), (b))
#endif
/* simde_vmulq_n_s16: as above for int16x8 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vmulq_n_s16(simde_int16x8_t a, int16_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmulq_n_s16(a, b);
  #else
    return simde_vmulq_s16(a, simde_vdupq_n_s16(b));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmulq_n_s16
  #define vmulq_n_s16(a, b) simde_vmulq_n_s16((a), (b))
#endif
/* simde_vmulq_n_s32: as above for int32x4 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vmulq_n_s32(simde_int32x4_t a, int32_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmulq_n_s32(a, b);
  #else
    return simde_vmulq_s32(a, simde_vdupq_n_s32(b));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmulq_n_s32
  #define vmulq_n_s32(a, b) simde_vmulq_n_s32((a), (b))
#endif
/* simde_vmulq_n_u16: as above for uint16x8 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vmulq_n_u16(simde_uint16x8_t a, uint16_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmulq_n_u16(a, b);
  #else
    return simde_vmulq_u16(a, simde_vdupq_n_u16(b));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmulq_n_u16
  #define vmulq_n_u16(a, b) simde_vmulq_n_u16((a), (b))
#endif
/* simde_vmulq_n_u32: as above for uint32x4 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vmulq_n_u32(simde_uint32x4_t a, uint32_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmulq_n_u32(a, b);
  #else
    return simde_vmulq_u32(a, simde_vdupq_n_u32(b));
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmulq_n_u32
  #define vmulq_n_u32(a, b) simde_vmulq_n_u32((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_MUL_N_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_ADDW_H)
#define SIMDE_ARM_NEON_ADDW_H
#include "types.h"
#include "add.h"
#include "movl.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vaddw_s8(simde_int16x8_t a, simde_int8x8_t b) {
  /* Widening add: add the (widened) 8-bit lanes of b to the 16-bit lanes of a. */
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vaddw_s8(a, b);
  #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
    /* Widen b to 16 bits, then do an ordinary 128-bit add. */
    return simde_vaddq_s16(a, simde_vmovl_s8(b));
  #else
    simde_int16x8_private a_ = simde_int16x8_to_private(a);
    simde_int8x8_private b_ = simde_int8x8_to_private(b);
    simde_int16x8_private r_;
    #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_CONVERT_VECTOR_)
      /* Convert b's lanes to 16 bits, then add lane-wise via vector ops. */
      SIMDE_CONVERT_VECTOR_(r_.values, b_.values);
      r_.values += a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_int16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vaddw_s8
  #define vaddw_s8(a, b) simde_vaddw_s8((a), (b))
#endif
/* simde_vaddw_s16: widening add — add the (widened) 16-bit lanes of b to the
 * 32-bit lanes of a. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vaddw_s16(simde_int32x4_t a, simde_int16x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vaddw_s16(a, b);
  #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
    /* Widen b, then a plain 128-bit add. */
    return simde_vaddq_s32(a, simde_vmovl_s16(b));
  #else
    simde_int32x4_private r_;
    simde_int32x4_private a_ = simde_int32x4_to_private(a);
    simde_int16x4_private b_ = simde_int16x4_to_private(b);
    #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.values, b_.values);
      r_.values += a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_int32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vaddw_s16
  #define vaddw_s16(a, b) simde_vaddw_s16((a), (b))
#endif
/* simde_vaddw_s32: widening add of 32-bit lanes into 64-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vaddw_s32(simde_int64x2_t a, simde_int32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vaddw_s32(a, b);
  #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
    return simde_vaddq_s64(a, simde_vmovl_s32(b));
  #else
    simde_int64x2_private r_;
    simde_int64x2_private a_ = simde_int64x2_to_private(a);
    simde_int32x2_private b_ = simde_int32x2_to_private(b);
    #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.values, b_.values);
      r_.values += a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_int64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vaddw_s32
  #define vaddw_s32(a, b) simde_vaddw_s32((a), (b))
#endif
/* simde_vaddw_u8: widening add — add the (zero-extended) 8-bit lanes of b to
 * the 16-bit lanes of a. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vaddw_u8(simde_uint16x8_t a, simde_uint8x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vaddw_u8(a, b);
  #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
    /* Widen b, then a plain 128-bit add. */
    return simde_vaddq_u16(a, simde_vmovl_u8(b));
  #else
    simde_uint16x8_private r_;
    simde_uint16x8_private a_ = simde_uint16x8_to_private(a);
    simde_uint8x8_private b_ = simde_uint8x8_to_private(b);
    #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.values, b_.values);
      r_.values += a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_uint16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vaddw_u8
  #define vaddw_u8(a, b) simde_vaddw_u8((a), (b))
#endif
/* simde_vaddw_u16: widening add of 16-bit lanes into 32-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vaddw_u16(simde_uint32x4_t a, simde_uint16x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vaddw_u16(a, b);
  #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
    return simde_vaddq_u32(a, simde_vmovl_u16(b));
  #else
    simde_uint32x4_private r_;
    simde_uint32x4_private a_ = simde_uint32x4_to_private(a);
    simde_uint16x4_private b_ = simde_uint16x4_to_private(b);
    #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.values, b_.values);
      r_.values += a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_uint32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vaddw_u16
  #define vaddw_u16(a, b) simde_vaddw_u16((a), (b))
#endif
/* simde_vaddw_u32: widening add of 32-bit lanes into 64-bit lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vaddw_u32(simde_uint64x2_t a, simde_uint32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vaddw_u32(a, b);
  #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
    return simde_vaddq_u64(a, simde_vmovl_u32(b));
  #else
    simde_uint64x2_private r_;
    simde_uint64x2_private a_ = simde_uint64x2_to_private(a);
    simde_uint32x2_private b_ = simde_uint32x2_to_private(b);
    #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.values, b_.values);
      r_.values += a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] + b_.values[i];
      }
    #endif
    return simde_uint64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vaddw_u32
  #define vaddw_u32(a, b) simde_vaddw_u32((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_ADDW_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2021 Zhi An Ng <zhin@google.com> (Copyright owned by Google, LLC)
* 2021 Décio Luiz Gazzoni Filho <decio@decpp.net>
*/
#if !defined(SIMDE_ARM_NEON_LD1Q_X2_H)
#define SIMDE_ARM_NEON_LD1Q_X2_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
#if HEDLEY_GCC_VERSION_CHECK(7,0,0)
SIMDE_DIAGNOSTIC_DISABLE_MAYBE_UNINITIAZILED_
#endif
SIMDE_BEGIN_DECLS_
#if !defined(SIMDE_BUG_INTEL_857088)
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4x2_t
simde_vld1q_f32_x2(simde_float32 const ptr[HEDLEY_ARRAY_PARAM(8)]) {
  /* Load 8 contiguous floats from ptr as two float32x4 vectors. */
  #if \
      defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
      (!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
      (!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
    return vld1q_f32_x2(ptr);
  #else
    simde_float32x4_private halves_[2];
    for (size_t half = 0; half < 2; half++) {
      for (size_t j = 0; j < 4; j++) {
        halves_[half].values[j] = ptr[(half * 4) + j];
      }
    }
    simde_float32x4x2_t pair_ = { { simde_float32x4_from_private(halves_[0]),
                                    simde_float32x4_from_private(halves_[1]) } };
    return pair_;
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1q_f32_x2
  #define vld1q_f32_x2(a) simde_vld1q_f32_x2((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2x2_t
simde_vld1q_f64_x2(simde_float64 const ptr[HEDLEY_ARRAY_PARAM(4)]) {
#if \
defined(SIMDE_ARM_NEON_A64V8_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,0,0)) && \
(!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0))
return vld1q_f64_x2(ptr);
#else
simde_float64x2_private a_[2];
for (size_t i = 0; i < 4; i++) {
a_[i / 2].values[i % 2] = ptr[i];
}
simde_float64x2x2_t s_ = { { simde_float64x2_from_private(a_[0]),
simde_float64x2_from_private(a_[1]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vld1q_f64_x2
#define vld1q_f64_x2(a) simde_vld1q_f64_x2((a))
#endif
/* simde_vld1q_s8_x2: load 32 consecutive int8 values into two int8x16 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16x2_t
simde_vld1q_s8_x2(int8_t const ptr[HEDLEY_ARRAY_PARAM(32)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1q_s8_x2(ptr);
#else
/* Portable fallback: elements 0..15 -> a_[0], 16..31 -> a_[1]. */
simde_int8x16_private a_[2];
for (size_t i = 0; i < 32; i++) {
a_[i / 16].values[i % 16] = ptr[i];
}
simde_int8x16x2_t s_ = { { simde_int8x16_from_private(a_[0]),
simde_int8x16_from_private(a_[1]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_s8_x2
#define vld1q_s8_x2(a) simde_vld1q_s8_x2((a))
#endif
/* simde_vld1q_s16_x2: load 16 consecutive int16 values into two int16x8 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8x2_t
simde_vld1q_s16_x2(int16_t const ptr[HEDLEY_ARRAY_PARAM(16)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1q_s16_x2(ptr);
#else
simde_int16x8_private a_[2];
for (size_t i = 0; i < 16; i++) {
a_[i / 8].values[i % 8] = ptr[i];
}
simde_int16x8x2_t s_ = { { simde_int16x8_from_private(a_[0]),
simde_int16x8_from_private(a_[1]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_s16_x2
#define vld1q_s16_x2(a) simde_vld1q_s16_x2((a))
#endif
/* simde_vld1q_s32_x2: load 8 consecutive int32 values into two int32x4 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4x2_t
simde_vld1q_s32_x2(int32_t const ptr[HEDLEY_ARRAY_PARAM(8)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1q_s32_x2(ptr);
#else
simde_int32x4_private a_[2];
for (size_t i = 0; i < 8; i++) {
a_[i / 4].values[i % 4] = ptr[i];
}
simde_int32x4x2_t s_ = { { simde_int32x4_from_private(a_[0]),
simde_int32x4_from_private(a_[1]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_s32_x2
#define vld1q_s32_x2(a) simde_vld1q_s32_x2((a))
#endif
/* simde_vld1q_s64_x2: load 4 consecutive int64 values into two int64x2 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2x2_t
simde_vld1q_s64_x2(int64_t const ptr[HEDLEY_ARRAY_PARAM(4)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1q_s64_x2(ptr);
#else
simde_int64x2_private a_[2];
for (size_t i = 0; i < 4; i++) {
a_[i / 2].values[i % 2] = ptr[i];
}
simde_int64x2x2_t s_ = { { simde_int64x2_from_private(a_[0]),
simde_int64x2_from_private(a_[1]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_s64_x2
#define vld1q_s64_x2(a) simde_vld1q_s64_x2((a))
#endif
/* simde_vld1q_u8_x2: load 32 consecutive uint8 values into two uint8x16 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16x2_t
simde_vld1q_u8_x2(uint8_t const ptr[HEDLEY_ARRAY_PARAM(32)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1q_u8_x2(ptr);
#else
/* Portable fallback: elements 0..15 -> a_[0], 16..31 -> a_[1]. */
simde_uint8x16_private a_[2];
for (size_t i = 0; i < 32; i++) {
a_[i / 16].values[i % 16] = ptr[i];
}
simde_uint8x16x2_t s_ = { { simde_uint8x16_from_private(a_[0]),
simde_uint8x16_from_private(a_[1]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_u8_x2
#define vld1q_u8_x2(a) simde_vld1q_u8_x2((a))
#endif
/* simde_vld1q_u16_x2: load 16 consecutive uint16 values into two uint16x8 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8x2_t
simde_vld1q_u16_x2(uint16_t const ptr[HEDLEY_ARRAY_PARAM(16)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1q_u16_x2(ptr);
#else
simde_uint16x8_private a_[2];
for (size_t i = 0; i < 16; i++) {
a_[i / 8].values[i % 8] = ptr[i];
}
simde_uint16x8x2_t s_ = { { simde_uint16x8_from_private(a_[0]),
simde_uint16x8_from_private(a_[1]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_u16_x2
#define vld1q_u16_x2(a) simde_vld1q_u16_x2((a))
#endif
/* simde_vld1q_u32_x2: load 8 consecutive uint32 values into two uint32x4 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4x2_t
simde_vld1q_u32_x2(uint32_t const ptr[HEDLEY_ARRAY_PARAM(8)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1q_u32_x2(ptr);
#else
simde_uint32x4_private a_[2];
for (size_t i = 0; i < 8; i++) {
a_[i / 4].values[i % 4] = ptr[i];
}
simde_uint32x4x2_t s_ = { { simde_uint32x4_from_private(a_[0]),
simde_uint32x4_from_private(a_[1]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_u32_x2
#define vld1q_u32_x2(a) simde_vld1q_u32_x2((a))
#endif
/* simde_vld1q_u64_x2: load 4 consecutive uint64 values into two uint64x2 vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2x2_t
simde_vld1q_u64_x2(uint64_t const ptr[HEDLEY_ARRAY_PARAM(4)]) {
#if \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \
(!defined(__clang__) || (SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE)))
return vld1q_u64_x2(ptr);
#else
simde_uint64x2_private a_[2];
for (size_t i = 0; i < 4; i++) {
a_[i / 2].values[i % 2] = ptr[i];
}
simde_uint64x2x2_t s_ = { { simde_uint64x2_from_private(a_[0]),
simde_uint64x2_from_private(a_[1]) } };
return s_;
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vld1q_u64_x2
#define vld1q_u64_x2(a) simde_vld1q_u64_x2((a))
#endif
#endif /* !defined(SIMDE_BUG_INTEL_857088) */
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_LD1Q_X2_H) */
/* ---- concatenation boundary (dataset metadata, preserved):
 * language: Unknown | repo: OpenMS/OpenMS
 * next file: src/openms/extern/simde/simde/arm/neon/qsub.h (.h, 22,099 bytes, 686 lines) ---- */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_QSUB_H)
#define SIMDE_ARM_NEON_QSUB_H
#include "types.h"
#include "sub.h"
#include "bsl.h"
#include "cgt.h"
#include "dup_n.h"
#include "sub.h"
#include <limits.h>
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Scalar saturating-subtract helpers (AArch64 vqsub{b,h,s,d} lane intrinsics).
 * Each delegates to the matching simde_math_subs_* helper; by its name this
 * performs a saturating subtract for the given width (TODO: confirmed by the
 * simde-math header, not visible here). */
/* vqsubb_s8: saturating a - b for int8. */
SIMDE_FUNCTION_ATTRIBUTES
int8_t
simde_vqsubb_s8(int8_t a, int8_t b) {
return simde_math_subs_i8(a, b);
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqsubb_s8
#define vqsubb_s8(a, b) simde_vqsubb_s8((a), (b))
#endif
/* vqsubh_s16: saturating a - b for int16. */
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_vqsubh_s16(int16_t a, int16_t b) {
return simde_math_subs_i16(a, b);
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqsubh_s16
#define vqsubh_s16(a, b) simde_vqsubh_s16((a), (b))
#endif
/* vqsubs_s32: saturating a - b for int32. */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_vqsubs_s32(int32_t a, int32_t b) {
return simde_math_subs_i32(a, b);
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqsubs_s32
#define vqsubs_s32(a, b) simde_vqsubs_s32((a), (b))
#endif
/* vqsubd_s64: saturating a - b for int64. */
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_vqsubd_s64(int64_t a, int64_t b) {
return simde_math_subs_i64(a, b);
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqsubd_s64
#define vqsubd_s64(a, b) simde_vqsubd_s64((a), (b))
#endif
/* vqsubb_u8: saturating a - b for uint8. */
SIMDE_FUNCTION_ATTRIBUTES
uint8_t
simde_vqsubb_u8(uint8_t a, uint8_t b) {
return simde_math_subs_u8(a, b);
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqsubb_u8
#define vqsubb_u8(a, b) simde_vqsubb_u8((a), (b))
#endif
/* vqsubh_u16: saturating a - b for uint16. */
SIMDE_FUNCTION_ATTRIBUTES
uint16_t
simde_vqsubh_u16(uint16_t a, uint16_t b) {
return simde_math_subs_u16(a, b);
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqsubh_u16
#define vqsubh_u16(a, b) simde_vqsubh_u16((a), (b))
#endif
/* vqsubs_u32: saturating a - b for uint32. */
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_vqsubs_u32(uint32_t a, uint32_t b) {
return simde_math_subs_u32(a, b);
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqsubs_u32
#define vqsubs_u32(a, b) simde_vqsubs_u32((a), (b))
#endif
/* vqsubd_u64: saturating a - b for uint64. */
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_vqsubd_u64(uint64_t a, uint64_t b) {
return simde_math_subs_u64(a, b);
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vqsubd_u64
#define vqsubd_u64(a, b) simde_vqsubd_u64((a), (b))
#endif
/* simde_vqsub_s8: signed saturating subtract, 8x int8 (64-bit vector).
 *
 * The SIMDE_VECTOR_SUBSCRIPT_SCALAR branch uses a branch-free saturation
 * trick: diff_sat holds the value to saturate to per lane (INT8_MIN where
 * b > a, since (b > a) yields all-ones and all-ones ^ INT8_MAX == INT8_MIN;
 * INT8_MAX otherwise). The wrapped difference's sign bit disagrees with
 * diff_sat's exactly in lanes that overflowed, so m (sign bit broadcast by
 * arithmetic shift) selects diff_sat there and diff elsewhere. The same
 * pattern repeats for the wider element types below. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vqsub_s8(simde_int8x8_t a, simde_int8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqsub_s8(a, b);
#else
simde_int8x8_private
r_,
a_ = simde_int8x8_to_private(a),
b_ = simde_int8x8_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_subs_pi8(a_.m64, b_.m64);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
const __typeof__(r_.values) diff_sat = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (b_.values > a_.values) ^ INT8_MAX);
const __typeof__(r_.values) diff = a_.values - b_.values;
const __typeof__(r_.values) saturate = diff_sat ^ diff;
const __typeof__(r_.values) m = saturate >> 7;
r_.values = (diff_sat & m) | (diff & ~m);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vqsubb_s8(a_.values[i], b_.values[i]);
}
#endif
return simde_int8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqsub_s8
#define vqsub_s8(a, b) simde_vqsub_s8((a), (b))
#endif
/* simde_vqsub_s16: signed saturating subtract, 4x int16 (64-bit vector). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vqsub_s16(simde_int16x4_t a, simde_int16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqsub_s16(a, b);
#else
simde_int16x4_private
r_,
a_ = simde_int16x4_to_private(a),
b_ = simde_int16x4_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_subs_pi16(a_.m64, b_.m64);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
const __typeof__(r_.values) diff_sat = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (b_.values > a_.values) ^ INT16_MAX);
const __typeof__(r_.values) diff = a_.values - b_.values;
const __typeof__(r_.values) saturate = diff_sat ^ diff;
const __typeof__(r_.values) m = saturate >> 15;
r_.values = (diff_sat & m) | (diff & ~m);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vqsubh_s16(a_.values[i], b_.values[i]);
}
#endif
return simde_int16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqsub_s16
#define vqsub_s16(a, b) simde_vqsub_s16((a), (b))
#endif
/* simde_vqsub_s32: signed saturating subtract, 2x int32 (64-bit vector).
 * No MMX saturating-subtract exists for 32-bit lanes, hence no m64 branch. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vqsub_s32(simde_int32x2_t a, simde_int32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqsub_s32(a, b);
#else
simde_int32x2_private
r_,
a_ = simde_int32x2_to_private(a),
b_ = simde_int32x2_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
const __typeof__(r_.values) diff_sat = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (b_.values > a_.values) ^ INT32_MAX);
const __typeof__(r_.values) diff = a_.values - b_.values;
const __typeof__(r_.values) saturate = diff_sat ^ diff;
const __typeof__(r_.values) m = saturate >> 31;
r_.values = (diff_sat & m) | (diff & ~m);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vqsubs_s32(a_.values[i], b_.values[i]);
}
#endif
return simde_int32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqsub_s32
#define vqsub_s32(a, b) simde_vqsub_s32((a), (b))
#endif
/* simde_vqsub_s64: signed saturating subtract, 1x int64 (64-bit vector). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vqsub_s64(simde_int64x1_t a, simde_int64x1_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqsub_s64(a, b);
#else
simde_int64x1_private
r_,
a_ = simde_int64x1_to_private(a),
b_ = simde_int64x1_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
const __typeof__(r_.values) diff_sat = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (b_.values > a_.values) ^ INT64_MAX);
const __typeof__(r_.values) diff = a_.values - b_.values;
const __typeof__(r_.values) saturate = diff_sat ^ diff;
const __typeof__(r_.values) m = saturate >> 63;
r_.values = (diff_sat & m) | (diff & ~m);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vqsubd_s64(a_.values[i], b_.values[i]);
}
#endif
return simde_int64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqsub_s64
#define vqsub_s64(a, b) simde_vqsub_s64((a), (b))
#endif
/* simde_vqsub_u8: unsigned saturating subtract, 8x uint8 (64-bit vector).
 *
 * Unsigned trick used in the vector-extension branch: a - b wraps iff the
 * result exceeds a, so the comparison (r <= a) yields an all-ones mask for
 * non-wrapped lanes; ANDing clamps wrapped lanes to 0. The same pattern is
 * used for the other unsigned widths below. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vqsub_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqsub_u8(a, b);
#else
simde_uint8x8_private
r_,
a_ = simde_uint8x8_to_private(a),
b_ = simde_uint8x8_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_subs_pu8(a_.m64, b_.m64);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.values = a_.values - b_.values;
r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (r_.values <= a_.values));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vqsubb_u8(a_.values[i], b_.values[i]);
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqsub_u8
#define vqsub_u8(a, b) simde_vqsub_u8((a), (b))
#endif
/* simde_vqsub_u16: unsigned saturating subtract, 4x uint16 (64-bit vector). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vqsub_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqsub_u16(a, b);
#else
simde_uint16x4_private
r_,
a_ = simde_uint16x4_to_private(a),
b_ = simde_uint16x4_to_private(b);
#if defined(SIMDE_X86_MMX_NATIVE)
r_.m64 = _mm_subs_pu16(a_.m64, b_.m64);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.values = a_.values - b_.values;
r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (r_.values <= a_.values));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vqsubh_u16(a_.values[i], b_.values[i]);
}
#endif
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqsub_u16
#define vqsub_u16(a, b) simde_vqsub_u16((a), (b))
#endif
/* simde_vqsub_u32: unsigned saturating subtract, 2x uint32 (64-bit vector).
 * MMX has no unsigned 32-bit saturating subtract, hence no m64 branch. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vqsub_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqsub_u32(a, b);
#else
simde_uint32x2_private
r_,
a_ = simde_uint32x2_to_private(a),
b_ = simde_uint32x2_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.values = a_.values - b_.values;
r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (r_.values <= a_.values));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vqsubs_u32(a_.values[i], b_.values[i]);
}
#endif
return simde_uint32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqsub_u32
#define vqsub_u32(a, b) simde_vqsub_u32((a), (b))
#endif
/* simde_vqsub_u64: unsigned saturating subtract, 1x uint64 (64-bit vector). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vqsub_u64(simde_uint64x1_t a, simde_uint64x1_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqsub_u64(a, b);
#else
simde_uint64x1_private
r_,
a_ = simde_uint64x1_to_private(a),
b_ = simde_uint64x1_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.values = a_.values - b_.values;
r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (r_.values <= a_.values));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vqsubd_u64(a_.values[i], b_.values[i]);
}
#endif
return simde_uint64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqsub_u64
#define vqsub_u64(a, b) simde_vqsub_u64((a), (b))
#endif
/* simde_vqsubq_s8: signed saturating subtract, 16x int8 (128-bit vector).
 * Prefers hardware saturating ops (NEON, AltiVec vec_subs, WASM sub_sat,
 * SSE2 _mm_subs_epi8), then the branch-free SWAR trick (see vqsub_s8),
 * then a scalar loop. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vqsubq_s8(simde_int8x16_t a, simde_int8x16_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqsubq_s8(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_subs(a, b);
#else
simde_int8x16_private
r_,
a_ = simde_int8x16_to_private(a),
b_ = simde_int8x16_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i8x16_sub_sat(a_.v128, b_.v128);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_subs_epi8(a_.m128i, b_.m128i);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
const __typeof__(r_.values) diff_sat = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (b_.values > a_.values) ^ INT8_MAX);
const __typeof__(r_.values) diff = a_.values - b_.values;
const __typeof__(r_.values) saturate = diff_sat ^ diff;
const __typeof__(r_.values) m = saturate >> 7;
r_.values = (diff_sat & m) | (diff & ~m);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vqsubb_s8(a_.values[i], b_.values[i]);
}
#endif
return simde_int8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqsubq_s8
#define vqsubq_s8(a, b) simde_vqsubq_s8((a), (b))
#endif
/* simde_vqsubq_s16: signed saturating subtract, 8x int16 (128-bit vector). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vqsubq_s16(simde_int16x8_t a, simde_int16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqsubq_s16(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_subs(a, b);
#else
simde_int16x8_private
r_,
a_ = simde_int16x8_to_private(a),
b_ = simde_int16x8_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_i16x8_sub_sat(a_.v128, b_.v128);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_subs_epi16(a_.m128i, b_.m128i);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
const __typeof__(r_.values) diff_sat = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (b_.values > a_.values) ^ INT16_MAX);
const __typeof__(r_.values) diff = a_.values - b_.values;
const __typeof__(r_.values) saturate = diff_sat ^ diff;
const __typeof__(r_.values) m = saturate >> 15;
r_.values = (diff_sat & m) | (diff & ~m);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vqsubh_s16(a_.values[i], b_.values[i]);
}
#endif
return simde_int16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqsubq_s16
#define vqsubq_s16(a, b) simde_vqsubq_s16((a), (b))
#endif
/* simde_vqsubq_s32: signed saturating subtract, 4x int32 (128-bit vector).
 * SSE2 has no 32-bit saturating subtract, so the saturation is synthesized:
 * diff_sat is the saturation target per lane (INT32_MIN where b > a, else
 * INT32_MAX); t's sign bit marks lanes whose computed difference sign
 * disagrees with diff_sat, i.e. lanes that overflowed. SSE4.1 blendv selects
 * by that sign bit directly; plain SSE2 uses a broadcast-sign mask and the
 * identity diff ^ (t & mask) == diff_sat in overflowed lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vqsubq_s32(simde_int32x4_t a, simde_int32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqsubq_s32(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_subs(a, b);
#else
simde_int32x4_private
r_,
a_ = simde_int32x4_to_private(a),
b_ = simde_int32x4_to_private(b);
#if defined(SIMDE_X86_SSE2_NATIVE)
const __m128i diff_sat = _mm_xor_si128(_mm_set1_epi32(INT32_MAX), _mm_cmpgt_epi32(b_.m128i, a_.m128i));
const __m128i diff = _mm_sub_epi32(a_.m128i, b_.m128i);
const __m128i t = _mm_xor_si128(diff_sat, diff);
#if defined(SIMDE_X86_SSE4_1_NATIVE)
r_.m128i =
_mm_castps_si128(
_mm_blendv_ps(
_mm_castsi128_ps(diff),
_mm_castsi128_ps(diff_sat),
_mm_castsi128_ps(t)
)
);
#else
r_.m128i = _mm_xor_si128(diff, _mm_and_si128(t, _mm_srai_epi32(t, 31)));
#endif
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
const __typeof__(r_.values) diff_sat = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (b_.values > a_.values) ^ INT32_MAX);
const __typeof__(r_.values) diff = a_.values - b_.values;
const __typeof__(r_.values) saturate = diff_sat ^ diff;
const __typeof__(r_.values) m = saturate >> 31;
r_.values = (diff_sat & m) | (diff & ~m);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vqsubs_s32(a_.values[i], b_.values[i]);
}
#endif
return simde_int32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqsubq_s32
#define vqsubq_s32(a, b) simde_vqsubq_s32((a), (b))
#endif
/* simde_vqsubq_s64: signed saturating subtract, 2x int64 (128-bit vector).
 * No AltiVec/SSE branch for 64-bit lanes; falls back to the SWAR trick or a
 * scalar loop. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vqsubq_s64(simde_int64x2_t a, simde_int64x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqsubq_s64(a, b);
#else
simde_int64x2_private
r_,
a_ = simde_int64x2_to_private(a),
b_ = simde_int64x2_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
const __typeof__(r_.values) diff_sat = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (b_.values > a_.values) ^ INT64_MAX);
const __typeof__(r_.values) diff = a_.values - b_.values;
const __typeof__(r_.values) saturate = diff_sat ^ diff;
const __typeof__(r_.values) m = saturate >> 63;
r_.values = (diff_sat & m) | (diff & ~m);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vqsubd_s64(a_.values[i], b_.values[i]);
}
#endif
return simde_int64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqsubq_s64
#define vqsubq_s64(a, b) simde_vqsubq_s64((a), (b))
#endif
/* simde_vqsubq_u8: unsigned saturating subtract, 16x uint8 (128-bit vector).
 * Vector-extension path: subtraction wrapped iff result > a; the (r <= a)
 * mask clamps wrapped lanes to 0. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vqsubq_u8(simde_uint8x16_t a, simde_uint8x16_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqsubq_u8(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_subs(a, b);
#else
simde_uint8x16_private
r_,
a_ = simde_uint8x16_to_private(a),
b_ = simde_uint8x16_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_u8x16_sub_sat(a_.v128, b_.v128);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_subs_epu8(a_.m128i, b_.m128i);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.values = a_.values - b_.values;
r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), r_.values <= a_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vqsubb_u8(a_.values[i], b_.values[i]);
}
#endif
return simde_uint8x16_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqsubq_u8
#define vqsubq_u8(a, b) simde_vqsubq_u8((a), (b))
#endif
/* simde_vqsubq_u16: unsigned saturating subtract, 8x uint16 (128-bit vector). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vqsubq_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqsubq_u16(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_subs(a, b);
#else
simde_uint16x8_private
r_,
a_ = simde_uint16x8_to_private(a),
b_ = simde_uint16x8_to_private(b);
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.v128 = wasm_u16x8_sub_sat(a_.v128, b_.v128);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.m128i = _mm_subs_epu16(a_.m128i, b_.m128i);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.values = a_.values - b_.values;
r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), r_.values <= a_.values);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vqsubh_u16(a_.values[i], b_.values[i]);
}
#endif
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqsubq_u16
#define vqsubq_u16(a, b) simde_vqsubq_u16((a), (b))
#endif
/* simde_vqsubq_u32: unsigned saturating subtract, 4x uint32 (128-bit vector).
 * Each lane is a - b clamped to 0 instead of wrapping on underflow.
 *
 * Fix: the original contained two consecutive, byte-identical
 * `#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)` branches; the second could
 * never be selected and was dead code. It has been removed. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vqsubq_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqsubq_u32(a, b);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_subs(a, b);
#else
simde_uint32x4_private
r_,
a_ = simde_uint32x4_to_private(a),
b_ = simde_uint32x4_to_private(b);
#if defined(SIMDE_X86_SSE2_NATIVE)
/* SSE2 lacks an unsigned 32-bit compare: bias both operands by INT32_MIN so
 * the signed compare detects a borrow (difference > a in unsigned terms),
 * then invert that mask and AND to zero out underflowed lanes. */
const __m128i i32_min = _mm_set1_epi32(INT32_MIN);
const __m128i difference = _mm_sub_epi32(a_.m128i, b_.m128i);
r_.m128i =
_mm_and_si128(
difference,
_mm_xor_si128(
_mm_cmpgt_epi32(
_mm_xor_si128(difference, i32_min),
_mm_xor_si128(a_.m128i, i32_min)
),
_mm_set1_epi32(~INT32_C(0))
)
);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
/* a - b wrapped iff the result exceeds a; (r <= a) yields an all-ones mask
 * for non-wrapped lanes, clamping wrapped lanes to 0. */
r_.values = a_.values - b_.values;
r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (r_.values <= a_.values));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vqsubs_u32(a_.values[i], b_.values[i]);
}
#endif
return simde_uint32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqsubq_u32
#define vqsubq_u32(a, b) simde_vqsubq_u32((a), (b))
#endif
/* simde_vqsubq_u64: unsigned saturating subtract, 2x uint64 (128-bit vector).
 * No AltiVec/SSE/WASM branch for 64-bit lanes; uses the wrap-detection mask
 * (r <= a) or a scalar loop. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vqsubq_u64(simde_uint64x2_t a, simde_uint64x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vqsubq_u64(a, b);
#else
simde_uint64x2_private
r_,
a_ = simde_uint64x2_to_private(a),
b_ = simde_uint64x2_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.values = a_.values - b_.values;
r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (r_.values <= a_.values));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = simde_vqsubd_u64(a_.values[i], b_.values[i]);
}
#endif
return simde_uint64x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vqsubq_u64
#define vqsubq_u64(a, b) simde_vqsubq_u64((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_QSUB_H) */
/* ---- concatenation boundary (dataset metadata, preserved):
 * language: Unknown | repo: OpenMS/OpenMS
 * next file: src/openms/extern/simde/simde/arm/neon/orn.h (.h, 14,783 bytes, 506 lines) ---- */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_ORN_H)
#define SIMDE_ARM_NEON_ORN_H
#include "orr.h"
#include "mvn.h"
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* simde_vorn_s8: bitwise OR-NOT (ORN), 8x int8 (64-bit vector): a | ~b. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vorn_s8(simde_int8x8_t a, simde_int8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vorn_s8(a, b);
#else
simde_int8x8_private
a_ = simde_int8x8_to_private(a),
b_ = simde_int8x8_to_private(b),
r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values | ~(b_.values);
#else
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] | ~b_.values[i];
}
#endif
return simde_int8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vorn_s8
#define vorn_s8(a, b) simde_vorn_s8((a), (b))
#endif
/* simde_vorn_s16: bitwise ORN, 4x int16 (64-bit vector): a | ~b. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vorn_s16(simde_int16x4_t a, simde_int16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vorn_s16(a, b);
#else
simde_int16x4_private
a_ = simde_int16x4_to_private(a),
b_ = simde_int16x4_to_private(b),
r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values | ~(b_.values);
#else
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] | ~b_.values[i];
}
#endif
return simde_int16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vorn_s16
#define vorn_s16(a, b) simde_vorn_s16((a), (b))
#endif
/* simde_vorn_s32: bitwise ORN, 2x int32 (64-bit vector): a | ~b. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vorn_s32(simde_int32x2_t a, simde_int32x2_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vorn_s32(a, b);
#else
simde_int32x2_private
a_ = simde_int32x2_to_private(a),
b_ = simde_int32x2_to_private(b),
r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values | ~(b_.values);
#else
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] | ~b_.values[i];
}
#endif
return simde_int32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vorn_s32
#define vorn_s32(a, b) simde_vorn_s32((a), (b))
#endif
/* simde_vorn_s64: bitwise ORN, 1x int64 (64-bit vector): a | ~b. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vorn_s64(simde_int64x1_t a, simde_int64x1_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vorn_s64(a, b);
#else
simde_int64x1_private
a_ = simde_int64x1_to_private(a),
b_ = simde_int64x1_to_private(b),
r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values | ~(b_.values);
#else
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] | ~b_.values[i];
}
#endif
return simde_int64x1_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vorn_s64
#define vorn_s64(a, b) simde_vorn_s64((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vorn_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vorn_u8(a, b);
#else
simde_uint8x8_private
a_ = simde_uint8x8_to_private(a),
b_ = simde_uint8x8_to_private(b),
r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values | ~(b_.values);
#else
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] | ~b_.values[i];
}
#endif
return simde_uint8x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vorn_u8
#define vorn_u8(a, b) simde_vorn_u8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vorn_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vorn_u16(a, b);
#else
simde_uint16x4_private
a_ = simde_uint16x4_to_private(a),
b_ = simde_uint16x4_to_private(b),
r_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.values = a_.values | ~(b_.values);
#else
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] | ~b_.values[i];
}
#endif
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vorn_u16
#define vorn_u16(a, b) simde_vorn_u16((a), (b))
#endif
/* vorn_u32: lanewise OR-complement, r = a | ~b, on 64-bit uint32x2 vectors.
 * Native NEON when available, else vector extensions or a scalar loop. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vorn_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vorn_u32(a, b);
  #else
    simde_uint32x2_private
      a_ = simde_uint32x2_to_private(a),
      b_ = simde_uint32x2_to_private(b),
      r_;
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values | ~(b_.values);
    #else
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] | ~b_.values[i];
      }
    #endif
    return simde_uint32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vorn_u32
  #define vorn_u32(a, b) simde_vorn_u32((a), (b))
#endif
/* vorn_u64: lanewise OR-complement, r = a | ~b, on the single-lane uint64x1.
 * Native NEON when available, else vector extensions or a scalar loop. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_vorn_u64(simde_uint64x1_t a, simde_uint64x1_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vorn_u64(a, b);
  #else
    simde_uint64x1_private
      a_ = simde_uint64x1_to_private(a),
      b_ = simde_uint64x1_to_private(b),
      r_;
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values | ~(b_.values);
    #else
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] | ~b_.values[i];
      }
    #endif
    return simde_uint64x1_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vorn_u64
  #define vorn_u64(a, b) simde_vorn_u64((a), (b))
#endif
/* vornq_s8: lanewise OR-complement, r = a | ~b, on 128-bit int8x16 vectors.
 * Backends: native NEON; POWER8 vec_orc; AVX512VL single ternary-logic op
 * (imm 0xf3 with operands (A, B, A) evaluates to A | ~B); vector extensions;
 * scalar loop. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vornq_s8(simde_int8x16_t a, simde_int8x16_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vornq_s8(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
    return vec_orc(a, b);
  #else
    simde_int8x16_private
      a_ = simde_int8x16_to_private(a),
      b_ = simde_int8x16_to_private(b),
      r_;
    #if defined(SIMDE_X86_AVX512VL_NATIVE)
      r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, a_.m128i, 0xf3);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values | ~(b_.values);
    #else
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] | ~b_.values[i];
      }
    #endif
    return simde_int8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vornq_s8
  #define vornq_s8(a, b) simde_vornq_s8((a), (b))
#endif
/* vornq_s16: lanewise OR-complement, r = a | ~b, on 128-bit int16x8 vectors.
 * Same backend cascade as vornq_s8 (bitwise op, element width irrelevant). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vornq_s16(simde_int16x8_t a, simde_int16x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vornq_s16(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
    return vec_orc(a, b);
  #else
    simde_int16x8_private
      a_ = simde_int16x8_to_private(a),
      b_ = simde_int16x8_to_private(b),
      r_;
    #if defined(SIMDE_X86_AVX512VL_NATIVE)
      r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, a_.m128i, 0xf3);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values | ~(b_.values);
    #else
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] | ~b_.values[i];
      }
    #endif
    return simde_int16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vornq_s16
  #define vornq_s16(a, b) simde_vornq_s16((a), (b))
#endif
/* vornq_s32: lanewise OR-complement, r = a | ~b, on 128-bit int32x4 vectors.
 * Same backend cascade as vornq_s8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vornq_s32(simde_int32x4_t a, simde_int32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vornq_s32(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
    return vec_orc(a, b);
  #else
    simde_int32x4_private
      a_ = simde_int32x4_to_private(a),
      b_ = simde_int32x4_to_private(b),
      r_;
    #if defined(SIMDE_X86_AVX512VL_NATIVE)
      r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, a_.m128i, 0xf3);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values | ~(b_.values);
    #else
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] | ~b_.values[i];
      }
    #endif
    return simde_int32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vornq_s32
  #define vornq_s32(a, b) simde_vornq_s32((a), (b))
#endif
/* vornq_s64: lanewise OR-complement, r = a | ~b, on 128-bit int64x2 vectors.
 * Same cascade as vornq_s8 but with the epi64 ternary-logic form on AVX512VL
 * (result is identical for a pure bitwise op). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vornq_s64(simde_int64x2_t a, simde_int64x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vornq_s64(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
    return vec_orc(a, b);
  #else
    simde_int64x2_private
      a_ = simde_int64x2_to_private(a),
      b_ = simde_int64x2_to_private(b),
      r_;
    #if defined(SIMDE_X86_AVX512VL_NATIVE)
      r_.m128i = _mm_ternarylogic_epi64(a_.m128i, b_.m128i, a_.m128i, 0xf3);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values | ~(b_.values);
    #else
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] | ~b_.values[i];
      }
    #endif
    return simde_int64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vornq_s64
  #define vornq_s64(a, b) simde_vornq_s64((a), (b))
#endif
/* vornq_u8: lanewise OR-complement, r = a | ~b, on 128-bit uint8x16 vectors.
 * Same backend cascade as vornq_s8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vornq_u8(simde_uint8x16_t a, simde_uint8x16_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vornq_u8(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
    return vec_orc(a, b);
  #else
    simde_uint8x16_private
      a_ = simde_uint8x16_to_private(a),
      b_ = simde_uint8x16_to_private(b),
      r_;
    #if defined(SIMDE_X86_AVX512VL_NATIVE)
      r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, a_.m128i, 0xf3);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values | ~(b_.values);
    #else
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] | ~b_.values[i];
      }
    #endif
    return simde_uint8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vornq_u8
  #define vornq_u8(a, b) simde_vornq_u8((a), (b))
#endif
/* vornq_u16: lanewise OR-complement, r = a | ~b, on 128-bit uint16x8 vectors.
 * Same backend cascade as vornq_s8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vornq_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vornq_u16(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
    return vec_orc(a, b);
  #else
    simde_uint16x8_private
      a_ = simde_uint16x8_to_private(a),
      b_ = simde_uint16x8_to_private(b),
      r_;
    #if defined(SIMDE_X86_AVX512VL_NATIVE)
      r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, a_.m128i, 0xf3);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values | ~(b_.values);
    #else
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] | ~b_.values[i];
      }
    #endif
    return simde_uint16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vornq_u16
  #define vornq_u16(a, b) simde_vornq_u16((a), (b))
#endif
/* vornq_u32: lanewise OR-complement, r = a | ~b, on 128-bit uint32x4 vectors.
 * Same backend cascade as vornq_s8. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vornq_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vornq_u32(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
    return vec_orc(a, b);
  #else
    simde_uint32x4_private
      a_ = simde_uint32x4_to_private(a),
      b_ = simde_uint32x4_to_private(b),
      r_;
    #if defined(SIMDE_X86_AVX512VL_NATIVE)
      r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, a_.m128i, 0xf3);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values | ~(b_.values);
    #else
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] | ~b_.values[i];
      }
    #endif
    return simde_uint32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vornq_u32
  #define vornq_u32(a, b) simde_vornq_u32((a), (b))
#endif
/* vornq_u64: lanewise OR-complement, r = a | ~b, on 128-bit uint64x2 vectors.
 * Same cascade as vornq_s64 (epi64 ternary-logic on AVX512VL). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vornq_u64(simde_uint64x2_t a, simde_uint64x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vornq_u64(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
    return vec_orc(a, b);
  #else
    simde_uint64x2_private
      a_ = simde_uint64x2_to_private(a),
      b_ = simde_uint64x2_to_private(b),
      r_;
    #if defined(SIMDE_X86_AVX512VL_NATIVE)
      r_.m128i = _mm_ternarylogic_epi64(a_.m128i, b_.m128i, a_.m128i, 0xf3);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values | ~(b_.values);
    #else
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] | ~b_.values[i];
      }
    #endif
    return simde_uint64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vornq_u64
  #define vornq_u64(a, b) simde_vornq_u64((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_ORN_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Zhi An Ng <zhin@google.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_LD1_LANE_H)
#define SIMDE_ARM_NEON_LD1_LANE_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vld1_lane_s8: load one int8 from ptr into lane `lane` of src; all other
 * lanes are preserved. lane must be a compile-time constant in [0, 7].
 * On native NEON the function is shadowed by a macro so the lane stays an
 * immediate, as the intrinsic requires. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t simde_vld1_lane_s8(int8_t const *ptr, simde_int8x8_t src,
                                  const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
  simde_int8x8_private r = simde_int8x8_to_private(src);
  r.values[lane] = *ptr;
  return simde_int8x8_from_private(r);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vld1_lane_s8(ptr, src, lane) vld1_lane_s8(ptr, src, lane)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1_lane_s8
  #define vld1_lane_s8(ptr, src, lane) simde_vld1_lane_s8((ptr), (src), (lane))
#endif
/* vld1_lane_s16: load one int16 from ptr into lane `lane` of src (others
 * preserved); lane must be a compile-time constant in [0, 3]. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t simde_vld1_lane_s16(int16_t const *ptr, simde_int16x4_t src,
                                    const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  simde_int16x4_private r = simde_int16x4_to_private(src);
  r.values[lane] = *ptr;
  return simde_int16x4_from_private(r);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vld1_lane_s16(ptr, src, lane) vld1_lane_s16(ptr, src, lane)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1_lane_s16
  #define vld1_lane_s16(ptr, src, lane) simde_vld1_lane_s16((ptr), (src), (lane))
#endif
/* vld1_lane_s32: load one int32 from ptr into lane `lane` of src (others
 * preserved); lane must be a compile-time constant in [0, 1]. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t simde_vld1_lane_s32(int32_t const *ptr, simde_int32x2_t src,
                                    const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  simde_int32x2_private r = simde_int32x2_to_private(src);
  r.values[lane] = *ptr;
  return simde_int32x2_from_private(r);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vld1_lane_s32(ptr, src, lane) vld1_lane_s32(ptr, src, lane)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1_lane_s32
  #define vld1_lane_s32(ptr, src, lane) simde_vld1_lane_s32((ptr), (src), (lane))
#endif
/* vld1_lane_s64: load one int64 into the sole lane of a 64-bit vector;
 * lane must be the compile-time constant 0. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t simde_vld1_lane_s64(int64_t const *ptr, simde_int64x1_t src,
                                    const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
  simde_int64x1_private r = simde_int64x1_to_private(src);
  r.values[lane] = *ptr;
  return simde_int64x1_from_private(r);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vld1_lane_s64(ptr, src, lane) vld1_lane_s64(ptr, src, lane)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1_lane_s64
  #define vld1_lane_s64(ptr, src, lane) simde_vld1_lane_s64((ptr), (src), (lane))
#endif
/* vld1_lane_u8: load one uint8 from ptr into lane `lane` of src (others
 * preserved); lane must be a compile-time constant in [0, 7]. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t simde_vld1_lane_u8(uint8_t const *ptr, simde_uint8x8_t src,
                                   const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
  simde_uint8x8_private r = simde_uint8x8_to_private(src);
  r.values[lane] = *ptr;
  return simde_uint8x8_from_private(r);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vld1_lane_u8(ptr, src, lane) vld1_lane_u8(ptr, src, lane)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1_lane_u8
  #define vld1_lane_u8(ptr, src, lane) simde_vld1_lane_u8((ptr), (src), (lane))
#endif
/* vld1_lane_u16: load one uint16 from ptr into lane `lane` of src (others
 * preserved); lane must be a compile-time constant in [0, 3]. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t simde_vld1_lane_u16(uint16_t const *ptr, simde_uint16x4_t src,
                                     const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  simde_uint16x4_private r = simde_uint16x4_to_private(src);
  r.values[lane] = *ptr;
  return simde_uint16x4_from_private(r);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vld1_lane_u16(ptr, src, lane) vld1_lane_u16(ptr, src, lane)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1_lane_u16
  #define vld1_lane_u16(ptr, src, lane) simde_vld1_lane_u16((ptr), (src), (lane))
#endif
/* vld1_lane_u32: load one uint32 from ptr into lane `lane` of src (others
 * preserved); lane must be a compile-time constant in [0, 1]. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t simde_vld1_lane_u32(uint32_t const *ptr, simde_uint32x2_t src,
                                     const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  simde_uint32x2_private r = simde_uint32x2_to_private(src);
  r.values[lane] = *ptr;
  return simde_uint32x2_from_private(r);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vld1_lane_u32(ptr, src, lane) vld1_lane_u32(ptr, src, lane)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1_lane_u32
  #define vld1_lane_u32(ptr, src, lane) simde_vld1_lane_u32((ptr), (src), (lane))
#endif
/* vld1_lane_u64: load one uint64 into the sole lane of a 64-bit vector;
 * lane must be the compile-time constant 0. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t simde_vld1_lane_u64(uint64_t const *ptr, simde_uint64x1_t src,
                                     const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
  simde_uint64x1_private r = simde_uint64x1_to_private(src);
  r.values[lane] = *ptr;
  return simde_uint64x1_from_private(r);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vld1_lane_u64(ptr, src, lane) vld1_lane_u64(ptr, src, lane)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1_lane_u64
  #define vld1_lane_u64(ptr, src, lane) simde_vld1_lane_u64((ptr), (src), (lane))
#endif
/* vld1_lane_f32: load one float32 from ptr into lane `lane` of src (others
 * preserved); lane must be a compile-time constant in [0, 1]. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t simde_vld1_lane_f32(simde_float32_t const *ptr, simde_float32x2_t src,
                                      const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  simde_float32x2_private r = simde_float32x2_to_private(src);
  r.values[lane] = *ptr;
  return simde_float32x2_from_private(r);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vld1_lane_f32(ptr, src, lane) vld1_lane_f32(ptr, src, lane)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1_lane_f32
  #define vld1_lane_f32(ptr, src, lane) simde_vld1_lane_f32((ptr), (src), (lane))
#endif
/* vld1_lane_f64: load one float64 into the sole lane of a 64-bit vector;
 * lane must be the compile-time constant 0. Native form is AArch64-only. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t simde_vld1_lane_f64(simde_float64_t const *ptr, simde_float64x1_t src,
                                      const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
  simde_float64x1_private r = simde_float64x1_to_private(src);
  r.values[lane] = *ptr;
  return simde_float64x1_from_private(r);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vld1_lane_f64(ptr, src, lane) vld1_lane_f64(ptr, src, lane)
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vld1_lane_f64
  #define vld1_lane_f64(ptr, src, lane) simde_vld1_lane_f64((ptr), (src), (lane))
#endif
/* vld1q_lane_s8: 128-bit variant — load one int8 from ptr into lane `lane`
 * of src (others preserved); lane must be a constant in [0, 15]. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t simde_vld1q_lane_s8(int8_t const *ptr, simde_int8x16_t src,
                                    const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) {
  simde_int8x16_private r = simde_int8x16_to_private(src);
  r.values[lane] = *ptr;
  return simde_int8x16_from_private(r);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vld1q_lane_s8(ptr, src, lane) vld1q_lane_s8(ptr, src, lane)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1q_lane_s8
  #define vld1q_lane_s8(ptr, src, lane) simde_vld1q_lane_s8((ptr), (src), (lane))
#endif
/* vld1q_lane_s16: 128-bit variant — load one int16 into lane `lane` of src;
 * lane must be a constant in [0, 7]. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t simde_vld1q_lane_s16(int16_t const *ptr, simde_int16x8_t src,
                                     const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
  simde_int16x8_private r = simde_int16x8_to_private(src);
  r.values[lane] = *ptr;
  return simde_int16x8_from_private(r);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vld1q_lane_s16(ptr, src, lane) vld1q_lane_s16(ptr, src, lane)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1q_lane_s16
  #define vld1q_lane_s16(ptr, src, lane) simde_vld1q_lane_s16((ptr), (src), (lane))
#endif
/* vld1q_lane_s32: 128-bit variant — load one int32 into lane `lane` of src;
 * lane must be a constant in [0, 3]. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t simde_vld1q_lane_s32(int32_t const *ptr, simde_int32x4_t src,
                                     const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  simde_int32x4_private r = simde_int32x4_to_private(src);
  r.values[lane] = *ptr;
  return simde_int32x4_from_private(r);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vld1q_lane_s32(ptr, src, lane) vld1q_lane_s32(ptr, src, lane)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1q_lane_s32
  #define vld1q_lane_s32(ptr, src, lane) simde_vld1q_lane_s32((ptr), (src), (lane))
#endif
/* vld1q_lane_s64: 128-bit variant — load one int64 into lane `lane` of src;
 * lane must be a constant in [0, 1]. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t simde_vld1q_lane_s64(int64_t const *ptr, simde_int64x2_t src,
                                     const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  simde_int64x2_private r = simde_int64x2_to_private(src);
  r.values[lane] = *ptr;
  return simde_int64x2_from_private(r);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vld1q_lane_s64(ptr, src, lane) vld1q_lane_s64(ptr, src, lane)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1q_lane_s64
  #define vld1q_lane_s64(ptr, src, lane) simde_vld1q_lane_s64((ptr), (src), (lane))
#endif
/* vld1q_lane_u8: 128-bit variant — load one uint8 into lane `lane` of src;
 * lane must be a constant in [0, 15]. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t simde_vld1q_lane_u8(uint8_t const *ptr, simde_uint8x16_t src,
                                     const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) {
  simde_uint8x16_private r = simde_uint8x16_to_private(src);
  r.values[lane] = *ptr;
  return simde_uint8x16_from_private(r);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vld1q_lane_u8(ptr, src, lane) vld1q_lane_u8(ptr, src, lane)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1q_lane_u8
  #define vld1q_lane_u8(ptr, src, lane) simde_vld1q_lane_u8((ptr), (src), (lane))
#endif
/* vld1q_lane_u16: 128-bit variant — load one uint16 into lane `lane` of src;
 * lane must be a constant in [0, 7]. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t simde_vld1q_lane_u16(uint16_t const *ptr, simde_uint16x8_t src,
                                      const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
  simde_uint16x8_private r = simde_uint16x8_to_private(src);
  r.values[lane] = *ptr;
  return simde_uint16x8_from_private(r);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vld1q_lane_u16(ptr, src, lane) vld1q_lane_u16(ptr, src, lane)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1q_lane_u16
  #define vld1q_lane_u16(ptr, src, lane) simde_vld1q_lane_u16((ptr), (src), (lane))
#endif
/* vld1q_lane_u32: 128-bit variant — load one uint32 into lane `lane` of src;
 * lane must be a constant in [0, 3]. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t simde_vld1q_lane_u32(uint32_t const *ptr, simde_uint32x4_t src,
                                      const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  simde_uint32x4_private r = simde_uint32x4_to_private(src);
  r.values[lane] = *ptr;
  return simde_uint32x4_from_private(r);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vld1q_lane_u32(ptr, src, lane) vld1q_lane_u32(ptr, src, lane)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1q_lane_u32
  #define vld1q_lane_u32(ptr, src, lane) simde_vld1q_lane_u32((ptr), (src), (lane))
#endif
/* vld1q_lane_u64: 128-bit variant — load one uint64 into lane `lane` of src;
 * lane must be a constant in [0, 1]. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t simde_vld1q_lane_u64(uint64_t const *ptr, simde_uint64x2_t src,
                                      const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  simde_uint64x2_private r = simde_uint64x2_to_private(src);
  r.values[lane] = *ptr;
  return simde_uint64x2_from_private(r);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vld1q_lane_u64(ptr, src, lane) vld1q_lane_u64(ptr, src, lane)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1q_lane_u64
  #define vld1q_lane_u64(ptr, src, lane) simde_vld1q_lane_u64((ptr), (src), (lane))
#endif
/* vld1q_lane_f32: 128-bit variant — load one float32 into lane `lane` of src;
 * lane must be a constant in [0, 3]. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t simde_vld1q_lane_f32(simde_float32_t const *ptr, simde_float32x4_t src,
                                       const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  simde_float32x4_private r = simde_float32x4_to_private(src);
  r.values[lane] = *ptr;
  return simde_float32x4_from_private(r);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define simde_vld1q_lane_f32(ptr, src, lane) vld1q_lane_f32(ptr, src, lane)
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vld1q_lane_f32
  #define vld1q_lane_f32(ptr, src, lane) simde_vld1q_lane_f32((ptr), (src), (lane))
#endif
/* vld1q_lane_f64: 128-bit variant — load one float64 into lane `lane` of src;
 * lane must be a constant in [0, 1]. Native form is AArch64-only. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t simde_vld1q_lane_f64(simde_float64_t const *ptr, simde_float64x2_t src,
                                       const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  simde_float64x2_private r = simde_float64x2_to_private(src);
  r.values[lane] = *ptr;
  return simde_float64x2_from_private(r);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  #define simde_vld1q_lane_f64(ptr, src, lane) vld1q_lane_f64(ptr, src, lane)
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vld1q_lane_f64
  #define vld1q_lane_f64(ptr, src, lane) simde_vld1q_lane_f64((ptr), (src), (lane))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_LD1_LANE_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_PMAX_H)
#define SIMDE_ARM_NEON_PMAX_H
#include "types.h"
#include "max.h"
#include "uzp1.h"
#include "uzp2.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vpmaxs_f32: horizontal reduction of a float32x2 to the larger of its
 * two lanes. Uses the native AArch64 intrinsic when available. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32_t
simde_vpmaxs_f32(simde_float32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vpmaxs_f32(a);
  #else
    simde_float32x2_private a_ = simde_float32x2_to_private(a);
    simde_float32_t lo = a_.values[0];
    simde_float32_t hi = a_.values[1];
    return (lo > hi) ? lo : hi;
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vpmaxs_f32
  #define vpmaxs_f32(a) simde_vpmaxs_f32((a))
#endif
/* vpmaxqd_f64: horizontal reduction of a float64x2 to the larger of its
 * two lanes. Uses the native AArch64 intrinsic when available. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64_t
simde_vpmaxqd_f64(simde_float64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vpmaxqd_f64(a);
  #else
    simde_float64x2_private a_ = simde_float64x2_to_private(a);
    simde_float64_t lo = a_.values[0];
    simde_float64_t hi = a_.values[1];
    return (lo > hi) ? lo : hi;
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vpmaxqd_f64
  #define vpmaxqd_f64(a) simde_vpmaxqd_f64((a))
#endif
/* vpmax_f32: pairwise maximum over the concatenation of a and b —
 * de-interleave into even/odd lanes, then take the lanewise maximum. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vpmax_f32(simde_float32x2_t a, simde_float32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vpmax_f32(a, b);
  #else
    simde_float32x2_t evens = simde_vuzp1_f32(a, b);
    simde_float32x2_t odds = simde_vuzp2_f32(a, b);
    return simde_vmax_f32(evens, odds);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vpmax_f32
  #define vpmax_f32(a, b) simde_vpmax_f32((a), (b))
#endif
/* vpmax_s8: pairwise maximum over the concatenation of a and b (int8x8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vpmax_s8(simde_int8x8_t a, simde_int8x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vpmax_s8(a, b);
  #else
    simde_int8x8_t evens = simde_vuzp1_s8(a, b);
    simde_int8x8_t odds = simde_vuzp2_s8(a, b);
    return simde_vmax_s8(evens, odds);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vpmax_s8
  #define vpmax_s8(a, b) simde_vpmax_s8((a), (b))
#endif
/* vpmax_s16: pairwise maximum over the concatenation of a and b (int16x4). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vpmax_s16(simde_int16x4_t a, simde_int16x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vpmax_s16(a, b);
  #else
    simde_int16x4_t evens = simde_vuzp1_s16(a, b);
    simde_int16x4_t odds = simde_vuzp2_s16(a, b);
    return simde_vmax_s16(evens, odds);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vpmax_s16
  #define vpmax_s16(a, b) simde_vpmax_s16((a), (b))
#endif
/* vpmax_s32: pairwise maximum over the concatenation of a and b (int32x2). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vpmax_s32(simde_int32x2_t a, simde_int32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vpmax_s32(a, b);
  #else
    simde_int32x2_t evens = simde_vuzp1_s32(a, b);
    simde_int32x2_t odds = simde_vuzp2_s32(a, b);
    return simde_vmax_s32(evens, odds);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vpmax_s32
  #define vpmax_s32(a, b) simde_vpmax_s32((a), (b))
#endif
/* vpmax_u8: pairwise maximum over the concatenation of a and b (uint8x8). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vpmax_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vpmax_u8(a, b);
  #else
    simde_uint8x8_t evens = simde_vuzp1_u8(a, b);
    simde_uint8x8_t odds = simde_vuzp2_u8(a, b);
    return simde_vmax_u8(evens, odds);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vpmax_u8
  #define vpmax_u8(a, b) simde_vpmax_u8((a), (b))
#endif
/* vpmax_u16: pairwise maximum over the concatenation of a and b (uint16x4). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vpmax_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vpmax_u16(a, b);
  #else
    simde_uint16x4_t evens = simde_vuzp1_u16(a, b);
    simde_uint16x4_t odds = simde_vuzp2_u16(a, b);
    return simde_vmax_u16(evens, odds);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vpmax_u16
  #define vpmax_u16(a, b) simde_vpmax_u16((a), (b))
#endif
/* vpmax_u32: pairwise maximum over the concatenation of a and b (uint32x2). */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vpmax_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vpmax_u32(a, b);
  #else
    simde_uint32x2_t evens = simde_vuzp1_u32(a, b);
    simde_uint32x2_t odds = simde_vuzp2_u32(a, b);
    return simde_vmax_u32(evens, odds);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vpmax_u32
  #define vpmax_u32(a, b) simde_vpmax_u32((a), (b))
#endif
/* vpmaxq_f32: 128-bit pairwise maximum (float32x4); native on AArch64. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vpmaxq_f32(simde_float32x4_t a, simde_float32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vpmaxq_f32(a, b);
  #else
    simde_float32x4_t evens = simde_vuzp1q_f32(a, b);
    simde_float32x4_t odds = simde_vuzp2q_f32(a, b);
    return simde_vmaxq_f32(evens, odds);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vpmaxq_f32
  #define vpmaxq_f32(a, b) simde_vpmaxq_f32((a), (b))
#endif
/* vpmaxq_f64: 128-bit pairwise maximum (float64x2); native on AArch64. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vpmaxq_f64(simde_float64x2_t a, simde_float64x2_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vpmaxq_f64(a, b);
  #else
    simde_float64x2_t evens = simde_vuzp1q_f64(a, b);
    simde_float64x2_t odds = simde_vuzp2q_f64(a, b);
    return simde_vmaxq_f64(evens, odds);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vpmaxq_f64
  #define vpmaxq_f64(a, b) simde_vpmaxq_f64((a), (b))
#endif
/* vpmaxq_s8: 128-bit pairwise maximum (int8x16); native on AArch64. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vpmaxq_s8(simde_int8x16_t a, simde_int8x16_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vpmaxq_s8(a, b);
  #else
    simde_int8x16_t evens = simde_vuzp1q_s8(a, b);
    simde_int8x16_t odds = simde_vuzp2q_s8(a, b);
    return simde_vmaxq_s8(evens, odds);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vpmaxq_s8
  #define vpmaxq_s8(a, b) simde_vpmaxq_s8((a), (b))
#endif
/* vpmaxq_s16: 128-bit pairwise maximum (int16x8); native on AArch64. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vpmaxq_s16(simde_int16x8_t a, simde_int16x8_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vpmaxq_s16(a, b);
  #else
    simde_int16x8_t evens = simde_vuzp1q_s16(a, b);
    simde_int16x8_t odds = simde_vuzp2q_s16(a, b);
    return simde_vmaxq_s16(evens, odds);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vpmaxq_s16
  #define vpmaxq_s16(a, b) simde_vpmaxq_s16((a), (b))
#endif
/* vpmaxq_s32: 128-bit pairwise maximum (int32x4); native on AArch64. */
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vpmaxq_s32(simde_int32x4_t a, simde_int32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vpmaxq_s32(a, b);
  #else
    simde_int32x4_t evens = simde_vuzp1q_s32(a, b);
    simde_int32x4_t odds = simde_vuzp2q_s32(a, b);
    return simde_vmaxq_s32(evens, odds);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vpmaxq_s32
  #define vpmaxq_s32(a, b) simde_vpmaxq_s32((a), (b))
#endif
/* vpmaxq_u8: 128-bit pairwise maximum (uint8x16); native on AArch64. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vpmaxq_u8(simde_uint8x16_t a, simde_uint8x16_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vpmaxq_u8(a, b);
  #else
    simde_uint8x16_t evens = simde_vuzp1q_u8(a, b);
    simde_uint8x16_t odds = simde_vuzp2q_u8(a, b);
    return simde_vmaxq_u8(evens, odds);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vpmaxq_u8
  #define vpmaxq_u8(a, b) simde_vpmaxq_u8((a), (b))
#endif
/* vpmaxq_u16: 128-bit pairwise maximum (uint16x8); native on AArch64. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vpmaxq_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vpmaxq_u16(a, b);
  #else
    simde_uint16x8_t evens = simde_vuzp1q_u16(a, b);
    simde_uint16x8_t odds = simde_vuzp2q_u16(a, b);
    return simde_vmaxq_u16(evens, odds);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vpmaxq_u16
  #define vpmaxq_u16(a, b) simde_vpmaxq_u16((a), (b))
#endif
/* vpmaxq_u32: 128-bit pairwise maximum (uint32x4); native on AArch64. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vpmaxq_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vpmaxq_u32(a, b);
  #else
    simde_uint32x4_t evens = simde_vuzp1q_u32(a, b);
    simde_uint32x4_t odds = simde_vuzp2q_u32(a, b);
    return simde_vmaxq_u32(evens, odds);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vpmaxq_u32
  #define vpmaxq_u32(a, b) simde_vpmaxq_u32((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_PMAX_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2021 Zhi An Ng <zhin@google.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_RECPS_H)
#define SIMDE_ARM_NEON_RECPS_H
#include "dup_n.h"
#include "mls.h"
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vrecpss_f32: scalar Newton-Raphson reciprocal step, 2 - a*b.
 * One iteration of refining x ≈ 1/d uses x' = x * vrecps(d, x). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32_t
simde_vrecpss_f32(simde_float32_t a, simde_float32_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vrecpss_f32(a, b);
  #else
    const simde_float32_t two = SIMDE_FLOAT32_C(2.0);
    return two - (a * b);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vrecpss_f32
  #define vrecpss_f32(a, b) simde_vrecpss_f32((a), (b))
#endif
/* vrecpsd_f64: scalar Newton-Raphson reciprocal step, 2 - a*b (float64). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64_t
simde_vrecpsd_f64(simde_float64_t a, simde_float64_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vrecpsd_f64(a, b);
  #else
    const simde_float64_t two = SIMDE_FLOAT64_C(2.0);
    return two - (a * b);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vrecpsd_f64
  #define vrecpsd_f64(a, b) simde_vrecpsd_f64((a), (b))
#endif
/* vrecps_f64: lanewise Newton-Raphson reciprocal step, 2 - a*b, expressed
 * as a multiply-subtract from a broadcast 2.0 (float64x1). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vrecps_f64(simde_float64x1_t a, simde_float64x1_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vrecps_f64(a, b);
  #else
    simde_float64x1_t two = simde_vdup_n_f64(SIMDE_FLOAT64_C(2.0));
    return simde_vmls_f64(two, a, b);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vrecps_f64
  #define vrecps_f64(a, b) simde_vrecps_f64((a), (b))
#endif
/* vrecps_f32: lanewise Newton-Raphson reciprocal step, 2 - a*b (float32x2). */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vrecps_f32(simde_float32x2_t a, simde_float32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vrecps_f32(a, b);
  #else
    simde_float32x2_t two = simde_vdup_n_f32(SIMDE_FLOAT32_C(2.0));
    return simde_vmls_f32(two, a, b);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vrecps_f32
  #define vrecps_f32(a, b) simde_vrecps_f32((a), (b))
#endif
/* vrecpsq_f64: 128-bit lanewise Newton-Raphson reciprocal step, 2 - a*b. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vrecpsq_f64(simde_float64x2_t a, simde_float64x2_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vrecpsq_f64(a, b);
  #else
    simde_float64x2_t two = simde_vdupq_n_f64(SIMDE_FLOAT64_C(2.0));
    return simde_vmlsq_f64(two, a, b);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vrecpsq_f64
  #define vrecpsq_f64(a, b) simde_vrecpsq_f64((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vrecpsq_f32(simde_float32x4_t a, simde_float32x4_t b) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vrecpsq_f32(a, b);
#else
return simde_vmlsq_f32(simde_vdupq_n_f32(SIMDE_FLOAT32_C(2.0)), a, b);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vrecpsq_f32
#define vrecpsq_f32(a, b) simde_vrecpsq_f32((a), (b))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_RECPS_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_MLA_N_H)
#define SIMDE_ARM_NEON_MLA_N_H
#include "types.h"
#include "add.h"
#include "mul.h"
#include "mul_n.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vmla_n: vector multiply-accumulate by scalar, r = a + b * c (element-wise).
 * Each function has three tiers:
 *   1. native NEON intrinsic when available,
 *   2. a GCC/Clang vector-extension expression (SIMDE_VECTOR_SUBSCRIPT_SCALAR),
 *      guarded against known compiler bugs (GCC PR 53784 / PR 100762),
 *   3. a plain scalar loop fallback. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vmla_n_f32(simde_float32x2_t a, simde_float32x2_t b, simde_float32 c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmla_n_f32(a, b, c);
#else
simde_float32x2_private
r_,
a_ = simde_float32x2_to_private(a),
b_ = simde_float32x2_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_53784)
r_.values = (b_.values * c) + a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (b_.values[i] * c) + a_.values[i];
}
#endif
return simde_float32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmla_n_f32
#define vmla_n_f32(a, b, c) simde_vmla_n_f32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vmla_n_s16(simde_int16x4_t a, simde_int16x4_t b, int16_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmla_n_s16(a, b, c);
#else
simde_int16x4_private
r_,
a_ = simde_int16x4_to_private(a),
b_ = simde_int16x4_to_private(b);
/* This variant needs both GCC-bug guards; keep them exactly as-is. */
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_53784) && !defined(SIMDE_BUG_GCC_100762)
r_.values = (b_.values * c) + a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (b_.values[i] * c) + a_.values[i];
}
#endif
return simde_int16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmla_n_s16
#define vmla_n_s16(a, b, c) simde_vmla_n_s16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vmla_n_s32(simde_int32x2_t a, simde_int32x2_t b, int32_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmla_n_s32(a, b, c);
#else
simde_int32x2_private
r_,
a_ = simde_int32x2_to_private(a),
b_ = simde_int32x2_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762)
r_.values = (b_.values * c) + a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (b_.values[i] * c) + a_.values[i];
}
#endif
return simde_int32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmla_n_s32
#define vmla_n_s32(a, b, c) simde_vmla_n_s32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vmla_n_u16(simde_uint16x4_t a, simde_uint16x4_t b, uint16_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmla_n_u16(a, b, c);
#else
simde_uint16x4_private
r_,
a_ = simde_uint16x4_to_private(a),
b_ = simde_uint16x4_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762)
r_.values = (b_.values * c) + a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (b_.values[i] * c) + a_.values[i];
}
#endif
return simde_uint16x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmla_n_u16
#define vmla_n_u16(a, b, c) simde_vmla_n_u16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vmla_n_u32(simde_uint32x2_t a, simde_uint32x2_t b, uint32_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmla_n_u32(a, b, c);
#else
simde_uint32x2_private
r_,
a_ = simde_uint32x2_to_private(a),
b_ = simde_uint32x2_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762)
r_.values = (b_.values * c) + a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (b_.values[i] * c) + a_.values[i];
}
#endif
return simde_uint32x2_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmla_n_u32
#define vmla_n_u32(a, b, c) simde_vmla_n_u32((a), (b), (c))
#endif
/* 128-bit "q" variants: these add a middle tier that composes existing
 * SIMDe ops (vaddq + vmulq_n) when the natural vector size is <= 128,
 * so other backends (SSE, WASM, ...) are reused. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vmlaq_n_f32(simde_float32x4_t a, simde_float32x4_t b, simde_float32 c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmlaq_n_f32(a, b, c);
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128)
return simde_vaddq_f32(simde_vmulq_n_f32(b, c), a);
#else
simde_float32x4_private
r_,
a_ = simde_float32x4_to_private(a),
b_ = simde_float32x4_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_53784)
r_.values = (b_.values * c) + a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (b_.values[i] * c) + a_.values[i];
}
#endif
return simde_float32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlaq_n_f32
#define vmlaq_n_f32(a, b, c) simde_vmlaq_n_f32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vmlaq_n_s16(simde_int16x8_t a, simde_int16x8_t b, int16_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmlaq_n_s16(a, b, c);
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128)
return simde_vaddq_s16(simde_vmulq_n_s16(b, c), a);
#else
simde_int16x8_private
r_,
a_ = simde_int16x8_to_private(a),
b_ = simde_int16x8_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_53784)
r_.values = (b_.values * c) + a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (b_.values[i] * c) + a_.values[i];
}
#endif
return simde_int16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlaq_n_s16
#define vmlaq_n_s16(a, b, c) simde_vmlaq_n_s16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vmlaq_n_s32(simde_int32x4_t a, simde_int32x4_t b, int32_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmlaq_n_s32(a, b, c);
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128)
return simde_vaddq_s32(simde_vmulq_n_s32(b, c), a);
#else
simde_int32x4_private
r_,
a_ = simde_int32x4_to_private(a),
b_ = simde_int32x4_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.values = (b_.values * c) + a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (b_.values[i] * c) + a_.values[i];
}
#endif
return simde_int32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlaq_n_s32
#define vmlaq_n_s32(a, b, c) simde_vmlaq_n_s32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vmlaq_n_u16(simde_uint16x8_t a, simde_uint16x8_t b, uint16_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmlaq_n_u16(a, b, c);
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128)
return simde_vaddq_u16(simde_vmulq_n_u16(b, c), a);
#else
simde_uint16x8_private
r_,
a_ = simde_uint16x8_to_private(a),
b_ = simde_uint16x8_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.values = (b_.values * c) + a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (b_.values[i] * c) + a_.values[i];
}
#endif
return simde_uint16x8_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlaq_n_u16
#define vmlaq_n_u16(a, b, c) simde_vmlaq_n_u16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vmlaq_n_u32(simde_uint32x4_t a, simde_uint32x4_t b, uint32_t c) {
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vmlaq_n_u32(a, b, c);
#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128)
return simde_vaddq_u32(simde_vmulq_n_u32(b, c), a);
#else
simde_uint32x4_private
r_,
a_ = simde_uint32x4_to_private(a),
b_ = simde_uint32x4_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.values = (b_.values * c) + a_.values;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = (b_.values[i] * c) + a_.values[i];
}
#endif
return simde_uint32x4_from_private(r_);
#endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmlaq_n_u32
#define vmlaq_n_u32(a, b, c) simde_vmlaq_n_u32((a), (b), (c))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_MLA_N_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
* 2020 Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
*/
#if !defined(SIMDE_ARM_NEON_MOVL_HIGH_H)
#define SIMDE_ARM_NEON_MOVL_HIGH_H
#include "types.h"
#include "movl.h"
#include "get_high.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* vmovl_high: widen the upper half of a 128-bit vector, element-wise
 * (e.g. top 8 lanes of int8x16 -> int16x8).  Portable path composes
 * vget_high (take upper half) with vmovl (widen a 64-bit vector). */
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vmovl_high_s8(simde_int8x16_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmovl_high_s8(a);
#else
return simde_vmovl_s8(simde_vget_high_s8(a));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmovl_high_s8
#define vmovl_high_s8(a) simde_vmovl_high_s8((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vmovl_high_s16(simde_int16x8_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmovl_high_s16(a);
#else
return simde_vmovl_s16(simde_vget_high_s16(a));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmovl_high_s16
#define vmovl_high_s16(a) simde_vmovl_high_s16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vmovl_high_s32(simde_int32x4_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmovl_high_s32(a);
#else
return simde_vmovl_s32(simde_vget_high_s32(a));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmovl_high_s32
#define vmovl_high_s32(a) simde_vmovl_high_s32((a))
#endif
/* Unsigned variants: zero-extension instead of sign-extension. */
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vmovl_high_u8(simde_uint8x16_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmovl_high_u8(a);
#else
return simde_vmovl_u8(simde_vget_high_u8(a));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmovl_high_u8
#define vmovl_high_u8(a) simde_vmovl_high_u8((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vmovl_high_u16(simde_uint16x8_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmovl_high_u16(a);
#else
return simde_vmovl_u16(simde_vget_high_u16(a));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmovl_high_u16
#define vmovl_high_u16(a) simde_vmovl_high_u16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vmovl_high_u32(simde_uint32x4_t a) {
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vmovl_high_u32(a);
#else
return simde_vmovl_u32(simde_vget_high_u32(a));
#endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmovl_high_u32
#define vmovl_high_u32(a) simde_vmovl_high_u32((a))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_MOVL_HIGH_H) */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_ARM_NEON_MUL_LANE_H)
#define SIMDE_ARM_NEON_MUL_LANE_H
#include "types.h"
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde_float64_t
simde_vmuld_lane_f64(simde_float64_t a, simde_float64x1_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
return a * simde_float64x1_to_private(b).values[lane];
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0)
#define simde_vmuld_lane_f64(a, b, lane) \
SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vmuld_lane_f64(a, b, lane))
#else
#define simde_vmuld_lane_f64(a, b, lane) vmuld_lane_f64((a), (b), (lane))
#endif
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmuld_lane_f64
#define vmuld_lane_f64(a, b, lane) simde_vmuld_lane_f64(a, b, lane)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64_t
simde_vmuld_laneq_f64(simde_float64_t a, simde_float64x2_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
return a * simde_float64x2_to_private(b).values[lane];
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0)
#define simde_vmuld_laneq_f64(a, b, lane) \
SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vmuld_laneq_f64(a, b, lane))
#else
#define simde_vmuld_laneq_f64(a, b, lane) vmuld_laneq_f64((a), (b), (lane))
#endif
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmuld_laneq_f64
#define vmuld_laneq_f64(a, b, lane) simde_vmuld_laneq_f64(a, b, lane)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32_t
simde_vmuls_lane_f32(simde_float32_t a, simde_float32x2_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
return a * simde_float32x2_to_private(b).values[lane];
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0)
#define simde_vmuls_lane_f32(a, b, lane) \
SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vmuls_lane_f32(a, b, lane))
#else
#define simde_vmuls_lane_f32(a, b, lane) vmuls_lane_f32((a), (b), (lane))
#endif
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmuls_lane_f32
#define vmuls_lane_f32(a, b, lane) simde_vmuls_lane_f32(a, b, lane)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32_t
simde_vmuls_laneq_f32(simde_float32_t a, simde_float32x4_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
return a * simde_float32x4_to_private(b).values[lane];
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0)
#define simde_vmuls_laneq_f32(a, b, lane) \
SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vmuls_laneq_f32(a, b, lane))
#else
#define simde_vmuls_laneq_f32(a, b, lane) vmuls_laneq_f32((a), (b), (lane))
#endif
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmuls_laneq_f32
#define vmuls_laneq_f32(a, b, lane) simde_vmuls_laneq_f32(a, b, lane)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vmul_lane_f32(simde_float32x2_t a, simde_float32x2_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
simde_float32x2_private
r_,
a_ = simde_float32x2_to_private(a),
b_ = simde_float32x2_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[lane];
}
return simde_float32x2_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmul_lane_f32(a, b, lane) vmul_lane_f32((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmul_lane_f32
#define vmul_lane_f32(a, b, lane) simde_vmul_lane_f32((a), (b), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vmul_lane_f64(simde_float64x1_t a, simde_float64x1_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
simde_float64x1_private
r_,
a_ = simde_float64x1_to_private(a),
b_ = simde_float64x1_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[lane];
}
return simde_float64x1_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vmul_lane_f64(a, b, lane) vmul_lane_f64((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmul_lane_f64
#define vmul_lane_f64(a, b, lane) simde_vmul_lane_f64((a), (b), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vmul_lane_s16(simde_int16x4_t a, simde_int16x4_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
simde_int16x4_private
r_,
a_ = simde_int16x4_to_private(a),
b_ = simde_int16x4_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[lane];
}
return simde_int16x4_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmul_lane_s16(a, b, lane) vmul_lane_s16((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmul_lane_s16
#define vmul_lane_s16(a, b, lane) simde_vmul_lane_s16((a), (b), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vmul_lane_s32(simde_int32x2_t a, simde_int32x2_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
simde_int32x2_private
r_,
a_ = simde_int32x2_to_private(a),
b_ = simde_int32x2_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[lane];
}
return simde_int32x2_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmul_lane_s32(a, b, lane) vmul_lane_s32((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmul_lane_s32
#define vmul_lane_s32(a, b, lane) simde_vmul_lane_s32((a), (b), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vmul_lane_u16(simde_uint16x4_t a, simde_uint16x4_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
simde_uint16x4_private
r_,
a_ = simde_uint16x4_to_private(a),
b_ = simde_uint16x4_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[lane];
}
return simde_uint16x4_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmul_lane_u16(a, b, lane) vmul_lane_u16((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmul_lane_u16
#define vmul_lane_u16(a, b, lane) simde_vmul_lane_u16((a), (b), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vmul_lane_u32(simde_uint32x2_t a, simde_uint32x2_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
simde_uint32x2_private
r_,
a_ = simde_uint32x2_to_private(a),
b_ = simde_uint32x2_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[lane];
}
return simde_uint32x2_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmul_lane_u32(a, b, lane) vmul_lane_u32((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmul_lane_u32
#define vmul_lane_u32(a, b, lane) simde_vmul_lane_u32((a), (b), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vmul_laneq_s16(simde_int16x4_t a, simde_int16x8_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
simde_int16x4_private
r_,
a_ = simde_int16x4_to_private(a);
simde_int16x8_private
b_ = simde_int16x8_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[lane];
}
return simde_int16x4_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vmul_laneq_s16(a, b, lane) vmul_laneq_s16((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmul_laneq_s16
#define vmul_laneq_s16(a, b, lane) simde_vmul_laneq_s16((a), (b), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vmul_laneq_s32(simde_int32x2_t a, simde_int32x4_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
simde_int32x2_private
r_,
a_ = simde_int32x2_to_private(a);
simde_int32x4_private
b_ = simde_int32x4_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[lane];
}
return simde_int32x2_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vmul_laneq_s32(a, b, lane) vmul_laneq_s32((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmul_laneq_s32
#define vmul_laneq_s32(a, b, lane) simde_vmul_laneq_s32((a), (b), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vmul_laneq_u16(simde_uint16x4_t a, simde_uint16x8_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
simde_uint16x4_private
r_,
a_ = simde_uint16x4_to_private(a);
simde_uint16x8_private
b_ = simde_uint16x8_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[lane];
}
return simde_uint16x4_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vmul_laneq_u16(a, b, lane) vmul_laneq_u16((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmul_laneq_u16
#define vmul_laneq_u16(a, b, lane) simde_vmul_laneq_u16((a), (b), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vmul_laneq_u32(simde_uint32x2_t a, simde_uint32x4_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
simde_uint32x2_private
r_,
a_ = simde_uint32x2_to_private(a);
simde_uint32x4_private
b_ = simde_uint32x4_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[lane];
}
return simde_uint32x2_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vmul_laneq_u32(a, b, lane) vmul_laneq_u32((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmul_laneq_u32
#define vmul_laneq_u32(a, b, lane) simde_vmul_laneq_u32((a), (b), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vmulq_lane_f32(simde_float32x4_t a, simde_float32x2_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
simde_float32x4_private
r_,
a_ = simde_float32x4_to_private(a);
simde_float32x2_private b_ = simde_float32x2_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[lane];
}
return simde_float32x4_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmulq_lane_f32(a, b, lane) vmulq_lane_f32((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmulq_lane_f32
#define vmulq_lane_f32(a, b, lane) simde_vmulq_lane_f32((a), (b), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vmulq_lane_f64(simde_float64x2_t a, simde_float64x1_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) {
simde_float64x2_private
r_,
a_ = simde_float64x2_to_private(a);
simde_float64x1_private b_ = simde_float64x1_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[lane];
}
return simde_float64x2_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vmulq_lane_f64(a, b, lane) vmulq_lane_f64((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmulq_lane_f64
#define vmulq_lane_f64(a, b, lane) simde_vmulq_lane_f64((a), (b), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vmulq_lane_s16(simde_int16x8_t a, simde_int16x4_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
simde_int16x8_private
r_,
a_ = simde_int16x8_to_private(a);
simde_int16x4_private b_ = simde_int16x4_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[lane];
}
return simde_int16x8_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmulq_lane_s16(a, b, lane) vmulq_lane_s16((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmulq_lane_s16
#define vmulq_lane_s16(a, b, lane) simde_vmulq_lane_s16((a), (b), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vmulq_lane_s32(simde_int32x4_t a, simde_int32x2_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
simde_int32x4_private
r_,
a_ = simde_int32x4_to_private(a);
simde_int32x2_private b_ = simde_int32x2_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[lane];
}
return simde_int32x4_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmulq_lane_s32(a, b, lane) vmulq_lane_s32((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmulq_lane_s32
#define vmulq_lane_s32(a, b, lane) simde_vmulq_lane_s32((a), (b), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vmulq_lane_u16(simde_uint16x8_t a, simde_uint16x4_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
simde_uint16x8_private
r_,
a_ = simde_uint16x8_to_private(a);
simde_uint16x4_private b_ = simde_uint16x4_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[lane];
}
return simde_uint16x8_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmulq_lane_u16(a, b, lane) vmulq_lane_u16((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmulq_lane_u16
#define vmulq_lane_u16(a, b, lane) simde_vmulq_lane_u16((a), (b), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vmulq_lane_u32(simde_uint32x4_t a, simde_uint32x2_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
simde_uint32x4_private
r_,
a_ = simde_uint32x4_to_private(a);
simde_uint32x2_private b_ = simde_uint32x2_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[lane];
}
return simde_uint32x4_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_vmulq_lane_u32(a, b, lane) vmulq_lane_u32((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
#undef vmulq_lane_u32
#define vmulq_lane_u32(a, b, lane) simde_vmulq_lane_u32((a), (b), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vmulq_laneq_f32(simde_float32x4_t a, simde_float32x4_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
simde_float32x4_private
r_,
a_ = simde_float32x4_to_private(a),
b_ = simde_float32x4_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[lane];
}
return simde_float32x4_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vmulq_laneq_f32(a, b, lane) vmulq_laneq_f32((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmulq_laneq_f32
#define vmulq_laneq_f32(a, b, lane) simde_vmulq_laneq_f32((a), (b), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vmulq_laneq_f64(simde_float64x2_t a, simde_float64x2_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
simde_float64x2_private
r_,
a_ = simde_float64x2_to_private(a),
b_ = simde_float64x2_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[lane];
}
return simde_float64x2_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vmulq_laneq_f64(a, b, lane) vmulq_laneq_f64((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmulq_laneq_f64
#define vmulq_laneq_f64(a, b, lane) simde_vmulq_laneq_f64((a), (b), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vmulq_laneq_s16(simde_int16x8_t a, simde_int16x8_t b, const int lane)
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
simde_int16x8_private
r_,
a_ = simde_int16x8_to_private(a),
b_ = simde_int16x8_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
r_.values[i] = a_.values[i] * b_.values[lane];
}
return simde_int16x8_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vmulq_laneq_s16(a, b, lane) vmulq_laneq_s16((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmulq_laneq_s16
#define vmulq_laneq_s16(a, b, lane) simde_vmulq_laneq_s16((a), (b), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vmulq_laneq_s32(simde_int32x4_t a, simde_int32x4_t b, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  /* Portable fallback: multiply every element of a by lane `lane` of b. */
  simde_int32x4_private
    r_,
    a_ = simde_int32x4_to_private(a),
    b_ = simde_int32x4_to_private(b);
  /* Hoist the broadcast scalar; b_ is not modified inside the loop. */
  const int32_t lane_value = b_.values[lane];
  SIMDE_VECTORIZE
  for (size_t idx = 0 ; idx < (sizeof(r_.values) / sizeof(r_.values[0])) ; idx++) {
    r_.values[idx] = a_.values[idx] * lane_value;
  }
  return simde_int32x4_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vmulq_laneq_s32(a, b, lane) vmulq_laneq_s32((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmulq_laneq_s32
#define vmulq_laneq_s32(a, b, lane) simde_vmulq_laneq_s32((a), (b), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vmulq_laneq_u16(simde_uint16x8_t a, simde_uint16x8_t b, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) {
  /* Portable fallback: multiply every element of a by lane `lane` of b. */
  simde_uint16x8_private
    r_,
    a_ = simde_uint16x8_to_private(a),
    b_ = simde_uint16x8_to_private(b);
  /* Hoist the broadcast scalar; b_ is not modified inside the loop. */
  const uint16_t lane_value = b_.values[lane];
  SIMDE_VECTORIZE
  for (size_t idx = 0 ; idx < (sizeof(r_.values) / sizeof(r_.values[0])) ; idx++) {
    r_.values[idx] = a_.values[idx] * lane_value;
  }
  return simde_uint16x8_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vmulq_laneq_u16(a, b, lane) vmulq_laneq_u16((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmulq_laneq_u16
#define vmulq_laneq_u16(a, b, lane) simde_vmulq_laneq_u16((a), (b), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vmulq_laneq_u32(simde_uint32x4_t a, simde_uint32x4_t b, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  /* Portable fallback: multiply every element of a by lane `lane` of b. */
  simde_uint32x4_private
    r_,
    a_ = simde_uint32x4_to_private(a),
    b_ = simde_uint32x4_to_private(b);
  /* Hoist the broadcast scalar; b_ is not modified inside the loop. */
  const uint32_t lane_value = b_.values[lane];
  SIMDE_VECTORIZE
  for (size_t idx = 0 ; idx < (sizeof(r_.values) / sizeof(r_.values[0])) ; idx++) {
    r_.values[idx] = a_.values[idx] * lane_value;
  }
  return simde_uint32x4_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vmulq_laneq_u32(a, b, lane) vmulq_laneq_u32((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmulq_laneq_u32
#define vmulq_laneq_u32(a, b, lane) simde_vmulq_laneq_u32((a), (b), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vmul_laneq_f32(simde_float32x2_t a, simde_float32x4_t b, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) {
  /* Portable fallback: multiply the 64-bit vector a element-wise by
   * lane `lane` of the 128-bit vector b. */
  simde_float32x2_private
    r_,
    a_ = simde_float32x2_to_private(a);
  simde_float32x4_private b_ = simde_float32x4_to_private(b);
  /* Hoist the broadcast scalar; b_ is not modified inside the loop. */
  const simde_float32 lane_value = b_.values[lane];
  SIMDE_VECTORIZE
  for (size_t idx = 0 ; idx < (sizeof(r_.values) / sizeof(r_.values[0])) ; idx++) {
    r_.values[idx] = a_.values[idx] * lane_value;
  }
  return simde_float32x2_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vmul_laneq_f32(a, b, lane) vmul_laneq_f32((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmul_laneq_f32
#define vmul_laneq_f32(a, b, lane) simde_vmul_laneq_f32((a), (b), (lane))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vmul_laneq_f64(simde_float64x1_t a, simde_float64x2_t b, const int lane)
    SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) {
  /* Portable fallback: multiply the 64-bit vector a element-wise by
   * lane `lane` of the 128-bit vector b. */
  simde_float64x1_private
    r_,
    a_ = simde_float64x1_to_private(a);
  simde_float64x2_private b_ = simde_float64x2_to_private(b);
  /* Hoist the broadcast scalar; b_ is not modified inside the loop. */
  const simde_float64 lane_value = b_.values[lane];
  SIMDE_VECTORIZE
  for (size_t idx = 0 ; idx < (sizeof(r_.values) / sizeof(r_.values[0])) ; idx++) {
    r_.values[idx] = a_.values[idx] * lane_value;
  }
  return simde_float64x1_from_private(r_);
}
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define simde_vmul_laneq_f64(a, b, lane) vmul_laneq_f64((a), (b), (lane))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
#undef vmul_laneq_f64
#define vmul_laneq_f64(a, b, lane) simde_vmul_laneq_f64((a), (b), (lane))
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_ARM_NEON_MUL_LANE_H) */
| Unknown |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.