include/xsimd/arch/xsimd_avx512bw.hpp (9 additions, 97 deletions)
@@ -484,113 +484,25 @@ namespace xsimd
}

// slide_left
namespace detail
{
struct make_slide_perm_hi
{
static constexpr uint64_t get(size_t i, size_t)
{
return i == 0 ? 8 : i - 1;
}
};

template <size_t N>
struct make_slide_left_pattern
{
static constexpr uint16_t get(size_t i, size_t)
{
return i >= N ? i - N : 0;
}
};
}

template <size_t N, class A, class T>
template <size_t N, class A, class T, class = typename std::enable_if<(N & 3) == 2 && (N < 64)>::type>
XSIMD_INLINE batch<T, A> slide_left(batch<T, A> const& x, requires_arch<avx512bw>) noexcept
{
if (N == 0)
{
return x;
}
if (N >= 64)
{
return batch<T, A>(T(0));
}
batch<T, A> xx;
if (N & 1)
{
alignas(A::alignment()) uint64_t buffer[8];
_mm512_store_epi64(&buffer[0], x);
for (int i = 7; i > 0; --i)
buffer[i] = (buffer[i] << 8) | (buffer[i - 1] >> 56);
buffer[0] = buffer[0] << 8;
xx = _mm512_load_epi64(&buffer[0]);
static_assert((N & 3) == 2 && N < 64, "The AVX512F implementation may have a lower latency.");

auto slide_perm = xsimd::make_batch_constant<uint64_t, detail::make_slide_perm_hi, A>();
__m512i xl = _mm512_slli_epi64(x, 8);
__m512i xr = _mm512_srli_epi64(x, 56);
xr = _mm512_permutex2var_epi64(xr, slide_perm.as_batch(), _mm512_setzero_si512());
xx = _mm512_or_si512(xr, xl);
if (N == 1)
return xx;
}
else
{
xx = x;
}
__mmask32 mask = 0xFFFFFFFFu << ((N / 2) & 31);
auto slide_pattern = xsimd::make_batch_constant<uint16_t, detail::make_slide_left_pattern<N / 2>, A>();
return _mm512_maskz_permutexvar_epi16(mask, slide_pattern.as_batch(), xx);
auto slide_pattern = make_batch_constant<uint16_t, detail::make_slide_left_pattern<N / 2>, A>();
return _mm512_maskz_permutexvar_epi16(mask, slide_pattern.as_batch(), x);
}

// slide_right
namespace detail
{
struct make_slide_perm_low
{
static constexpr uint64_t get(size_t i, size_t)
{
return i + 1;
}
};

template <size_t N>
struct make_slide_right_pattern
{
static constexpr uint16_t get(size_t i, size_t n)
{
return i < (n - N) ? i + N : 0;
}
};
}
template <size_t N, class A, class T>
template <size_t N, class A, class T, class = typename std::enable_if<(N & 3) == 2 && (N < 64)>::type>
XSIMD_INLINE batch<T, A> slide_right(batch<T, A> const& x, requires_arch<avx512bw>) noexcept
{
if (N == 0)
{
return x;
}
if (N >= 64)
{
return batch<T, A>(T(0));
}
batch<T, A> xx;
if (N & 1)
{
auto slide_perm = xsimd::make_batch_constant<uint64_t, detail::make_slide_perm_low, A>();
__m512i xr = _mm512_srli_epi64(x, 8);
__m512i xl = _mm512_slli_epi64(x, 56);
xl = _mm512_permutex2var_epi64(xl, slide_perm.as_batch(), _mm512_setzero_si512());
xx = _mm512_or_si512(xr, xl);
if (N == 1)
return xx;
}
else
{
xx = x;
}
static_assert((N & 3) == 2 && N < 64, "The AVX512F implementation may have a lower latency.");

__mmask32 mask = 0xFFFFFFFFu >> ((N / 2) & 31);
auto slide_pattern = xsimd::make_batch_constant<uint16_t, detail::make_slide_right_pattern<N / 2>, A>();
return _mm512_maskz_permutexvar_epi16(mask, slide_pattern.as_batch(), xx);
auto slide_pattern = make_batch_constant<uint16_t, detail::make_slide_right_pattern<N / 2>, A>();
return _mm512_maskz_permutexvar_epi16(mask, slide_pattern.as_batch(), x);
}

// ssub
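For context on what these kernels compute: as implemented by the masks and permute patterns above, slide_left<N> shifts the register contents toward higher byte positions by N and zero-fills the low N bytes, while slide_right<N> is the mirror image. A minimal scalar reference follows (an illustrative sketch, not part of this diff; the 64-byte array stands in for one 512-bit register in little-endian byte order):

#include <array>
#include <cstddef>
#include <cstdint>

// Byte-wise reference for slide_left / slide_right on a 64-byte (512-bit) register.
template <std::size_t N>
std::array<std::uint8_t, 64> slide_left_ref(std::array<std::uint8_t, 64> const& x)
{
    std::array<std::uint8_t, 64> r {};
    for (std::size_t i = N; i < 64; ++i)
        r[i] = x[i - N]; // byte i receives the byte N positions below; the low N bytes stay zero
    return r;
}

template <std::size_t N>
std::array<std::uint8_t, 64> slide_right_ref(std::array<std::uint8_t, 64> const& x)
{
    std::array<std::uint8_t, 64> r {};
    for (std::size_t i = 0; i + N < 64; ++i)
        r[i] = x[i + N]; // byte i receives the byte N positions above; the high N bytes stay zero
    return r;
}

The AVX512BW kernels above operate on 16-bit elements, which is why their enable_if restricts N to even offsets; at byte granularity the result is the same as this reference.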
include/xsimd/arch/xsimd_avx512f.hpp (97 additions, 6 deletions)
@@ -1859,19 +1859,110 @@ namespace xsimd
}

// slide_left
namespace detail
{
template <size_t N>
struct make_slide_left_pattern
{
static constexpr size_t get(size_t i, size_t)
{
return i >= N ? i - N : 0;
}
};

template <size_t N, class A, class T>
XSIMD_INLINE batch<T, A> slide_left_aligned_u32(batch<T, A> const& x, requires_arch<avx512f>) noexcept
{
static_assert((N & 3) == 0 || N >= 64, "N must be aligned to 32 bits");

if (N == 0)
{
return x;
}
if (N >= 64)
{
return batch<T, A>(T(0));
}

__mmask16 mask = uint16_t(0xFFFFu << (N / 4));

if ((N & 15) == 0)
{
const uint8_t imm8 = uint8_t(0xe4 << (2 * (N / 16)));
return _mm512_maskz_shuffle_i32x4(mask, x, x, imm8);
}

auto slide_pattern = make_batch_constant<uint32_t, detail::make_slide_left_pattern<N / 4>, A>();
return _mm512_maskz_permutexvar_epi32(mask, slide_pattern.as_batch(), x);
}
}
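One detail of the aligned helper above that is easy to miss: 0xe4 encodes the identity permutation of the four 128-bit lanes (3,2,1,0) as a base-4 immediate, so left-shifting it by 2 * (N / 16) renumbers the selected source lanes downward, and the zero-mask clears the lanes that have nothing to read from. A small compile-time check of that arithmetic (an illustrative sketch, not part of this diff):

#include <cstddef>
#include <cstdint>

// imm8 fed to _mm512_maskz_shuffle_i32x4 when N is a multiple of 16 bytes.
constexpr std::uint8_t lane_select(std::size_t n_bytes)
{
    return static_cast<std::uint8_t>(0xe4 << (2 * (n_bytes / 16)));
}

static_assert(lane_select(0) == 0xe4, "identity: lanes (3,2,1,0)");
static_assert(lane_select(16) == 0x90, "lanes (2,1,0,0); lane 0 is zeroed by the mask");
static_assert(lane_select(32) == 0x40, "lanes (1,0,0,0); lanes 0-1 are zeroed by the mask");
static_assert(lane_select(48) == 0x00, "lanes (0,0,0,0); lanes 0-2 are zeroed by the mask");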

template <size_t N, class A, class T>
XSIMD_INLINE batch<T, A> slide_left(batch<T, A> const&, requires_arch<avx512f>) noexcept
XSIMD_INLINE batch<T, A> slide_left(batch<T, A> const& x, requires_arch<avx512f>) noexcept
{
static_assert(N == 0xDEAD, "not implemented yet");
return {};
constexpr size_t NN = N & ~3;
if (N == NN || NN >= 64)
{
// Call fast path
return detail::slide_left_aligned_u32<NN>(x, A {});
}

__m512i xl = detail::slide_left_aligned_u32<NN, A, T>(_mm512_slli_epi32(x, 8 * (N - NN)), A {});
__m512i xr = detail::slide_left_aligned_u32<NN + 4, A, T>(_mm512_srli_epi32(x, 32 - 8 * (N - NN)), A {});
return _mm512_or_epi32(xl, xr);
}

// slide_right
namespace detail
{
template <size_t N>
struct make_slide_right_pattern
{
static constexpr size_t get(size_t i, size_t n)
{
return i < (n - N) ? i + N : 0;
}
};

template <size_t N, class A, class T>
XSIMD_INLINE batch<T, A> slide_right_aligned_u32(batch<T, A> const& x, requires_arch<avx512f>) noexcept
{
static_assert((N & 3) == 0 || N >= 64, "N must be aligned to 32 bits");

if (N == 0)
{
return x;
}
if (N >= 64)
{
return batch<T, A>(T(0));
}

__mmask16 mask = 0xFFFFu >> (N / 4);

if ((N & 15) == 0)
{
const uint8_t imm8 = 0xe4 >> (2 * (N / 16));
return _mm512_maskz_shuffle_i32x4(mask, x, x, imm8);
}

auto slide_pattern = make_batch_constant<uint32_t, detail::make_slide_right_pattern<N / 4>, A>();
return _mm512_maskz_permutexvar_epi32(mask, slide_pattern.as_batch(), x);
}
}
template <size_t N, class A, class T>
XSIMD_INLINE batch<T, A> slide_right(batch<T, A> const&, requires_arch<avx512f>) noexcept
XSIMD_INLINE batch<T, A> slide_right(batch<T, A> const& x, requires_arch<avx512f>) noexcept
{
static_assert(N == 0xDEAD, "not implemented yet");
return {};
constexpr size_t NN = N & ~3;
if (N == NN || NN >= 64)
{
// Call fast path
return detail::slide_right_aligned_u32<NN>(x, A {});
}

__m512i xl = detail::slide_right_aligned_u32<NN + 4, A, T>(_mm512_slli_epi32(x, 32 - 8 * (N - NN)), A {});
__m512i xr = detail::slide_right_aligned_u32<NN, A, T>(_mm512_srli_epi32(x, 8 * (N - NN)), A {});
return _mm512_or_epi32(xl, xr);
}

// sqrt
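The unaligned branches of slide_left / slide_right above synthesize an arbitrary byte offset from two 32-bit-aligned slides of per-lane shifted copies of the input. A scalar model of the slide_left decomposition, reusing slide_left_ref from the earlier sketch (again illustrative only, not part of this diff; it assumes the little-endian lane layout of x86):

#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>

// Models slide_left<N> for N not a multiple of 4: NN is N rounded down to a
// 32-bit boundary and d = N - NN is the residual byte shift within each lane.
template <std::size_t N>
std::array<std::uint8_t, 64> slide_left_model(std::array<std::uint8_t, 64> const& x)
{
    constexpr std::size_t NN = N & ~std::size_t(3);
    constexpr std::size_t d = N - NN; // 1, 2 or 3

    std::uint32_t lanes[16];
    std::memcpy(lanes, x.data(), 64);

    std::uint32_t lo[16], hi[16];
    for (int i = 0; i < 16; ++i)
    {
        lo[i] = lanes[i] << (8 * d);      // bytes that stay within their 32-bit lane
        hi[i] = lanes[i] >> (32 - 8 * d); // bytes that spill into the next lane
    }

    std::array<std::uint8_t, 64> lo_bytes, hi_bytes;
    std::memcpy(lo_bytes.data(), lo, 64);
    std::memcpy(hi_bytes.data(), hi, 64);

    // _mm512_slli_epi32 / _mm512_srli_epi32 followed by the aligned slides by NN and NN + 4.
    auto xl = slide_left_ref<NN>(lo_bytes);
    auto xr = slide_left_ref<NN + 4>(hi_bytes);

    std::array<std::uint8_t, 64> r;
    for (int i = 0; i < 64; ++i)
        r[i] = static_cast<std::uint8_t>(xl[i] | xr[i]); // matches slide_left_ref<N>(x)
    return r;
}

The slide_right decomposition in the diff is symmetric: the residual shift directions and the roles of NN and NN + 4 are swapped.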
include/xsimd/arch/xsimd_avx512vbmi.hpp (7 additions, 41 deletions)
@@ -24,59 +24,25 @@ namespace xsimd
{
using namespace types;

namespace detail
{
template <size_t N>
struct make_slide_left_bytes_pattern
{
static constexpr uint8_t get(size_t i, size_t)
{
return i >= N ? i - N : 0;
}
};

template <size_t N>
struct make_slide_right_bytes_pattern
{
static constexpr uint8_t get(size_t i, size_t n)
{
return i < (n - N) ? i + N : 0;
}
};
}

// slide_left
template <size_t N, class A, class T>
template <size_t N, class A, class T, class = typename std::enable_if<(N & 3) != 0 && (N < 64)>::type>
XSIMD_INLINE batch<T, A> slide_left(batch<T, A> const& x, requires_arch<avx512vbmi>) noexcept
{
if (N == 0)
{
return x;
}
if (N >= 64)
{
return batch<T, A>(T(0));
}
static_assert((N & 3) != 0 && N < 64, "The AVX512F implementation may have a lower latency.");

__mmask64 mask = 0xFFFFFFFFFFFFFFFFull << (N & 63);
auto slide_pattern = xsimd::make_batch_constant<uint8_t, detail::make_slide_left_bytes_pattern<N>, A>();
auto slide_pattern = make_batch_constant<uint8_t, detail::make_slide_left_pattern<N>, A>();
return _mm512_maskz_permutexvar_epi8(mask, slide_pattern.as_batch(), x);
}

// slide_right
template <size_t N, class A, class T>
template <size_t N, class A, class T, class = typename std::enable_if<(N & 3) != 0 && (N < 64)>::type>
XSIMD_INLINE batch<T, A> slide_right(batch<T, A> const& x, requires_arch<avx512vbmi>) noexcept
{
if (N == 0)
{
return x;
}
if (N >= 64)
{
return batch<T, A>(T(0));
}
static_assert((N & 3) != 0 && N < 64, "The AVX512F implementation may have a lower latency.");

__mmask64 mask = 0xFFFFFFFFFFFFFFFFull >> (N & 63);
auto slide_pattern = xsimd::make_batch_constant<uint8_t, detail::make_slide_right_bytes_pattern<N>, A>();
auto slide_pattern = make_batch_constant<uint8_t, detail::make_slide_right_pattern<N>, A>();
return _mm512_maskz_permutexvar_epi8(mask, slide_pattern.as_batch(), x);
}

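From the caller's point of view nothing changes: the generic slide entry points still dispatch to the kernel whose enable_if matches (AVX512VBMI when N is not a multiple of 4, AVX512BW when N is 2 mod 4, AVX512F otherwise). A usage sketch, assuming the public batch API (load_aligned, store_aligned and the free slide_left function); the values and the offset are arbitrary:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <xsimd/xsimd.hpp>

int main()
{
    using batch_t = xsimd::batch<std::int32_t>;

    alignas(64) std::int32_t in[16] = {};
    for (int i = 0; i < 16; ++i)
        in[i] = i + 1;

    auto x = batch_t::load_aligned(in);

    // Slide the whole register left by 8 bytes, i.e. by two int32 lanes.
    auto shifted = xsimd::slide_left<8>(x);

    alignas(64) std::int32_t out[16] = {};
    shifted.store_aligned(out);

    for (std::size_t i = 0; i < batch_t::size; ++i)
        std::printf("%d ", static_cast<int>(out[i]));
    std::printf("\n");
}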
test/test_shuffle.cpp (0 additions, 3 deletions)
@@ -166,7 +166,6 @@ namespace
};
}

#if !XSIMD_WITH_AVX512F || XSIMD_WITH_AVX512BW
template <class B>
struct slide_test : public init_slide_base<typename B::value_type, B::size>
{
@@ -270,8 +269,6 @@ TEST_CASE_TEMPLATE("[slide]", B, BATCH_INT_TYPES)
}
}

#endif

template <class B>
struct compress_test
{