The AVX2 256 bit permutation case
I do not think it is possible to write an efficient generic SSE4/AVX2/AVX-512 algorithm
that works for all vector sizes (128, 256, 512 bits) and all element granularities (bits,
bit pairs, nibbles, bytes). One problem is that many AVX2 instructions that exist
for, say, byte-sized elements do not exist for doubleword elements, and vice versa.
Below, the AVX2 256 bit permutation case is discussed.
It might be possible to recycle the ideas of this case for the other cases.
The idea is to extract 32 (permuted) bits per step from the input vector x.
In each step, 32 bytes from the permutation vector pos are read.
Bits 7..3 of these pos bytes determine which byte from x is needed.
The right byte is selected by an emulated 256 bit wide AVX2 lane-crossing byte
shuffle, coded here by Ermlg.
Bits 2..0 of the pos bytes determine which bit within that byte is sought.
With _mm256_movemask_epi8 the 32 bits are collected in one uint32_t.
This step is repeated 8 times to get all 256 permuted bits.
The code does not look very elegant. Nevertheless, I would be surprised
if a significantly faster (say, two times faster) AVX2 method existed.
/* gcc -O3 -m64 -Wall -mavx2 -march=skylake bitperm_avx2.c */
#include <immintrin.h>
#include <stdio.h>
#include <stdint.h>
inline __m256i shuf_epi8_lc(__m256i value, __m256i shuffle);
int print_epi64(__m256i a);
uint32_t get_32_bits(__m256i x, __m256i pos){
    __m256i pshufb_mask = _mm256_set_epi8(0,0,0,0, 0,0,0,0, 128,64,32,16, 8,4,2,1, 0,0,0,0, 0,0,0,0, 128,64,32,16, 8,4,2,1);
    __m256i byte_pos = _mm256_srli_epi32(pos, 3);                       /* which byte within the 32 bytes */
    byte_pos = _mm256_and_si256(byte_pos, _mm256_set1_epi8(0x1F));      /* mask off the unwanted bits */
    __m256i bit_pos = _mm256_and_si256(pos, _mm256_set1_epi8(0x07));    /* which bit within the byte */
    __m256i bit_pos_mask = _mm256_shuffle_epi8(pshufb_mask, bit_pos);   /* get bit mask */
    __m256i bytes_wanted = shuf_epi8_lc(x, byte_pos);                   /* get the right bytes */
    __m256i bits_wanted = _mm256_and_si256(bit_pos_mask, bytes_wanted); /* apply the bit mask to get rid of the unwanted bits within the byte */
    __m256i bits_x8 = _mm256_cmpeq_epi8(bits_wanted, bit_pos_mask);     /* check if the bit is set */
    return _mm256_movemask_epi8(bits_x8);
}
__m256i get_256_bits(__m256i x, uint8_t* pos){ /* glue the 32 bit results together */
    uint64_t t0 = get_32_bits(x, _mm256_loadu_si256((__m256i*)&pos[0]));
    uint64_t t1 = get_32_bits(x, _mm256_loadu_si256((__m256i*)&pos[32]));
    uint64_t t2 = get_32_bits(x, _mm256_loadu_si256((__m256i*)&pos[64]));
    uint64_t t3 = get_32_bits(x, _mm256_loadu_si256((__m256i*)&pos[96]));
    uint64_t t4 = get_32_bits(x, _mm256_loadu_si256((__m256i*)&pos[128]));
    uint64_t t5 = get_32_bits(x, _mm256_loadu_si256((__m256i*)&pos[160]));
    uint64_t t6 = get_32_bits(x, _mm256_loadu_si256((__m256i*)&pos[192]));
    uint64_t t7 = get_32_bits(x, _mm256_loadu_si256((__m256i*)&pos[224]));
    uint64_t t10 = (t1 << 32) | t0;
    uint64_t t32 = (t3 << 32) | t2;
    uint64_t t54 = (t5 << 32) | t4;
    uint64_t t76 = (t7 << 32) | t6;
    return _mm256_set_epi64x(t76, t54, t32, t10);
}
inline __m256i shuf_epi8_lc(__m256i value, __m256i shuffle){
    /* Ermlg's lane crossing byte shuffle https://mcmap.net/q/907584/-shuffle-elements-of-__m256i-vector */
    const __m256i K0 = _mm256_setr_epi8(
        0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70,
        0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0);
    const __m256i K1 = _mm256_setr_epi8(
        0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0,
        0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70);
    return _mm256_or_si256(_mm256_shuffle_epi8(value, _mm256_add_epi8(shuffle, K0)),
                           _mm256_shuffle_epi8(_mm256_permute4x64_epi64(value, 0x4E), _mm256_add_epi8(shuffle, K1)));
}
int main(){
    __m256i input = _mm256_set_epi16(0x1234,0x9876,0x7890,0xABCD, 0x3456,0x7654,0x0123,0x4567,
                                     0x0123,0x4567,0x89AB,0xCDEF, 0xFEDC,0xBA98,0x7654,0x3210);
    /* Example                                                                                           */
    /*        240  224  208  192   176  160  144  128   112   96   80   64    48   32   16    0         */
    /* input  1234 9876 7890 ABCD | 3456 7654 0123 4567 | 0123 4567 89AB CDEF | FEDC BA98 7654 3210     */
    /* output 0000 0000 0012 00FF | 90AB 3210 7654 ABCD | 8712 1200 FF90 AB32 | 7654 ABCD 1087 7654     */
    uint8_t permutation[256] = {16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31,
                                28,29,30,31, 32,33,34,35, 0,1,2,3, 4,5,6,7,
                                72,73,74,75, 76,77,78,79, 80,81,82,83, 84,85,86,87,
                                160,161,162,163, 164,165,166,167, 168,169,170,171, 172,173,174,175,
                                8,9,10,11, 12,13,14,15, 200,201,202,203, 204,205,206,207,
                                208,209,210,211, 212,213,214,215, 215,215,215,215, 215,215,215,215,
                                1,1,1,1, 1,1,1,1, 248,249,250,251, 252,253,254,255,
                                248,249,250,251, 252,253,254,255, 28,29,30,31, 32,33,34,35,
                                72,73,74,75, 76,77,78,79, 80,81,82,83, 84,85,86,87,
                                160,161,162,163, 164,165,166,167, 168,169,170,171, 172,173,174,175,
                                0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15,
                                200,201,202,203, 204,205,206,207, 208,209,210,211, 212,213,214,215,
                                215,215,215,215, 215,215,215,215, 1,1,1,1, 1,1,1,1,
                                248,249,250,251, 252,253,254,255, 1,1,1,1, 1,1,1,1,
                                1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1,
                                1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1};
    printf("input = \n");
    print_epi64(input);
    __m256i x = get_256_bits(input, permutation);
    printf("permuted input = \n");
    print_epi64(x);
    return 0;
}
int print_epi64(__m256i a){
    uint64_t v[4];
    int i;
    _mm256_storeu_si256((__m256i*)v, a);
    for (i = 3; i >= 0; i--) printf("%016lX ", v[i]);
    printf("\n");
    return 0;
}
The output with the example permutation looks correct:
$ ./a.out
input =
123498767890ABCD 3456765401234567 0123456789ABCDEF FEDCBA9876543210
permuted input =
00000000001200FF 90AB32107654ABCD 87121200FF90AB32 7654ABCD10877654
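For an independent check, the same permutation can also be computed with a plain scalar loop, since by definition output bit i is simply input bit pos[i]. The sketch below is my own addition (the function name bitperm_ref is hypothetical); the byte buffers are the 32-byte little-endian images of the vectors, e.g. obtained with _mm256_storeu_si256.

#include <stdint.h>
#include <string.h>

/* Scalar reference: output bit i is input bit pos[i]. */
static void bitperm_ref(const uint8_t* x_bytes, const uint8_t* pos, uint8_t* out_bytes){
    memset(out_bytes, 0, 32);
    for (int i = 0; i < 256; i++){
        int bit = (x_bytes[pos[i] >> 3] >> (pos[i] & 7)) & 1;   /* fetch input bit pos[i] */
        out_bytes[i >> 3] |= (uint8_t)(bit << (i & 7));         /* write it to output bit i */
    }
}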
Efficiency
If you look carefully at the algorithm, you will see that some operations only
depend on the permutation vector pos and not on x. This means that applying the
permutation with a variable x and a fixed pos should be more efficient
than applying the permutation with both x and pos variable.
This is illustrated by the following code:
/* apply the same permutation several times */
int perm_array(__m256i* restrict x_in, uint8_t* restrict pos, __m256i* restrict x_out){
    for (int i = 0; i < 1024; i++){
        x_out[i] = get_256_bits(x_in[i], pos);
    }
    return 0;
}
With clang and gcc this compiles to really nice code: loop .L5 at line 237 of the
generated assembly only contains 16 vpshufbs instead of 24. Moreover, the vpaddbs
are hoisted out of the loop. Note that there is also only one vpermq inside the loop.
I do not know whether MSVC hoists that many instructions out of the loop. If not,
it might be possible to improve the performance of the loop by modifying the code
manually, such that the operations which only depend on pos, and not on x, are
hoisted outside the loop.
With respect to the performance on Intel Skylake:
the throughput of this loop is likely limited by the roughly 32 port 5 micro-ops
per loop iteration. This means that in a loop context such as perm_array the
throughput is about 256 permuted bits per 32 CPU cycles,
or about 8 permuted bits per CPU cycle.
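This estimate can be checked with a simple timing loop. The sketch below is my own addition, not part of the original answer; it assumes perm_array from above and a POSIX clock_gettime, and reports nanoseconds per 256 bit permutation (at around 3 GHz, 8 bits per cycle corresponds to roughly 10 to 11 ns per permutation).

#include <immintrin.h>
#include <stdint.h>
#include <time.h>

/* Hypothetical harness: time perm_array over many repetitions. The buffers
   x_in, pos and x_out must be initialized (1024 vectors / 256 pos bytes).   */
double ns_per_permutation(__m256i* x_in, uint8_t* pos, __m256i* x_out, int reps){
    struct timespec t0, t1;
    clock_gettime(CLOCK_MONOTONIC, &t0);
    for (int r = 0; r < reps; r++)
        perm_array(x_in, pos, x_out);
    clock_gettime(CLOCK_MONOTONIC, &t1);
    double ns = (t1.tv_sec - t0.tv_sec) * 1e9 + (t1.tv_nsec - t0.tv_nsec);
    return ns / ((double)reps * 1024.0);   /* perm_array permutes 1024 vectors per call */
}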
128 bit permutations using AVX2 instructions
This code is quite similar to the 256 bit permutation case.
Although only 128 bits are permuted, the full 256 bit width of the AVX2
registers is used to achieve the best performance.
Here the byte shuffles are not emulated, because a single efficient instruction,
vpshufb, already does the byte shuffling within the 128 bit lanes.

Function perm_array_128 tests the performance of the bit permutation
for a fixed permutation and a variable input x.
The assembly loop contains about 11 port 5 (p5) micro-ops, if we
assume an Intel Skylake CPU.
These 11 p5 micro-ops take at least 11 CPU cycles (throughput).
So, in the best case we get a throughput of about 12 permuted bits per cycle, which is about 1.5 times as fast as the 256 bit permutation case.
/* gcc -O3 -m64 -Wall -mavx2 -march=skylake bitperm128_avx2.c */
#include <immintrin.h>
#include <stdio.h>
#include <stdint.h>
int print128_epi64(__m128i a);
uint32_t get_32_128_bits(__m256i x, __m256i pos){ /* extract 32 permuted bits out from 2x128 bits */
    __m256i pshufb_mask = _mm256_set_epi8(0,0,0,0, 0,0,0,0, 128,64,32,16, 8,4,2,1, 0,0,0,0, 0,0,0,0, 128,64,32,16, 8,4,2,1);
    __m256i byte_pos = _mm256_srli_epi32(pos, 3);                       /* which byte do we need within the 16 byte lanes: bits 6,5,4,3 select the right byte */
    byte_pos = _mm256_and_si256(byte_pos, _mm256_set1_epi8(0xF));       /* mask off the unwanted bits (unnecessary if _mm256_srli_epi8 had existed) */
    __m256i bit_pos = _mm256_and_si256(pos, _mm256_set1_epi8(0x07));    /* which bit within the byte */
    __m256i bit_pos_mask = _mm256_shuffle_epi8(pshufb_mask, bit_pos);   /* get bit mask */
    __m256i bytes_wanted = _mm256_shuffle_epi8(x, byte_pos);            /* get the right bytes */
    __m256i bits_wanted = _mm256_and_si256(bit_pos_mask, bytes_wanted); /* apply the bit mask to get rid of the unwanted bits within the byte */
    __m256i bits_x8 = _mm256_cmpeq_epi8(bits_wanted, bit_pos_mask);     /* set all bits if the wanted bit is set */
    return _mm256_movemask_epi8(bits_x8);                               /* move most significant bit of each byte to 32 bit register */
}
__m128i permute_128_bits(__m128i x, uint8_t* pos){ /* get bit permutations in 32 bit pieces and glue them together */
    __m256i x2 = _mm256_broadcastsi128_si256(x);   /* broadcast x to the hi and lo lane */
    uint64_t t0 = get_32_128_bits(x2, _mm256_loadu_si256((__m256i*)&pos[0]));
    uint64_t t1 = get_32_128_bits(x2, _mm256_loadu_si256((__m256i*)&pos[32]));
    uint64_t t2 = get_32_128_bits(x2, _mm256_loadu_si256((__m256i*)&pos[64]));
    uint64_t t3 = get_32_128_bits(x2, _mm256_loadu_si256((__m256i*)&pos[96]));
    uint64_t t10 = (t1 << 32) | t0;
    uint64_t t32 = (t3 << 32) | t2;
    return _mm_set_epi64x(t32, t10);
}
/* Test loop performance with the following loop (see assembly) -> 11 port5 uops inside the critical loop */
/* Use gcc -O3 -m64 -Wall -mavx2 -march=skylake -S bitperm128_avx2.c to generate the assembly */
int perm_array_128(__m128i* restrict x_in, uint8_t* restrict pos, __m128i* restrict x_out){
    for (int i = 0; i < 1024; i++){
        x_out[i] = permute_128_bits(x_in[i], pos);
    }
    return 0;
}
int main(){
    __m128i input = _mm_set_epi16(0x0123,0x4567,0xFEDC,0xBA98, 0x7654,0x3210,0x89AB,0xCDEF);
    /* Example                                        */
    /*         112   96   80   64   48   32   16    0 */
    /* input  0123 4567 FEDC BA98 7654 3210 89AB CDEF */
    /* output 8FFF CDEF DCBA 08EF CDFF DCBA EFF0 89AB */
    uint8_t permutation[128] = {16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31,
                                32,32,32,32, 36,36,36,36, 0,1,2,3, 4,5,6,7,
                                72,73,74,75, 76,77,78,79, 80,81,82,83, 84,85,86,87,
                                0,0,0,0, 0,0,0,0, 8,9,10,11, 12,13,14,15,
                                0,1,2,3, 4,5,6,7, 28,29,30,31, 32,33,34,35,
                                72,73,74,75, 76,77,78,79, 80,81,82,83, 84,85,86,87,
                                0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15,
                                1,1,1,1, 1,1,1,1, 1,1,1,1, 32,32,32,1};
    printf("input = \n");
    print128_epi64(input);
    __m128i x = permute_128_bits(input, permutation);
    printf("permuted input = \n");
    print128_epi64(x);
    return 0;
}
int print128_epi64(__m128i a){
    uint64_t v[2];
    int i;
    _mm_storeu_si128((__m128i*)v, a);
    for (i = 1; i >= 0; i--) printf("%016lX ", v[i]);
    printf("\n");
    return 0;
}
Example output for some arbitrary permutation:
$ ./a.out
input =
01234567FEDCBA98 7654321089ABCDEF
permuted input =
8FFFCDEFDCBA08EF CDFFDCBAEFF089AB
Comments from the discussion:

_mm256_permutevar8x32_epi32 (vpermd) takes a control vector for an arbitrary lane-crossing permute. (For 64-bit indices, you need to preprocess that to create an input for vpermd, but that's just mapping each i to 2*i + 0..1, with maybe an add same,same to double, then an in-lane shuffle, then adding a vector of 0,1,0,1,...) – Indemnity

vpermq with a vector control, not just an immediate compile-time constant. Are your permutes going to be compile-time constants? The code you show uses const in[] and perm[], so that's no help; the compiler can calculate the contents of out at compile time if both inputs are compile-time constants. And is that supposed to be 1 bit per uint8_t for a 256-bit register? Or do you really want a 256-byte shuffle (8 YMM registers wide)? – Indemnity

in[] is not compile-time constant, but perm[] is - I'd be happy to give it in whatever form is suitable (template parameters for a library, masks for assembly instructions and immediates). vpermd shuffles blocks of 32-bit integers across lanes, but not the bits within the integers. As mentioned (maybe not clearly enough), permutations of blocks of size >= 8 are taken care of. So to emphasize, I really want a 256 BIT shuffle, not BYTE; that is, maybe you tell me a byte shuffle is faster, and storing one bit/two bits/one nibble per byte and disregarding the rest is the way to go. – Grasmere
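The index preprocessing described in the first comment (turning 64-bit element indices into a dword control vector for vpermd) might look like the sketch below. This is my own illustration, not code from the discussion; the function names are hypothetical.

#include <immintrin.h>

/* Hypothetical sketch: turn 4 qword indices (values 0..3, one per 64-bit element
   of idx64) into an 8-dword control vector, so that vpermd moves whole 64-bit
   elements across lanes.                                                        */
static __m256i qword_idx_to_vpermd_ctrl(__m256i idx64){
    __m256i doubled = _mm256_add_epi64(idx64, idx64);        /* 2*i in each qword (add same,same)                    */
    __m256i dup     = _mm256_shuffle_epi32(doubled, 0xA0);   /* in-lane shuffle: copy low dword of each qword twice  */
    return _mm256_add_epi32(dup, _mm256_set_epi32(1,0,1,0,1,0,1,0)); /* add 0,1,0,1,... to get 2*i and 2*i+1         */
}

/* usage: permute the four 64-bit elements of v according to idx64 */
static __m256i permute_qwords(__m256i v, __m256i idx64){
    return _mm256_permutevar8x32_epi32(v, qword_idx_to_vpermd_ctrl(idx64));
}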
shuffles blocks of 32 bit integers across lanes, but not the bits within the integers. As mentioned (maybe not clearly enough), permutations of blocks of size >= 8 are taken care of. So to emphasize, I really want a 256 BIT shuffle, not BYTE---that is, maybe you tell me that's faster, and storing one bit/two bits/one nibble per bite and disregarding the rest is the way to go. – Grasmere