How do I perform arbitrary permutations of a register float variable (always of length 32)? I have seen suggestions that __shfl_sync will do this, but no example showing this. A numpy version of a simple case of what I want to do with length 8 array:
a = <some float32 array of length 8>  # specific patterns will always cycle mod 4
b = a[[3,2,1,0,7,6,5,4]]
$ cat t1486.cu
#include <stdio.h>
// Arbitrary permutation of one float per lane across a full warp.
// __shfl_sync's third argument is the *source lane*: lane i receives the
// my_val held by lane pattern[i].  This is how a 32-element "register
// array" spread across a warp (one element per lane) is permuted.
// Assumes a launch of exactly 32 threads (one full warp) and that
// pattern[] holds at least 32 valid lane indices in [0,31].
__global__ void k(int *pattern){
// Unique, recognizable per-lane value: lane index + 0.1.
float my_val = (float)threadIdx.x + 0.1f;
// 0xFFFFFFFF: all 32 lanes participate in the shuffle (required mask
// form for the *_sync warp intrinsics).
my_val = __shfl_sync(0xFFFFFFFF, my_val, pattern[threadIdx.x]);
printf("warp lane: %d, val: %f\n", threadIdx.x&31, my_val);
}
// Host driver: build a 32-entry permutation (reverse each group of 4 for
// the first 8 lanes, identity for lanes 8..31), copy it to the device,
// and launch the permutation kernel on a single warp.
// Returns 0 on success, 1 on any CUDA error (reported to stderr).
int main(){
  // Brace-init fills the first 8 entries; the rest are zero-initialized
  // here and overwritten with the identity mapping below.
  int pattern[32] = {3,2,1,0,7,6,5,4};
  for (int i = 8; i<32; i++) pattern[i] = i;
  int *d_pattern;
  cudaError_t err = cudaMalloc(&d_pattern, sizeof(pattern));
  if (err != cudaSuccess) {
    fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
    return 1; }
  err = cudaMemcpy(d_pattern, pattern, sizeof(pattern), cudaMemcpyHostToDevice);
  if (err != cudaSuccess) {
    fprintf(stderr, "cudaMemcpy failed: %s\n", cudaGetErrorString(err));
    return 1; }
  k<<<1,32>>>(d_pattern);
  // Kernel launches do not return a status directly: check for
  // launch-configuration errors immediately...
  err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    return 1; }
  // ...and for asynchronous execution errors at the synchronization point.
  err = cudaDeviceSynchronize();
  if (err != cudaSuccess) {
    fprintf(stderr, "kernel execution failed: %s\n", cudaGetErrorString(err));
    return 1; }
  cudaFree(d_pattern);  // release the device allocation
  return 0;
}
$ nvcc -o t1486 t1486.cu
$ cuda-memcheck ./t1486
========= CUDA-MEMCHECK
warp lane: 0, val: 3.100000
warp lane: 1, val: 2.100000
warp lane: 2, val: 1.100000
warp lane: 3, val: 0.100000
warp lane: 4, val: 7.100000
warp lane: 5, val: 6.100000
warp lane: 6, val: 5.100000
warp lane: 7, val: 4.100000
warp lane: 8, val: 8.100000
warp lane: 9, val: 9.100000
warp lane: 10, val: 10.100000
warp lane: 11, val: 11.100000
warp lane: 12, val: 12.100000
warp lane: 13, val: 13.100000
warp lane: 14, val: 14.100000
warp lane: 15, val: 15.100000
warp lane: 16, val: 16.100000
warp lane: 17, val: 17.100000
warp lane: 18, val: 18.100000
warp lane: 19, val: 19.100000
warp lane: 20, val: 20.100000
warp lane: 21, val: 21.100000
warp lane: 22, val: 22.100000
warp lane: 23, val: 23.100000
warp lane: 24, val: 24.100000
warp lane: 25, val: 25.100000
warp lane: 26, val: 26.100000
warp lane: 27, val: 27.100000
warp lane: 28, val: 28.100000
warp lane: 29, val: 29.100000
warp lane: 30, val: 30.100000
warp lane: 31, val: 31.100000
========= ERROR SUMMARY: 0 errors
$
For question 2 the only thing I can come up with seems trivial. As suggested in my answer to question 1, one way to think about a 32 item float array is having the array "spread" across a warp. I believe this gives the most correspondence to AVX style processing.
If we follow that, then the code for question 2 could be trivial:
$ cat t1487.cu
#include <stdio.h>
// Per-thread "select": each thread owns a small private array and picks
// one of its elements using a per-lane index read from global memory.
// No data moves between lanes here — the permutation is within each
// thread's own array.  Assumes pattern[] holds at least blockDim.x
// entries, each 0 or 1.
// NOTE(review): my_vals is thread-private; because it is indexed with a
// runtime value the compiler may place it in local memory rather than
// registers — confirm with -Xptxas -v if register residency matters.
__global__ void k(int *pattern){
float my_vals[2] = {1.1f, 2.2f};
float my_val = my_vals[pattern[threadIdx.x]];
printf("warp lane: %d, val: %f\n", threadIdx.x&31, my_val);
}
// Host driver: build a 32-entry 0/1 selection pattern (first 8 entries
// {0,0,1,1,0,0,1,1}, remaining entries 0), copy it to the device, and
// launch the selection kernel on a single warp.
// Returns 0 on success, 1 on any CUDA error (reported to stderr).
int main(){
  // Brace-init sets the first 8 entries; the loop makes the zero fill of
  // the remaining entries explicit.
  int pattern[32] = {0,0,1,1,0,0,1,1};
  for (int i = 8; i<32; i++) pattern[i] = 0;
  int *d_pattern;
  cudaError_t err = cudaMalloc(&d_pattern, sizeof(pattern));
  if (err != cudaSuccess) {
    fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
    return 1; }
  err = cudaMemcpy(d_pattern, pattern, sizeof(pattern), cudaMemcpyHostToDevice);
  if (err != cudaSuccess) {
    fprintf(stderr, "cudaMemcpy failed: %s\n", cudaGetErrorString(err));
    return 1; }
  k<<<1,32>>>(d_pattern);
  // Launch errors surface via cudaGetLastError(), not the launch itself.
  err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    return 1; }
  // Execution errors surface at the next synchronizing call.
  err = cudaDeviceSynchronize();
  if (err != cudaSuccess) {
    fprintf(stderr, "kernel execution failed: %s\n", cudaGetErrorString(err));
    return 1; }
  cudaFree(d_pattern);  // release the device allocation
  return 0;
}
$ nvcc -o t1487 t1487.cu
$ cuda-memcheck ./t1487
========= CUDA-MEMCHECK
warp lane: 0, val: 1.100000
warp lane: 1, val: 1.100000
warp lane: 2, val: 2.200000
warp lane: 3, val: 2.200000
warp lane: 4, val: 1.100000
warp lane: 5, val: 1.100000
warp lane: 6, val: 2.200000
warp lane: 7, val: 2.200000
warp lane: 8, val: 1.100000
warp lane: 9, val: 1.100000
warp lane: 10, val: 1.100000
warp lane: 11, val: 1.100000
warp lane: 12, val: 1.100000
warp lane: 13, val: 1.100000
warp lane: 14, val: 1.100000
warp lane: 15, val: 1.100000
warp lane: 16, val: 1.100000
warp lane: 17, val: 1.100000
warp lane: 18, val: 1.100000
warp lane: 19, val: 1.100000
warp lane: 20, val: 1.100000
warp lane: 21, val: 1.100000
warp lane: 22, val: 1.100000
warp lane: 23, val: 1.100000
warp lane: 24, val: 1.100000
warp lane: 25, val: 1.100000
warp lane: 26, val: 1.100000
warp lane: 27, val: 1.100000
warp lane: 28, val: 1.100000
warp lane: 29, val: 1.100000
warp lane: 30, val: 1.100000
warp lane: 31, val: 1.100000
========= ERROR SUMMARY: 0 errors
$
If this is for a learning exercise, great. If your interest is to do a robust implementation of a 4x4 batched matrix inverse, I would encourage you to use CUBLAS.