CUDA translation of in-register AVX permutes and shuffles

Date: 2019-08-15 17:41:41

Tags: cuda simd avx

I am trying to convert some AVX routines to CUDA, and most of the work is very straightforward. However, there are two translations that have me stumped, due to a lack of simple examples.

  1. How do I perform an arbitrary permutation of register float variables (always of length 32)? I have seen suggestions that __shfl_sync will do this, but no example showing it. A small example of what I would like to do on a length-8 array:

    """
    a == some float32 array of length 8;
    specific patterns will always cycle mod 4
    """
    b = a[[3,2,1,0,7,6,5,4]]
    
  2. How do I merge two register floats into a single register float? A simple example in numpy would be:

    """
    a == some float32 array of length 8;
    specific  patterns will always cycle mod 4
    """
    b = a[[3,2,1,0,7,6,5,4]] 
    

For anyone who knows the AVX intrinsics, question 1 is akin to a translation of _mm256_permute_ps, and question 2 is akin to a translation of _mm256_shuffle_ps.
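As a reference point for readers coming from the CUDA side, here is a minimal host-side sketch of those two intrinsics applied to the numpy patterns above. The immediates are my own reading of the patterns, so treat them as an assumption; build with -mavx.

    #include <immintrin.h>
    #include <cstdio>

    int main(){
      float av[8] = {0,1,2,3,4,5,6,7}, bv[8] = {10,11,12,13,14,15,16,17};
      __m256 a = _mm256_loadu_ps(av), b = _mm256_loadu_ps(bv);
      // question 1: b = a[[3,2,1,0,7,6,5,4]] -- the selector cycles mod 4,
      // i.e. it reverses each 128-bit lane
      __m256 q1 = _mm256_permute_ps(a, _MM_SHUFFLE(0,1,2,3));
      // question 2: c = [a0,a1, b0,b1, a4,a5, b4,b5] -- the low two slots of
      // each 128-bit lane come from a, the high two from b
      __m256 q2 = _mm256_shuffle_ps(a, b, _MM_SHUFFLE(1,0,1,0));
      float o1[8], o2[8];
      _mm256_storeu_ps(o1, q1);
      _mm256_storeu_ps(o2, q2);
      for (int i = 0; i < 8; i++) printf("%.0f %.0f\n", o1[i], o2[i]);
      return 0;
    }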

2 answers:

Answer 0 (score: 4)

  

How do I perform an arbitrary permutation of register float variables (always of length 32)? I have seen suggestions that __shfl_sync will do this, but no example showing it. A small example of what I would like to do on a length-8 array:

    """
    a == some float32 array of length 8;
    specific patterns will always cycle mod 4
    """
    b = a[[3,2,1,0,7,6,5,4]]

If each of the 32 values lives in its own warp lane, __shfl_sync can apply an arbitrary permutation directly. A worked example:

$ cat t1486.cu
#include <stdio.h>

__global__ void k(int *pattern){

  // each of the 32 warp lanes holds one element of the "register"
  float my_val = (float)threadIdx.x + 0.1f;
  // gather the value from the lane named by this lane's pattern entry
  my_val = __shfl_sync(0xFFFFFFFF, my_val, pattern[threadIdx.x]);
  printf("warp lane: %d, val: %f\n", threadIdx.x&31, my_val);
}

int main(){

  int pattern[32] = {3,2,1,0,7,6,5,4};        // the length-8 permutation from the question
  for (int i = 8; i<32; i++) pattern[i] = i;  // identity for the remaining lanes
  int *d_pattern;
  cudaMalloc(&d_pattern, sizeof(pattern));
  cudaMemcpy(d_pattern, pattern, sizeof(pattern), cudaMemcpyHostToDevice);
  k<<<1,32>>>(d_pattern);
  cudaDeviceSynchronize();
}


$ nvcc -o t1486 t1486.cu
$ cuda-memcheck ./t1486
========= CUDA-MEMCHECK
warp lane: 0, val: 3.100000
warp lane: 1, val: 2.100000
warp lane: 2, val: 1.100000
warp lane: 3, val: 0.100000
warp lane: 4, val: 7.100000
warp lane: 5, val: 6.100000
warp lane: 6, val: 5.100000
warp lane: 7, val: 4.100000
warp lane: 8, val: 8.100000
warp lane: 9, val: 9.100000
warp lane: 10, val: 10.100000
warp lane: 11, val: 11.100000
warp lane: 12, val: 12.100000
warp lane: 13, val: 13.100000
warp lane: 14, val: 14.100000
warp lane: 15, val: 15.100000
warp lane: 16, val: 16.100000
warp lane: 17, val: 17.100000
warp lane: 18, val: 18.100000
warp lane: 19, val: 19.100000
warp lane: 20, val: 20.100000
warp lane: 21, val: 21.100000
warp lane: 22, val: 22.100000
warp lane: 23, val: 23.100000
warp lane: 24, val: 24.100000
warp lane: 25, val: 25.100000
warp lane: 26, val: 26.100000
warp lane: 27, val: 27.100000
warp lane: 28, val: 28.100000
warp lane: 29, val: 29.100000
warp lane: 30, val: 30.100000
warp lane: 31, val: 31.100000
========= ERROR SUMMARY: 0 errors
$
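As an aside, since the question says the patterns always cycle mod 4, the pattern array in global memory can be avoided by computing the source lane arithmetically. A small sketch of that idea (the permute_mod4 helper name is mine, not a CUDA API):

#include <stdio.h>

// sketch, assuming the pattern repeats every 4 lanes; p0..p3 must be in 0..3
__device__ float permute_mod4(float v, int p0, int p1, int p2, int p3){
  int sel[4] = {p0, p1, p2, p3};
  int lane = threadIdx.x & 31;
  int src = (lane & ~3) | sel[lane & 3];   // same 4-lane group, permuted position
  return __shfl_sync(0xFFFFFFFF, v, src);
}

__global__ void k2(){
  float my_val = (float)threadIdx.x + 0.1f;
  my_val = permute_mod4(my_val, 3, 2, 1, 0);  // b = a[[3,2,1,0,7,6,5,4,...]]
  printf("warp lane: %d, val: %f\n", threadIdx.x&31, my_val);
}

int main(){
  k2<<<1,32>>>();
  cudaDeviceSynchronize();
}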

For question 2, the only thing I can come up with is fairly trivial. As suggested in my answer to question 1, one way to think about a 32-item float array is to have it "distributed" across the warp, one element per lane. I think that corresponds most closely to AVX-style processing.

If we follow that principle, then the code for question 2 could be as simple as:

$ cat t1487.cu
#include <stdio.h>

__global__ void k(int *pattern){

  // each thread holds both candidate values locally and simply selects one;
  // no inter-lane communication is needed for this particular merge
  float my_vals[2] = {1.1f, 2.2f};
  float my_val = my_vals[pattern[threadIdx.x]];
  printf("warp lane: %d, val: %f\n", threadIdx.x&31, my_val);
}

int main(){

  int pattern[32] = {0,0,1,1,0,0,1,1};        // which of the two values each lane keeps; cycles mod 4
  for (int i = 8; i<32; i++) pattern[i] = 0;
  int *d_pattern;
  cudaMalloc(&d_pattern, sizeof(pattern));
  cudaMemcpy(d_pattern, pattern, sizeof(pattern), cudaMemcpyHostToDevice);
  k<<<1,32>>>(d_pattern);
  cudaDeviceSynchronize();
}


$ nvcc -o t1487 t1487.cu
$ cuda-memcheck ./t1487
========= CUDA-MEMCHECK
warp lane: 0, val: 1.100000
warp lane: 1, val: 1.100000
warp lane: 2, val: 2.200000
warp lane: 3, val: 2.200000
warp lane: 4, val: 1.100000
warp lane: 5, val: 1.100000
warp lane: 6, val: 2.200000
warp lane: 7, val: 2.200000
warp lane: 8, val: 1.100000
warp lane: 9, val: 1.100000
warp lane: 10, val: 1.100000
warp lane: 11, val: 1.100000
warp lane: 12, val: 1.100000
warp lane: 13, val: 1.100000
warp lane: 14, val: 1.100000
warp lane: 15, val: 1.100000
warp lane: 16, val: 1.100000
warp lane: 17, val: 1.100000
warp lane: 18, val: 1.100000
warp lane: 19, val: 1.100000
warp lane: 20, val: 1.100000
warp lane: 21, val: 1.100000
warp lane: 22, val: 1.100000
warp lane: 23, val: 1.100000
warp lane: 24, val: 1.100000
warp lane: 25, val: 1.100000
warp lane: 26, val: 1.100000
warp lane: 27, val: 1.100000
warp lane: 28, val: 1.100000
warp lane: 29, val: 1.100000
warp lane: 30, val: 1.100000
warp lane: 31, val: 1.100000
========= ERROR SUMMARY: 0 errors
$

If this is a learning exercise, that's fine. If you want a robust implementation of batched 4x4 matrix inversion, I would suggest you use CUBLAS.
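A minimal sketch of that route, assuming cublasSmatinvBatched (the direct batched inverse, valid for n <= 32) is the entry point you want; the matrix data here is made up for illustration:

// build: nvcc -o inv inv.cu -lcublas
#include <cstdio>
#include <cublas_v2.h>

int main(){
  const int n = 4, batch = 1;
  float hA[n*n] = {2,0,0,0, 0,2,0,0, 0,0,2,0, 0,0,0,2};  // 2*I, column-major
  float *dA, *dC;
  cudaMalloc(&dA, sizeof(hA));
  cudaMalloc(&dC, sizeof(hA));
  cudaMemcpy(dA, hA, sizeof(hA), cudaMemcpyHostToDevice);
  // the batched API takes device arrays of per-matrix device pointers
  float *hAp[batch] = {dA}, *hCp[batch] = {dC};
  float **dAp, **dCp; int *dInfo;
  cudaMalloc(&dAp, sizeof(hAp));
  cudaMalloc(&dCp, sizeof(hCp));
  cudaMalloc(&dInfo, batch*sizeof(int));
  cudaMemcpy(dAp, hAp, sizeof(hAp), cudaMemcpyHostToDevice);
  cudaMemcpy(dCp, hCp, sizeof(hCp), cudaMemcpyHostToDevice);
  cublasHandle_t h;
  cublasCreate(&h);
  cublasSmatinvBatched(h, n, (const float * const *)dAp, n, dCp, n, dInfo, batch);
  float hC[n*n];
  cudaMemcpy(hC, dC, sizeof(hC), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; i++)
    printf("%f %f %f %f\n", hC[i], hC[i+4], hC[i+8], hC[i+12]);  // expect 0.5*I
  cublasDestroy(h);
}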

Answer 1 (score: 1)

Before Robert posted his answer, I had come up with a second solution to question 2. I will have to study the accepted answer a bit more, but at this point I am excited to have multiple options.

$ cat t1486.cu
#include <stdio.h>

__device__ unsigned pat[4];
const unsigned hpat[4] = {1, 1, 0, 0};

__global__ void k(int *pattern){

  float my_val = (float)threadIdx.x + 0.0f;    // the "a" register, one element per lane
  float my_val1 = (float)threadIdx.x + 32.0f;  // the "b" register
  float out_val = 0.0;
  // pat cycles {1,1,0,0}: the first two lanes of each 4-lane group keep a
  out_val = my_val*pat[threadIdx.x%4];
  // shift b up by 2 within each 4-lane group, then blend with the complementary mask
  out_val += __shfl_up_sync(0xFFFFFFFF, my_val1, 2, 4)*(1-pat[threadIdx.x%4]);
  printf("warp lane: %d, val: %f\n", threadIdx.x&31, out_val);
}

int main(){

  int pattern[32] = {3,2,1,0,7,6,5,4};        // leftover from the question-1 example; unused here
  for (int i = 8; i<32; i++) pattern[i] = i;
  int *d_pattern;
  cudaMemcpyToSymbol(pat, hpat, 4*sizeof(unsigned));
  cudaMalloc(&d_pattern, sizeof(pattern));
  cudaMemcpy(d_pattern, pattern, sizeof(pattern), cudaMemcpyHostToDevice);
  k<<<1,32>>>(d_pattern);
  cudaDeviceSynchronize();
}

$ nvcc -o t1486 t1486.cu
$ ./t1486
warp lane: 0, val: 0.000000
warp lane: 1, val: 1.000000
warp lane: 2, val: 32.000000
warp lane: 3, val: 33.000000
warp lane: 4, val: 4.000000
warp lane: 5, val: 5.000000
warp lane: 6, val: 36.000000
warp lane: 7, val: 37.000000
warp lane: 8, val: 8.000000
warp lane: 9, val: 9.000000
warp lane: 10, val: 40.000000
warp lane: 11, val: 41.000000
warp lane: 12, val: 12.000000
warp lane: 13, val: 13.000000
warp lane: 14, val: 44.000000
warp lane: 15, val: 45.000000
warp lane: 16, val: 16.000000
warp lane: 17, val: 17.000000
warp lane: 18, val: 48.000000
warp lane: 19, val: 49.000000
warp lane: 20, val: 20.000000
warp lane: 21, val: 21.000000
warp lane: 22, val: 52.000000
warp lane: 23, val: 53.000000
warp lane: 24, val: 24.000000
warp lane: 25, val: 25.000000
warp lane: 26, val: 56.000000
warp lane: 27, val: 57.000000
warp lane: 28, val: 28.000000
warp lane: 29, val: 29.000000
warp lane: 30, val: 60.000000
warp lane: 31, val: 61.000000
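
The same blend can also be phrased with plain __shfl_sync plus a per-lane select, which generalizes to any mod-4 merge pattern. A sketch of that variant (the naming and structure are mine):

#include <stdio.h>

// sketch: c = [a0,a1, b0,b1] cycling mod 4 across the warp, i.e. the
// warp-scale analog of _mm256_shuffle_ps(a, b, _MM_SHUFFLE(1,0,1,0))
__global__ void merge_k(){
  int lane = threadIdx.x & 31;
  float a = (float)lane;                // the "a" register, one element per lane
  float b = (float)lane + 32.0f;        // the "b" register
  int src = (lane & ~3) | (lane & 1);   // source position within each 4-lane group
  float from_a = __shfl_sync(0xFFFFFFFF, a, src);
  float from_b = __shfl_sync(0xFFFFFFFF, b, src);
  float c = ((lane & 3) < 2) ? from_a : from_b;  // low half from a, high half from b
  printf("warp lane: %d, val: %f\n", lane, c);
}

int main(){
  merge_k<<<1,32>>>();
  cudaDeviceSynchronize();
}

This should print the same values as the kernel above.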