Adding large integers with CUDA

Date: 2012-10-18 14:35:02

Tags: c cuda gpgpu thrust

I've been developing a cryptographic algorithm on the GPU and am currently stuck on an algorithm to perform large-integer addition. Big integers are represented in the usual way as a bunch of 32-bit words.

For example, we can use one thread to add two 32-bit words. For simplicity, assume that the numbers to be added are of the same length and that the number of threads per block == the number of words. Then:

__global__ void add_kernel(int *C, const int *A, const int *B) {
    int x = A[threadIdx.x];
    int y = B[threadIdx.x];
    int z = x + y;
    int carry = (z < x);
    /** do carry propagation in parallel somehow ? */
    ............

    z = z + newcarry; // update the resulting words after carry propagation
    C[threadIdx.x] = z;
}

I'm pretty sure there is a way to do the carry propagation via some tricky reduction procedure, but I could not figure it out.

I've looked at the CUDA Thrust extensions, but a big-integer package does not seem to be implemented yet. Perhaps someone can give me a hint how to do this on CUDA?

2 answers:

Answer 0 (score: 8):

You're right: carry propagation can be done via a prefix-sum computation, but it is a bit tricky to define the binary function for this operation and prove that it is associative (which parallel prefix sum requires). As a matter of fact, this algorithm is used (theoretically) in the Carry-lookahead adder.

Suppose we have two large integers a[0..n-1] and b[0..n-1]. Then we compute (for i = 0..n-1):

s[i] = a[i] + b[i];
carryin[i] = (s[i] < a[i]);

We define two functions:

generate[i] = carryin[i];
propagate[i] = (s[i] == 0xffffffff);

with a pretty intuitive meaning: generate[i] == 1 means that a carry is generated at position i, while propagate[i] == 1 means that a carry will be propagated from position (i - 1) to (i + 1). Our goal is to compute the function carryout[0..n-1], which is used to update the resulting sum s[0..n-1]. carryout can be computed recursively as follows:

carryout[i] = generate[i] OR (propagate[i] AND carryout[i-1])
carryout[0] = 0

Here carryout[i] == 1 if a carry is generated at position i, or if it was generated earlier and propagated up to position i. Finally, we update the resulting sum:

s[i] = s[i] + carryout[i-1];  for i = 1..n-1
carry = carryout[n-1];
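
A minimal sequential sketch of these formulas in plain C (the helper name add_ref is just illustrative), assuming 32-bit words with the least significant word first:

#include <stdint.h>

/* Sequential reference for the formulas above: c = a + b for n-word
   numbers, 32-bit words, least significant word first. Returns the
   final carry-out. */
static uint32_t add_ref(uint32_t *c, const uint32_t *a, const uint32_t *b, int n) {
    uint32_t carryout = 0;                        /* carryout[-1] == 0 */
    for (int i = 0; i < n; i++) {
        uint32_t s = a[i] + b[i];
        uint32_t generate  = (s < a[i]);          /* a carry is produced here */
        uint32_t propagate = (s == 0xffffffffu);  /* an incoming carry ripples on */
        c[i] = s + carryout;                      /* s[i] += carryout[i-1] */
        carryout = generate | (propagate & carryout);
    }
    return carryout;                              /* carryout[n-1] */
}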

Now it is quite straightforward to prove that the carryout function is indeed binary associative, and hence the parallel prefix-sum computation applies. To implement this on CUDA, we can merge the flags 'generate' and 'propagate' into a single variable, since they are mutually exclusive, i.e.:

cy[i] = (s[i] == -1u ? -1u : 0) | carryin[i];

In other words,

cy[i] = 0xffffffff  if propagate[i]
cy[i] = 1           if generate[i]
cy[i] = 0           otherwise

Then, one can verify that the following formula computes the prefix sum for the carryout function:

cy[i] = max((int)cy[i], (int)cy[k]) & cy[i];

for all k < i. The sample code below shows a large addition of 2048-word integers. Here I used CUDA blocks of 512 threads:

// add & output carry flag
#define UADDO(c, a, b) \
    asm volatile("add.cc.u32 %0, %1, %2;" : "=r"(c) : "r"(a), "r"(b));
// add with carry & output carry flag
#define UADDC(c, a, b) \
    asm volatile("addc.cc.u32 %0, %1, %2;" : "=r"(c) : "r"(a), "r"(b));

#define WS 32

__global__ void bignum_add(unsigned *g_R, const unsigned *g_A, const unsigned *g_B) {

    extern __shared__ unsigned shared[];
    unsigned *r = shared;

    const unsigned N_THIDS = 512;
    unsigned thid = threadIdx.x, thid_in_warp = thid & (WS - 1);
    unsigned ofs, cf;

    uint4 a = ((const uint4 *)g_A)[thid],
          b = ((const uint4 *)g_B)[thid];

    UADDO(a.x, a.x, b.x) // adding 128-bit chunks with carry flag
    UADDC(a.y, a.y, b.y)
    UADDC(a.z, a.z, b.z)
    UADDC(a.w, a.w, b.w)
    UADDC(cf, 0, 0) // save carry-out

    // memory consumption: 49 * N_THIDS / 64
    // use "alternating" data layout for each pair of warps
    volatile short *scan = (volatile short *)(r + 16 + thid_in_warp +
            49 * (thid / 64)) + ((thid / 32) & 1);

    scan[-32] = -1; // put identity element
    if(a.x == -1u && a.x == a.y && a.x == a.z && a.x == a.w)
        // this indicates that carry will propagate through the number
        cf = -1u;

    // "Hillis-and-Steele-style" reduction
    scan[0] = cf;
    cf = max((int)cf, (int)scan[-2]) & cf;
    scan[0] = cf;
    cf = max((int)cf, (int)scan[-4]) & cf;
    scan[0] = cf;
    cf = max((int)cf, (int)scan[-8]) & cf;
    scan[0] = cf;
    cf = max((int)cf, (int)scan[-16]) & cf;
    scan[0] = cf;
    cf = max((int)cf, (int)scan[-32]) & cf;
    scan[0] = cf;

    int *postscan = (int *)r + 16 + 49 * (N_THIDS / 64);
    if(thid_in_warp == WS - 1) // scan leading carry-outs once again
        postscan[thid >> 5] = cf;

    __syncthreads();

    if(thid < N_THIDS / 32) {
        volatile int *t = (volatile int *)postscan + thid;
        t[-8] = -1; // load identity symbol
        cf = t[0];
        cf = max((int)cf, (int)t[-1]) & cf;
        t[0] = cf;
        cf = max((int)cf, (int)t[-2]) & cf;
        t[0] = cf;
        cf = max((int)cf, (int)t[-4]) & cf;
        t[0] = cf;
    }
    __syncthreads();

    cf = scan[0];
    int ps = postscan[(int)((thid >> 5) - 1)]; // postscan[-1] equals -1
    scan[0] = max((int)cf, ps) & cf; // update carry flags within warps
    cf = scan[-2];

    if(thid_in_warp == 0)
        cf = ps;
    if((int)cf < 0)
        cf = 0;

    UADDO(a.x, a.x, cf) // propagate carry flag if needed
    UADDC(a.y, a.y, 0)
    UADDC(a.z, a.z, 0)
    UADDC(a.w, a.w, 0)
    ((uint4 *)g_R)[thid] = a;
}
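
As a quick sanity check of the combine step used in the scan above, the three possible cy values can be enumerated on the host. This is a small illustrative test of my own (the helper names imax and combine are made up, not part of the kernel); it also makes the associativity easy to see, since the result depends only on hi unless hi is the propagate value:

#include <assert.h>

static int imax(int x, int y) { return x > y ? x : y; }

/* cy encoding from above: 0xffffffffu = propagate, 1u = generate, 0u = neither.
   combine(hi, lo) folds the flag of the less significant position (lo)
   into the more significant one (hi). */
static unsigned combine(unsigned hi, unsigned lo) {
    return (unsigned)imax((int)hi, (int)lo) & hi;
}

int main(void) {
    unsigned vals[3] = { 0u, 1u, 0xffffffffu };
    for (int i = 0; i < 3; i++) {
        assert(combine(0xffffffffu, vals[i]) == vals[i]); /* propagate passes lo through */
        assert(combine(1u, vals[i]) == 1u);               /* generate stays a carry */
        assert(combine(0u, vals[i]) == 0u);               /* "neither" absorbs everything */
    }
    return 0;
}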

Note that the macros UADDO / UADDC might no longer be necessary, since CUDA 4.0 has corresponding intrinsics (though I am not entirely sure).

Also note that, although the parallel reduction is quite fast, if you need to add several large integers in a row it might be better to use a redundant representation (as suggested in the comments above), i.e., first accumulate the results of the additions in 64-bit words, and then perform a single carry propagation at the very end in one sweep.
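
A rough sequential sketch of that redundant-representation idea (illustrative only; the function names accumulate and resolve are made up), assuming 32-bit digits accumulated into 64-bit lanes, which cannot overflow as long as fewer than 2^32 numbers are summed:

#include <stdint.h>

/* Phase 1: accumulate m numbers digit-wise into 64-bit lanes, deferring
   all carry propagation (the high 32 bits of each lane absorb the
   carries; safe while m < 2^32). */
static void accumulate(uint64_t *acc, const uint32_t *const *xs, int m, int n) {
    for (int i = 0; i < n; i++) {
        uint64_t t = 0;
        for (int j = 0; j < m; j++)
            t += xs[j][i];                 /* no carries resolved here */
        acc[i] = t;
    }
}

/* Phase 2: one carry sweep converts the redundant form back into digits. */
static void resolve(uint32_t *c, const uint64_t *acc, int n) {
    uint64_t carry = 0;
    for (int i = 0; i < n; i++) {
        uint64_t t = acc[i] + carry;
        c[i] = (uint32_t)t;                /* low 32 bits are the digit */
        carry = t >> 32;                   /* defer the rest upward */
    }
}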

Answer 1 (score: 4):

I thought I would post my answer as well, in addition to @asm's, so that this SO question can serve as a repository of ideas. Similar to @asm, I detect and store the carry condition as well as a "carry-through" condition, i.e. the case where the intermediate word result is all 1s (0xF...FFF), so that if a carry were to propagate into this word, it would "carry through" to the next word.

I didn't use any PTX or asm in my code, so I chose to use 64-bit unsigned integers instead of 32-bit, to achieve the 2048x32-bit capability with 1024 threads.

The larger difference from @asm's code is in my parallel carry-propagation scheme. I construct a bit-packed array ("carry") in which each bit represents the carry condition generated by the independent intermediate 64-bit adds of each of the 1024 threads. I also construct a bit-packed array ("carry_through") in which each bit represents the carry-through condition of the individual 64-bit intermediate results. For 1024 threads, this amounts to 1024/64 = 16 x 64-bit words of shared memory for each bit-packed array, so the total shared memory usage is 64+3 32-bit quantities. With these bit-packed arrays, I perform the following to generate a combined propagated carry indicator:

carry = carry | (carry_through ^ ((carry & carry_through) + carry_through));

(Note that carry is shifted left by one: carry[i] denotes the carry-out result of a[i-1] + b[i-1].) The explanation is as follows:

  1. The bitwise AND of carry and carry_through generates the candidates
     where a carry will interact with a sequence of one or more
     carry-through conditions.
  2. Adding the result of step 1 to carry_through produces a result whose
     changed bits represent all the words that will be affected by the
     propagation of the carry through the carry-through sequence.
  3. Taking the exclusive OR of carry_through and the result of step 2
     marks the affected results with a 1 bit.
  4. Taking the bitwise OR of the result of step 3 and the ordinary carry
     indicators gives the combined carry condition, which is then used to
     update all intermediate results.

Note that the addition in step 2 requires another multi-word add (for big integers composed of more than 64 words). I believe this algorithm works, and it has passed the test cases I have thrown at it; a small worked example follows below.
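
Here is a worked example of the formula above (my own illustration, separate from the kernel): word 0 generates a carry, and words 1 and 2 are all 1s, so the carry should ripple into words 1, 2 and 3:

    #include <stdio.h>

    int main(void) {
        // carry is shifted left by one: bit 1 set means word 0 generated a carry
        unsigned long long carry         = 0x2ull;  // ...0010
        unsigned long long carry_through = 0x6ull;  // ...0110 (words 1 and 2 are all 1s)
        carry = carry | (carry_through ^ ((carry & carry_through) + carry_through));
        printf("0x%llX\n", carry);  // prints 0xE: words 1, 2 and 3 get incremented
        return 0;
    }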

Here is my sample code:

    // parallel add of large integers
    // requires CC 2.0 or higher
    // compile with:
    // nvcc -O3 -arch=sm_20 -o paradd2 paradd2.cu
    #include <stdio.h>
    #include <stdlib.h>
    
    #define MAXSIZE 1024 // the number of 64 bit quantities that can be added
    #define LLBITS 64  // the number of bits in a long long
    #define BSIZE ((MAXSIZE + LLBITS -1)/LLBITS) // MAXSIZE when packed into bits
    #define nTPB MAXSIZE
    
    // define either GPU or GPUCOPY, not both -- for timing
    #define GPU
    //#define GPUCOPY
    
    #define LOOPCNT 1000
    
    #define cudaCheckErrors(msg) \
        do { \
            cudaError_t __err = cudaGetLastError(); \
            if (__err != cudaSuccess) { \
                fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                    msg, cudaGetErrorString(__err), \
                    __FILE__, __LINE__); \
                fprintf(stderr, "*** FAILED - ABORTING\n"); \
                exit(1); \
            } \
        } while (0)
    
    // perform c = a + b, for unsigned integers of psize*64 bits.
    // all work done in a single threadblock.
    // multiple threadblocks are handling multiple separate addition problems
    // least significant word is at a[0], etc.
    
    __global__ void paradd(const unsigned size, const unsigned psize, unsigned long long *c, const unsigned long long *a, const unsigned long long *b){
    
      __shared__ unsigned long long carry_through[BSIZE];
      __shared__ unsigned long long carry[BSIZE+1];
      __shared__ volatile unsigned mcarry;
      __shared__ volatile unsigned mcarry_through;
    
      unsigned idx = threadIdx.x + (psize * blockIdx.x);
      if ((threadIdx.x < psize) && (idx < size)){
        // handle 64 bit unsigned add first
        unsigned long long cr1 = a[idx];
        unsigned long long lc = cr1 + b[idx];
        // handle carry
        if (threadIdx.x < BSIZE){
          carry[threadIdx.x] = 0;
          carry_through[threadIdx.x] = 0;
          }
        if (threadIdx.x == 0){
          mcarry = 0;
          mcarry_through = 0;
          }
        __syncthreads();
        if (lc < cr1){
          if ((threadIdx.x%LLBITS) != (LLBITS-1))  
            atomicAdd(&(carry[threadIdx.x/LLBITS]), (2ull<<(threadIdx.x%LLBITS)));
          else atomicAdd(&(carry[(threadIdx.x/LLBITS)+1]), 1);
          }
        // handle carry-through
        if (lc == 0xFFFFFFFFFFFFFFFFull) 
          atomicAdd(&(carry_through[threadIdx.x/LLBITS]), (1ull<<(threadIdx.x%LLBITS))); 
        __syncthreads();
        if (threadIdx.x < ((psize + LLBITS-1)/LLBITS)){
          // only 1 warp executing within this if statement
          unsigned long long cr3 = carry_through[threadIdx.x];
          cr1 = carry[threadIdx.x] & cr3;
          // start of sub-add
          unsigned long long cr2 = cr3 + cr1;
          if (cr2 < cr1) atomicAdd((unsigned *)&mcarry, (2u<<(threadIdx.x)));
          if (cr2 == 0xFFFFFFFFFFFFFFFFull) atomicAdd((unsigned *)&mcarry_through, (1u<<threadIdx.x));
          if (threadIdx.x == 0) {
            unsigned cr4 = mcarry & mcarry_through;
            cr4 += mcarry_through;
            mcarry |= (mcarry_through ^ cr4); 
            }
          if (mcarry & (1u<<threadIdx.x)) cr2++;
          // end of sub-add
          carry[threadIdx.x] |= (cr2 ^ cr3);
          }
        __syncthreads();
        if (carry[threadIdx.x/LLBITS] & (1ull<<(threadIdx.x%LLBITS))) lc++;
        c[idx] = lc;
      }
    }
    
    int main() {
    
      unsigned long long *h_a, *h_b, *h_c, *d_a, *d_b, *d_c, *c;
      unsigned at_once = 256;   // valid range = 1 .. 65535
      unsigned prob_size = MAXSIZE ; // valid range = 1 .. MAXSIZE
      unsigned dsize = at_once * prob_size;
      cudaEvent_t t_start_gpu, t_start_cpu, t_end_gpu, t_end_cpu;
      float et_gpu, et_cpu, tot_gpu, tot_cpu;
      tot_gpu = 0;
      tot_cpu = 0;
    
    
      if (sizeof(unsigned long long) != (LLBITS/8)) {printf("Word Size Error\n"); return 1;}
      if ((c = (unsigned long long *)malloc(dsize * sizeof(unsigned long long)))  == 0) {printf("Malloc Fail\n"); return 1;}
    
      cudaHostAlloc((void **)&h_a, dsize * sizeof(unsigned long long), cudaHostAllocDefault);
      cudaCheckErrors("cudaHostAlloc1 fail");
      cudaHostAlloc((void **)&h_b, dsize * sizeof(unsigned long long), cudaHostAllocDefault);
      cudaCheckErrors("cudaHostAlloc2 fail");
      cudaHostAlloc((void **)&h_c, dsize * sizeof(unsigned long long), cudaHostAllocDefault);
      cudaCheckErrors("cudaHostAlloc3 fail");
    
      cudaMalloc((void **)&d_a, dsize * sizeof(unsigned long long));
      cudaCheckErrors("cudaMalloc1 fail");
      cudaMalloc((void **)&d_b, dsize * sizeof(unsigned long long));
      cudaCheckErrors("cudaMalloc2 fail");
      cudaMalloc((void **)&d_c, dsize * sizeof(unsigned long long));
      cudaCheckErrors("cudaMalloc3 fail");
      cudaMemset(d_c, 0, dsize*sizeof(unsigned long long));
    
      cudaEventCreate(&t_start_gpu);
      cudaEventCreate(&t_end_gpu);
      cudaEventCreate(&t_start_cpu);
      cudaEventCreate(&t_end_cpu);
    
      for (unsigned loops = 0; loops <LOOPCNT; loops++){
      //create some test cases
      if (loops == 0){
      for (int j=0; j<at_once; j++)
      for (int k=0; k<prob_size; k++){
        int i= (j*prob_size) + k;
        h_a[i] = 0xFFFFFFFFFFFFFFFFull;
        h_b[i] = 0;
        }
        h_a[prob_size-1] = 0;
        h_b[prob_size-1] = 1;
        h_b[0] = 1;
      }
      else if (loops == 1){
      for (int i=0; i<dsize; i++){
        h_a[i] = 0xFFFFFFFFFFFFFFFFull;
        h_b[i] = 0;
        }
        h_b[0] = 1;
      }
      else if (loops == 2){
      for (int i=0; i<dsize; i++){
        h_a[i] = 0xFFFFFFFFFFFFFFFEull;
        h_b[i] = 2;
        }
        h_b[0] = 1;
      }
      else {
      for (int i = 0; i<dsize; i++){
        h_a[i] = (((unsigned long long)lrand48())<<33) + (unsigned long long)lrand48();
        h_b[i] = (((unsigned long long)lrand48())<<33) + (unsigned long long)lrand48();
        }
      }
    #ifdef GPUCOPY
      cudaEventRecord(t_start_gpu, 0);
    #endif
      cudaMemcpy(d_a, h_a, dsize*sizeof(unsigned long long), cudaMemcpyHostToDevice);
      cudaCheckErrors("cudaMemcpy1 fail");
      cudaMemcpy(d_b, h_b, dsize*sizeof(unsigned long long), cudaMemcpyHostToDevice);
      cudaCheckErrors("cudaMemcpy2 fail");
    #ifdef GPU
      cudaEventRecord(t_start_gpu, 0);
    #endif
      paradd<<<at_once, nTPB>>>(dsize, prob_size, d_c, d_a, d_b);
      cudaCheckErrors("Kernel Fail");
    #ifdef GPU
      cudaEventRecord(t_end_gpu, 0);
    #endif
      cudaMemcpy(h_c, d_c, dsize*sizeof(unsigned long long), cudaMemcpyDeviceToHost);
      cudaCheckErrors("cudaMemcpy3 fail");
    #ifdef GPUCOPY
      cudaEventRecord(t_end_gpu, 0);
    #endif
      cudaEventSynchronize(t_end_gpu);
      cudaEventElapsedTime(&et_gpu, t_start_gpu, t_end_gpu);
      tot_gpu += et_gpu;
      cudaEventRecord(t_start_cpu, 0);
      //also compute result on CPU for comparison
      for (int j=0; j<at_once; j++) {
        unsigned rc=0;
        for (int n=0; n<prob_size; n++){
          unsigned i = (j*prob_size) + n;
          c[i] = h_a[i] + h_b[i];
          if (c[i] < h_a[i]) {
            c[i] += rc;
            rc=1;}
          else {
            if ((c[i] += rc) != 0) rc=0;
            }
          if (c[i] != h_c[i]) {printf("Results mismatch at offset %d, GPU = 0x%llX, CPU = 0x%llX\n", i, h_c[i], c[i]); return 1;}
          }
        }
      cudaEventRecord(t_end_cpu, 0);
      cudaEventSynchronize(t_end_cpu);
      cudaEventElapsedTime(&et_cpu, t_start_cpu, t_end_cpu);
      tot_cpu += et_cpu;
      if ((loops%(LOOPCNT/10)) == 0) printf("*\n");
      }
      printf("\nResults Match!\n");
      printf("Average GPU time = %fms\n", (tot_gpu/LOOPCNT));
      printf("Average CPU time = %fms\n", (tot_cpu/LOOPCNT));
    
      return 0;
    }