如何在Windows / MSVC环境中使用Qt编译CUDA代码?

时间:2018-05-23 09:49:51

标签: c++ qt cuda visual-studio-2017

经过长时间的故障排除后,我设法让我的小测试程序与Qt Creator一起工作(从我看过,有几个人遇到了麻烦)。 我在这里分享一个解决方案(请参阅我的回答),随时评论或纠正可以改进的事情,特别是如果有人能解决下面提到的问题

我遇到的两个问题:

  • 可能有一种方法可以让所有内容一次编译和链接,但是当我尝试这个时,我总会得到一个奇怪的错误,指出无法找到main.cpp而MakeFile中的所有路径都是正确的。

  • 此外,我不确切知道原因,但使用-dlink或-dc选项启用可重定位代码会产生_cudaRegisterLinkedBinary外部符号错误。可重定位代码是否可以与单独的编译一起使用?这是因为编译时分配问题吗?

2 个答案:

答案 0 :(得分:1)

这是一个可能的解决方案。

基本思想是编码矩阵乘法例程。为此,我只使用了一个matMul.cu文件,其中包含一个调用CUDA内核函数的包装函数。然后,使用以下命令使用nvcc编译此文件:

nvcc.exe -lib -o lib_cuda/matMul.lib -c matMul.cu

拥有.lib文件,我可以使用带有静态链接的“添加库”工具在Qt中添加库,该工具会自动添加.pro文件中的最后8行。

以下是项目文件:

.pro文件:

QT -= gui

CONFIG += c++11 console
CONFIG -= app_bundle

SOURCES +=  main.cpp

# FIX: was "=+", which assigns the literal token "+ matMul.cu" instead of appending
OTHER_FILES += matMul.cu

# The following library conflicts with something in Cuda
QMAKE_LFLAGS_RELEASE = /NODEFAULTLIB:msvcrt.lib
# FIX: the second debug assignment used "=" and silently overwrote the first;
# use "+=" so both default libraries are excluded in debug builds
QMAKE_LFLAGS_DEBUG   = /NODEFAULTLIB:msvcrtd.lib
QMAKE_LFLAGS_DEBUG  += /NODEFAULTLIB:libcmt.lib


# Used to avoid conflicting flags between CUDA and MSVC files, should make everything static
QMAKE_CFLAGS_DEBUG      += /MTd
QMAKE_CFLAGS_RELEASE    += /MT
QMAKE_CXXFLAGS_DEBUG    += /MTd
QMAKE_CXXFLAGS_RELEASE  += /MT

# CUDA settings <-- may change depending on your system
CUDA_DIR        = "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v9.1"            # Path to cuda toolkit install
SYSTEM_NAME     = x64

# include paths (use forward slashes: qmake treats "\i" in "\include" as an escape)
CUDA_INC += $$CUDA_DIR/include
INCLUDEPATH += $$CUDA_INC

# library directories (removed stray trailing line continuation)
QMAKE_LIBDIR += $$CUDA_DIR/lib/$$SYSTEM_NAME

# Add the necessary CUDA libraries
LIBS += -lcuda -lcudart

# Add project related libraries containing kernels
# NOTE(review): the nvcc command above builds matMul.lib but this links matMul_d —
# confirm the library names actually match on disk
win32: LIBS += -L$$PWD/lib_cuda/ -lmatMul_d

INCLUDEPATH += $$PWD/lib_cuda
DEPENDPATH += $$PWD/lib_cuda

win32:!win32-g++: PRE_TARGETDEPS += $$PWD/lib_cuda/matMul_d.lib
else:win32-g++: PRE_TARGETDEPS += $$PWD/lib_cuda/libmatMul_d.a

main.cpp中:

#include <cmath>
#include <chrono>
#include <iostream>

#include <cuda.h>
#include <cuda_runtime.h>

// Row-major matrix view: 'elements' points to width*height floats owned by
// the caller. NOTE(review): this definition is duplicated in matMul.cu and the
// two must be kept layout-identical, since Matrix objects are passed by value
// across the compilation boundary.
typedef struct
{
    int width;    // number of columns
    int height;   // number of rows
    int stride;   // row stride in elements; callers set it to 1 and it is never read — TODO confirm intent

    float *elements;  // width*height floats, row-major
} Matrix;


// Implemented in matMul.cu (compiled by nvcc into lib_cuda/matMul.lib):
// computes C = A * B on the GPU. devProp is passed by value and currently unused.
void matMul_wrapper(Matrix &C, const Matrix &A, const Matrix &B, cudaDeviceProp devProp);

// Demo driver: enumerates CUDA devices, builds two 1000x1000 matrices,
// multiplies them on the GPU via matMul_wrapper, and prints a sample element.
int main()
{
    int devCount = 0;
    cudaGetDeviceCount(&devCount);

    // FIX: the original passed devProp to matMul_wrapper uninitialized when
    // no CUDA device was present; bail out early instead.
    if(devCount == 0)
    {
     std::cout << "No CUDA device found\n";
     return 1;
    }

    cudaDeviceProp devProp;  // holds the last enumerated device's properties
    for(int i=0; i < devCount; ++i)
    {
     cudaGetDeviceProperties(&devProp, i);
     std::cout << "\nDevice: "                   << devProp.name << "\n";
     // FIX: compute capability is major.minor, not major alone
     std::cout << "  Compute capability:     " << devProp.major << "." << devProp.minor << "\n";
     std::cout << "  Max threads per block:  " << devProp.maxThreadsPerBlock << "\n";
     std::cout << "  Warp size:              " << devProp.warpSize << "\n\n";
    }


    // 1000x1000 demo matrices; the stride field is set to 1 and unused downstream.
    Matrix A {1000, 1000, 1, new float[1000*1000]};
    Matrix B {1000, 1000, 1, new float[1000*1000]};
    Matrix C {B.width, A.height, 1, new float[1000*1000]};


    // Deterministic test pattern: element value grows linearly with its index.
    for(int row=0; row < A.height; ++row)
    {
     for(int col=0; col < A.width; ++col)
         A.elements[row*A.width + col] = (float)(row*A.width + col) / (float)100000;
    }

    for(int row=0; row < B.height; ++row)
    {
     for(int col=0; col < B.width; ++col)
         B.elements[row*B.width + col] = (float)(row*B.width + col) / (float)100000;
    }

    std::cout << A.elements[20000] << '\n';

    matMul_wrapper(C, A, B, devProp);

    // FIX: the original printed A.elements[20000] again, which never changes;
    // printing the corresponding result element actually verifies the multiply.
    std::cout << C.elements[20000] << '\n';

    delete[] A.elements;
    delete[] B.elements;
    delete[] C.elements;

    return 0;
}

matMul.cu:

#include <cuda.h>
#include <cuda_runtime.h>

#define BLOCK_SIZE 16

// Row-major matrix view; 'elements' points to width*height floats.
// NOTE(review): this definition is duplicated in main.cpp — the two structs are
// passed by value across the nvcc/MSVC boundary, so their layouts must stay
// byte-identical.
typedef struct
{
    int width;    // number of columns
    int height;   // number of rows
    int stride;   // row stride in elements; copied around but never read — TODO confirm intent

    float *elements;  // width*height floats, row-major
} Matrix;

// Naive matrix multiply: C = A * B, one thread per output element.
// Expects a 2D launch where blockDim/gridDim cover at least C.width x C.height;
// dimensions must satisfy A.width == B.height, C.width == B.width,
// C.height == A.height (not checked).
__global__
void matMulKernel(Matrix C, const Matrix A, const Matrix B)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;

    // FIX: the original guarded only the accumulation with a flat
    // "idx < width*height" test and then wrote C.elements[idx]
    // unconditionally. That (a) performed out-of-bounds global writes for
    // padding threads past the matrix, and (b) let a thread with col >= C.width
    // but small row pass the flat check and clobber another row's element.
    // Guard per-coordinate and keep the store inside the guard.
    if(row < C.height && col < C.width)
    {
        float out = 0.0f;
        for(int j=0; j < A.width; ++j)
            out += A.elements[row*A.width + j] * B.elements[j*B.width + col];

        C.elements[row*C.width + col] = out;
    }
}


// Host wrapper: copies A and B to the device, launches matMulKernel with one
// 2D thread per output element, and copies the result back into C.elements.
// Preconditions: A.width == B.height; C sized as B.width x A.height; all
// 'elements' pointers are host allocations of width*height floats.
// devProp is currently unused (kept for interface compatibility).
// NOTE(review): no CUDA error checking is done here — consider wrapping calls
// with a CUDA_CHECK macro and calling cudaGetLastError() after the launch.
void matMul_wrapper(Matrix &C, const Matrix &A, const Matrix &B, cudaDeviceProp devProp)
{
    dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
    // Ceil-divide so partial blocks cover the matrix edges.
    dim3 grid(  (C.width + block.x - 1) / block.x,
                (C.height + block.y - 1) / block.y,
                1);

    // FIX: the original used cudaMallocManaged together with explicit
    // cudaMemcpy, which works but defeats the point of managed memory;
    // plain device allocations match the explicit-copy pattern used here.
    Matrix d_A {A.width, A.height, A.stride, nullptr};
    size_t size = (size_t)A.height * A.width * sizeof(float);
    cudaMalloc(&d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);

    Matrix d_B {B.width, B.height, B.stride, nullptr};
    size = (size_t)B.height * B.width * sizeof(float);
    cudaMalloc(&d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);

    // C is write-only for the kernel, which covers every in-range element, so
    // the original's host-to-device copy of C was redundant and is dropped.
    Matrix d_C {C.width, C.height, C.stride, nullptr};
    size = (size_t)C.height * C.width * sizeof(float);
    cudaMalloc(&d_C.elements, size);

    matMulKernel<<<grid, block>>>(d_C, d_A, d_B);

    // Block until the kernel finishes before reading the result back.
    cudaDeviceSynchronize();

    cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);

    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}

希望这会有所帮助。

答案 1 :(得分:-1)

我发现一个解决方案正在Windows 10和MSVC 2017上运行。

对于您的方法，必须使用链接声明。在我的.cpp文件中，我使用以下 extern "C" 声明：

extern "C" void testCuda(uint8_t* output, uint8_t* input, int blockCount, int threadCount);

我的.cu文件如下:

// System includes
#include <stdlib.h>
#include <stdio.h>

// CUDA runtime
#include <cuda_runtime.h>

// helper functions and utilities to work with CUDA
#include "include/helper_cuda.h"
#include "include/helper_functions.h"

// Per-byte transform: each thread maps one input byte to 256 - value.
// NOTE(review): on uint8_t, 256 - x wraps to 0 for x == 0 (modular negation);
// if a photographic negative was intended this should probably be 255 - x — confirm.
// No bounds guard: the launch configuration must cover exactly the buffer length,
// otherwise threads past the end read/write out of bounds.
__global__ void testInside(uint8_t* output, uint8_t* input)
{
    output[threadIdx.x + blockIdx.x * blockDim.x] = 256 - input[threadIdx.x + blockIdx.x * blockDim.x];
}

////////////////////////////////////////////////////////////////////////////////
//! Entry point for Cuda functionality on host side
//! @param argc  command line argument count
//! @param argv  command line arguments
//! @param data  data to process on the device
//! @param len   len of \a data
////////////////////////////////////////////////////////////////////////////////

// Stub entry point: device selection is currently disabled and no device work
// is performed. Always reports failure so callers do not assume a GPU ran.
extern "C" bool runTest(const int argc, const char** argv, char* data, int2* data_int2, unsigned int len)
{
    // Kept for when device selection is re-enabled:
    //findCudaDevice(argc, static_cast<const char**>(argv));

    // Silence unused-parameter warnings until the function is implemented.
    (void)argc; (void)argv; (void)data; (void)data_int2; (void)len;

    return false;
}

// Host-side launcher for testInside: 'grid' blocks of 'threads' threads each,
// one thread per byte. Asynchronous — the caller must synchronize (or issue a
// blocking copy) before reading 'output'. Both pointers must be device memory.
extern "C" void testCuda(uint8_t* output, uint8_t* input, int grid, int threads)
{
    // FIX: the launch was garbled as "<< <grid, threads >> >" (a copy/paste or
    // HTML-extraction artifact), which is not valid CUDA syntax; it must be the
    // single <<< ... >>> launch token sequence.
    testInside<<<grid, threads>>>(output, input);
}

在我的.pro文件中，我使用以下代码段:

# FIX: OBJECTS_DIR must be expanded with $$ — the original assigned the literal
# text "OBJECTS_DIR/../cuda" as the path.
CUDA_OBJECTS_DIR = $$OBJECTS_DIR/../cuda

# C++ flags
QMAKE_CXXFLAGS_RELEASE =-O3

# MSVCRT link option (static or dynamic, it must be the same with your Qt SDK link option)
MSVCRT_LINK_FLAG_DEBUG   = "/MDd"
MSVCRT_LINK_FLAG_RELEASE = "/MD"

# CUDA settings
CUDA_DIR = $$(CUDA_PATH)            # Path to cuda toolkit install
SYSTEM_NAME = x64                   # Depending on your system either 'Win32', 'x64', or 'Win64'
SYSTEM_TYPE = 64                    # '32' or '64', depending on your system
CUDA_ARCH = sm_50                   # Type of CUDA architecture
NVCC_OPTIONS = --use_fast_math

# include paths
INCLUDEPATH += $$CUDA_DIR/include \
               $$CUDA_DIR/common/inc \
               $$CUDA_DIR/../shared/inc

# library directories
QMAKE_LIBDIR += $$CUDA_DIR/lib/$$SYSTEM_NAME \
                $$CUDA_DIR/common/lib/$$SYSTEM_NAME \
                $$CUDA_DIR/../shared/lib/$$SYSTEM_NAME

# The following makes sure all path names (which often include spaces) are put between quotation marks
CUDA_INC = $$join(INCLUDEPATH,'" -I"','-I"','"')

# Add the necessary libraries
# FIX: the original left a trailing "\" before the commented-out line, which
# continued the variable into the comment; keep the list self-terminating and
# put the optional libraries on their own comment line.
CUDA_LIB_NAMES = cudart_static kernel32 user32 gdi32 winspool comdlg32 \
                 advapi32 shell32 ole32 oleaut32 uuid odbc32 odbccp32
# Optional extras: freeglut glew32

for(lib, CUDA_LIB_NAMES) {
    CUDA_LIBS += -l$$lib
}
LIBS += $$CUDA_LIBS

# Configuration of the Cuda compiler
# NOTE(review): passing $$LIBS to a pure compile step (-c) is unusual — nvcc
# ignores libraries when only compiling; confirm whether it can be dropped.
CONFIG(debug, debug|release) {
    # Debug mode
    cuda_d.input = CUDA_SOURCES
    cuda_d.output = $$CUDA_OBJECTS_DIR/${QMAKE_FILE_BASE}_cuda.obj
    cuda_d.commands = $$CUDA_DIR/bin/nvcc.exe -D_DEBUG $$NVCC_OPTIONS $$CUDA_INC $$LIBS \
                      --machine $$SYSTEM_TYPE -arch=$$CUDA_ARCH \
                      --compile -cudart static -g -DWIN32 -D_MBCS \
                      -Xcompiler "/wd4819,/EHsc,/W3,/nologo,/Od,/Zi,/RTC1" \
                      -Xcompiler $$MSVCRT_LINK_FLAG_DEBUG \
                      -c -o ${QMAKE_FILE_OUT} ${QMAKE_FILE_NAME}
    cuda_d.dependency_type = TYPE_C
    QMAKE_EXTRA_COMPILERS += cuda_d
}
else {
    # Release mode
    cuda.input = CUDA_SOURCES
    cuda.output = $$CUDA_OBJECTS_DIR/${QMAKE_FILE_BASE}_cuda.obj
    cuda.commands = $$CUDA_DIR/bin/nvcc.exe $$NVCC_OPTIONS $$CUDA_INC $$LIBS \
                    --machine $$SYSTEM_TYPE -arch=$$CUDA_ARCH \
                    --compile -cudart static -DWIN32 -D_MBCS \
                    -Xcompiler "/wd4819,/EHsc,/W3,/nologo,/O2,/Zi" \
                    -Xcompiler $$MSVCRT_LINK_FLAG_RELEASE \
                    -c -o ${QMAKE_FILE_OUT} ${QMAKE_FILE_NAME}
    cuda.dependency_type = TYPE_C
    QMAKE_EXTRA_COMPILERS += cuda
}

我希望它能对某人有所帮助。