Adjusting CMakeLists.txt to build with CUDA

Asked: 2018-04-26 23:23:04

Tags: c++ makefile cmake cuda nvcc

I am working with a SLAM system. I have installed DSO, whose code can be seen here:

https://github.com/JakobEngel/dso

Everything works fine; I managed to compile and run it without errors. However, now I want to parallelize the code using CUDA, and I am having a hard time adapting its CMakeLists.txt so that it can use CUDA. The original CMakeLists.txt from dso is available here:

dso CMakeLists.txt

I tried to adapt it based on the changes another author made in their implementation for a different SLAM system:

ORB SLAM 2 CMakeLists.txt using CUDA

My CMakeLists.txt with my changes (which do not work) currently looks like this:

SET(PROJECT_NAME DSO)

PROJECT(${PROJECT_NAME})
CMAKE_MINIMUM_REQUIRED(VERSION 2.6)
#set(CMAKE_VERBOSE_MAKEFILE ON)

set(BUILD_TYPE Release)
#set(BUILD_TYPE RelWithDebInfo) 

set(EXECUTABLE_OUTPUT_PATH bin)
set(LIBRARY_OUTPUT_PATH lib)
set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)

# required libraries
#SET(CMAKE_INCLUDE_PATH ${CMAKE_INCLUDE_PATH} "/usr/include")
find_package(SuiteParse REQUIRED)
find_package(Eigen3 REQUIRED)
find_package(Boost)

# optional libraries
find_package(LibZip QUIET)
find_package(Pangolin 0.2 QUIET)
find_package(OpenCV QUIET)
#find_package(OpenACC)

# flags
add_definitions("-DENABLE_SSE")
set(CMAKE_CXX_FLAGS
   "${SSE_FLAGS} -O3 -g -std=c++11"
)

set(CMAKE_C_FLAGS
    "${SSE_FLAGS} -O3 -g -std=c++11"
)

#LIST(APPEND CMAKE_C_FLAGS "-Wall -Wextra -DUSE_NVTX") <<<< Error: doesn't recognize -Wall -Wextra
#LIST(APPEND CMAKE_CXX_FLAGS "-Wall -Wextra -DUSE_NVTX") << Error: doesn't recognize -Wall -Wextra

find_package(CUDA REQUIRED)
set(CUDA_PROPAGATE_HOST_FLAGS OFF)
SET(CUDA_HOST_COMPILER /usr/bin/g++)
LIST(APPEND CUDA_NVCC_FLAGS "--compiler-options -fno-strict-aliasing -use_fast_math -ccbin gcc-5")

set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -std=c++11")

if (MSVC)
     set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc")
endif (MSVC)

set(CMAKE_LIBRARY_OUTPUT_DIRECTORY lib)

cuda_include_directories(
  ${CUDA_TOOLKIT_ROOT_DIR}/samples/common/inc
)



# Sources files
set(dso_SOURCE_FILES
  ${PROJECT_SOURCE_DIR}/src/FullSystem/FullSystem.cpp
  ${PROJECT_SOURCE_DIR}/src/FullSystem/FullSystemOptimize.cpp
  ${PROJECT_SOURCE_DIR}/src/FullSystem/FullSystemOptPoint.cpp
  ${PROJECT_SOURCE_DIR}/src/FullSystem/FullSystemDebugStuff.cpp
  ${PROJECT_SOURCE_DIR}/src/FullSystem/FullSystemMarginalize.cpp
  ${PROJECT_SOURCE_DIR}/src/FullSystem/Residuals.cpp
  ${PROJECT_SOURCE_DIR}/src/FullSystem/CoarseTracker.cpp
  ${PROJECT_SOURCE_DIR}/src/FullSystem/CoarseInitializer.cpp
  ${PROJECT_SOURCE_DIR}/src/FullSystem/ImmaturePoint.cpp
  ${PROJECT_SOURCE_DIR}/src/FullSystem/HessianBlocks.cpp
  ${PROJECT_SOURCE_DIR}/src/FullSystem/PixelSelector2.cpp
  ${PROJECT_SOURCE_DIR}/src/OptimizationBackend/EnergyFunctional.cpp
  ${PROJECT_SOURCE_DIR}/src/OptimizationBackend/AccumulatedTopHessian.cpp
  ${PROJECT_SOURCE_DIR}/src/OptimizationBackend/AccumulatedSCHessian.cpp
  ${PROJECT_SOURCE_DIR}/src/OptimizationBackend/EnergyFunctionalStructs.cpp
  ${PROJECT_SOURCE_DIR}/src/util/settings.cpp
  ${PROJECT_SOURCE_DIR}/src/util/Undistort.cpp
  ${PROJECT_SOURCE_DIR}/src/util/globalCalib.cpp
)


include_directories(
  ${PROJECT_SOURCE_DIR}/src
  ${PROJECT_SOURCE_DIR}/thirdparty/Sophus
  ${PROJECT_SOURCE_DIR}/thirdparty/sse2neon
  ${EIGEN3_INCLUDE_DIR}
) 


# decide if we have pangolin
if (Pangolin_FOUND)
    message("--- found PANGOLIN, compiling dso_pangolin library.")
    include_directories( ${Pangolin_INCLUDE_DIRS} ) 
    set(dso_pangolin_SOURCE_FILES 
      ${PROJECT_SOURCE_DIR}/src/IOWrapper/Pangolin/KeyFrameDisplay.cpp
      ${PROJECT_SOURCE_DIR}/src/IOWrapper/Pangolin/PangolinDSOViewer.cpp)
    set(HAS_PANGOLIN 1)
else ()
    message("--- could not find PANGOLIN, not compiling dso_pangolin library.")
    message("    this means there will be no 3D display / GUI available for dso_dataset.")
    set(dso_pangolin_SOURCE_FILES )
    set(HAS_PANGOLIN 0)
endif ()

# decide if we have openCV
if (OpenCV_FOUND)
    message("--- found OpenCV, compiling dso_opencv library.")
    include_directories( ${OpenCV_INCLUDE_DIRS} )
    set(dso_opencv_SOURCE_FILES 
      ${PROJECT_SOURCE_DIR}/src/IOWrapper/OpenCV/ImageDisplay_OpenCV.cpp
      ${PROJECT_SOURCE_DIR}/src/IOWrapper/OpenCV/ImageRW_OpenCV.cpp)
    set(HAS_OPENCV 1)
else ()
    message("--- could not find OpenCV, not compiling dso_opencv library.")
    message("    this means there will be no image display, and image read / load functionality.")
    set(dso_opencv_SOURCE_FILES 
      ${PROJECT_SOURCE_DIR}/src/IOWrapper/ImageDisplay_dummy.cpp
      ${PROJECT_SOURCE_DIR}/src/IOWrapper/ImageRW_dummy.cpp)
    set(HAS_OPENCV 0)
endif ()

# decide if we have ziplib.
if (LIBZIP_LIBRARY)
    message("--- found ziplib (${LIBZIP_VERSION}), compiling with zip capability.")
    add_definitions(-DHAS_ZIPLIB=1)
    include_directories( ${LIBZIP_INCLUDE_DIR_ZIP} ${LIBZIP_INCLUDE_DIR_ZIPCONF} ) 
else()
    message("--- not found ziplib (${LIBZIP_LIBRARY}), compiling without zip capability.")
    set(LIBZIP_LIBRARY "")
endif()


# compile main library.
include_directories( ${CSPARSE_INCLUDE_DIR} ${CHOLMOD_INCLUDE_DIR}) 
cuda_add_library(dso SHARED ${dso_SOURCE_FILES} ${dso_opencv_SOURCE_FILES} ${dso_pangolin_SOURCE_FILES} 
${PROJECT_SOURCE_DIR}/src/teste.cu
)

#set_property( TARGET dso APPEND_STRING PROPERTY COMPILE_FLAGS -Wall )


if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") # OSX
    set(BOOST_THREAD_LIBRARY boost_thread-mt)
else()
    set(BOOST_THREAD_LIBRARY boost_thread)
endif()

# build main executable (only if we have both OpenCV and Pangolin)
if (OpenCV_FOUND AND Pangolin_FOUND)
    message("--- compiling dso_dataset.")
    add_executable(dso_dataset ${PROJECT_SOURCE_DIR}/src/main_dso_pangolin.cpp)
    target_link_libraries(dso_dataset dso boost_system cxsparse ${BOOST_THREAD_LIBRARY} ${LIBZIP_LIBRARY} ${Pangolin_LIBRARIES} ${OpenCV_LIBS})
else()
    message("--- not building dso_dataset, since either don't have openCV or Pangolin.")
endif()

unset(CMAKE_RUNTIME_OUTPUT_DIRECTORY)

So, 'main_dso_pangolin.cpp' is my main file. At this point, with only these changes, the code compiles. But I wanted to see if I could write some CUDA code. To do that, I created a 'teste.cu' file whose code is the same as one of the CUDA samples, shown below:

#include <stdlib.h>
#include <stdio.h>
#include <assert.h>

// CUDA runtime
#include </usr/local/cuda-9.0/include/cuda_runtime.h>
#include <cuda.h>

// helper functions and utilities to work with CUDA
#include </usr/local/cuda-9.0/samples/common/inc/helper_functions.h>
#include </usr/local/cuda-9.0/samples/common/inc/helper_cuda.h>

__global__ static void timedReduction(const float *input, float *output, clock_t *timer)
{
    // __shared__ float shared[2 * blockDim.x];
    extern __shared__ float shared[];

    const int tid = threadIdx.x;
    const int bid = blockIdx.x;

    if (tid == 0) timer[bid] = clock();

    // Copy input.
    shared[tid] = input[tid];
    shared[tid + blockDim.x] = input[tid + blockDim.x];

    // Perform reduction to find minimum.
    for (int d = blockDim.x; d > 0; d /= 2)
    {
        __syncthreads();

        if (tid < d)
        {
            float f0 = shared[tid];
            float f1 = shared[tid + d];

            if (f1 < f0)
            {
                shared[tid] = f1;
            }
        }
    }

    // Write result.
    if (tid == 0) output[bid] = shared[0];

    __syncthreads();

    if (tid == 0) timer[bid+gridDim.x] = clock();
}

#define NUM_BLOCKS    64
#define NUM_THREADS   256

void xx(int argc, char** argv){

    printf("CUDA Clock sample\n");

    // This will pick the best possible CUDA capable device
    int dev = findCudaDevice(argc, (const char **)argv);

    float *dinput = NULL;
    float *doutput = NULL;
    clock_t *dtimer = NULL;

    clock_t timer[NUM_BLOCKS * 2];
    float input[NUM_THREADS * 2];

    for (int i = 0; i < NUM_THREADS * 2; i++)
    {
        input[i] = (float)i;
    }

    checkCudaErrors(cudaMalloc((void **)&dinput, sizeof(float) * NUM_THREADS * 2));
    checkCudaErrors(cudaMalloc((void **)&doutput, sizeof(float) * NUM_BLOCKS));
    checkCudaErrors(cudaMalloc((void **)&dtimer, sizeof(clock_t) * NUM_BLOCKS * 2));

    checkCudaErrors(cudaMemcpy(dinput, input, sizeof(float) * NUM_THREADS * 2, cudaMemcpyHostToDevice));

    timedReduction<<<NUM_BLOCKS, NUM_THREADS, sizeof(float) * 2 *NUM_THREADS>>>(dinput, doutput, dtimer);

    checkCudaErrors(cudaMemcpy(timer, dtimer, sizeof(clock_t) * NUM_BLOCKS * 2, cudaMemcpyDeviceToHost));

    checkCudaErrors(cudaFree(dinput));
    checkCudaErrors(cudaFree(doutput));
    checkCudaErrors(cudaFree(dtimer));

    long double avgElapsedClocks = 0;

    for (int i = 0; i < NUM_BLOCKS; i++)
    {
        avgElapsedClocks += (long double) (timer[i + NUM_BLOCKS] - timer[i]);
    }

    avgElapsedClocks = avgElapsedClocks/NUM_BLOCKS;
    printf("Average clocks/block = %Lf\n", avgElapsedClocks);

}

In my main file, the first thing I do is call this function. This time, when I run 'cmake' and 'make', I get errors like the following:

/home/cesar/Documents/dso/src/teste.cu:18:21: error: ‘threadIdx’ was not declared in this scope
     const int tid = threadIdx.x;

/home/cesar/Documents/dso/src/teste.cu:19:21: error: ‘blockIdx’ was not declared in this scope
     const int bid = blockIdx.x;

I have the CUDA Toolkit correctly installed; this is the version:

cesar@cesar-X550JX:/usr/local/cuda/bin$ /usr/local/cuda/bin/nvcc --version
nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2017 NVIDIA Corporation
Built on Fri_Sep__1_21:08:03_CDT_2017
Cuda compilation tools, release 9.0, V9.0.176

What do you think I am doing wrong, or what am I missing? I am having a lot of difficulty modifying the CMakeLists.txt because of its complexity and the way it is structured.

--- EDIT ---

Running make -j with VERBOSE=1, I get this output, which tells me that the regular C++ compiler is being used:

/usr/bin/c++  -fPIC  -O3 -g -std=c++11 -D_FORCE_INLINES  -shared -Wl,-soname,libdso.so -o lib/libdso.so CMakeFiles/dso.dir/src/FullSystem/FullSystem.cpp.o CMakeFiles/dso.dir/src/FullSystem/FullSystemOptimize.cpp.o CMakeFiles/dso.dir/src/FullSystem/FullSystemOptPoint.cpp.o CMakeFiles/dso.dir/src/FullSystem/FullSystemDebugStuff.cpp.o CMakeFiles/dso.dir/src/FullSystem/FullSystemMarginalize.cpp.o CMakeFiles/dso.dir/src/FullSystem/Residuals.cpp.o CMakeFiles/dso.dir/src/FullSystem/CoarseTracker.cpp.o CMakeFiles/dso.dir/src/FullSystem/CoarseInitializer.cpp.o CMakeFiles/dso.dir/src/FullSystem/ImmaturePoint.cpp.o CMakeFiles/dso.dir/src/FullSystem/HessianBlocks.cpp.o CMakeFiles/dso.dir/src/FullSystem/PixelSelector2.cpp.o CMakeFiles/dso.dir/src/OptimizationBackend/EnergyFunctional.cpp.o CMakeFiles/dso.dir/src/OptimizationBackend/AccumulatedTopHessian.cpp.o CMakeFiles/dso.dir/src/OptimizationBackend/AccumulatedSCHessian.cpp.o CMakeFiles/dso.dir/src/OptimizationBackend/EnergyFunctionalStructs.cpp.o CMakeFiles/dso.dir/src/util/settings.cpp.o CMakeFiles/dso.dir/src/util/Undistort.cpp.o CMakeFiles/dso.dir/src/util/globalCalib.cpp.o CMakeFiles/dso.dir/src/IOWrapper/OpenCV/ImageDisplay_OpenCV.cpp.o CMakeFiles/dso.dir/src/IOWrapper/OpenCV/ImageRW_OpenCV.cpp.o CMakeFiles/dso.dir/src/IOWrapper/Pangolin/KeyFrameDisplay.cpp.o CMakeFiles/dso.dir/src/IOWrapper/Pangolin/PangolinDSOViewer.cpp.o CMakeFiles/dso.dir/src/dso_generated_teste.cu.o /usr/local/cuda/lib64/libcudart_static.a -lpthread -ldl -lrt

[ 96%] Building CXX object CMakeFiles/dso_dataset.dir/src/main_dso_pangolin.cpp.o
/usr/bin/c++   -DENABLE_SSE -DHAS_ZIPLIB=1 -I/usr/include/opencv -I/home/cesar/Documents/dso/src -I/home/cesar/Documents/dso/thirdparty/Sophus -I/home/cesar/Documents/dso/thirdparty/sse2neon -I/usr/include/eigen3 -I/home/cesar/Documents/Pangolin/include -I/home/cesar/Documents/Pangolin/build/src/include -I/usr/local/include -I/usr/include/suitesparse -I/usr/local/cuda/include  -O3 -g -std=c++11 -D_FORCE_INLINES   -o CMakeFiles/dso_dataset.dir/src/main_dso_pangolin.cpp.o -c /home/cesar/Documents/dso/src/main_dso_pangolin.cpp

I also tried separating the .cpp files from the .cu file, using add_library for the .cpp files and cuda_add_library for the .cu file, like this:

add_library(dso ${dso_SOURCE_FILES} ${dso_opencv_SOURCE_FILES} ${dso_pangolin_SOURCE_FILES})
cuda_add_library(my_cuda_lib ${PROJECT_SOURCE_DIR}/src/teste.cu)

and then using my_cuda_lib in target_link_libraries, like this:

target_link_libraries(dso_dataset dso boost_system cxsparse ${BOOST_THREAD_LIBRARY} ${LIBZIP_LIBRARY} ${Pangolin_LIBRARIES} ${OpenCV_LIBS} ${CUDA_LIBRARIES} my_cuda_lib)

But I still get the same errors.

--- EDIT: MCVE ---

To demonstrate my error I created a simple example. I have two files: my main in a .cpp file and my CUDA code in a .cu file. My main just calls the function defined in the other file, like this:

#include <iostream>
#include "hello_world.cu"

using namespace std;

int main()
{

    teste();

    return 0;

}

My .cu file looks like this:

#include <stdio.h>
#include <iostream>
// CUDA runtime
#include </usr/local/cuda-9.0/include/cuda_runtime.h>

// helper functions and utilities to work with CUDA
#include </usr/local/cuda-9.0/samples/common/inc/helper_functions.h>
#include </usr/local/cuda-9.0/samples/common/inc/helper_cuda.h>

__global__ void kernel (void){
  extern __shared__ float shared[];

  const int tid = threadIdx.x;
  const int bid = blockIdx.x;
}

int teste( void ) {
  kernel<<<1,1>>>();
  printf( "Hello, World!\n" ); 
  return 0;
}

The CMakeLists.txt I use to compile it looks like this:

cmake_minimum_required(VERSION 2.8)
set(CUDA_HOST_COMPILER /usr/bin/g++-5)
find_package(CUDA QUIET REQUIRED)

# Pass options to NVCC
set(
    CUDA_NVCC_FLAGS
    ${CUDA_NVCC_FLAGS};
    -O3
    )

# For compilation ...
# Specify target & source files to compile it from
cuda_add_executable(
    helloworld
    hello_world.cu
    teste.cpp
)

After running cmake and then "cmake --build ." (I don't know why it has to be this command; normally I just run make -j, but in this example only this works), I get the same errors as in my project: 'threadIdx' was not declared in this scope, and likewise for 'blockIdx' and so on.

1 Answer:

Answer 0 (score: 0):

Since you include the hello_world.cu file in your main code, you want it to be compiled with the nvcc compiler. To make that happen, rename your teste.cpp file to teste.cu (otherwise g++ will be used).

Also remove 'hello_world.cu' from the CMakeLists.txt (it is already included in the teste file), so that you have something like this:

cuda_add_executable(
    helloworld
    teste.cu
)

Then it should work.
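For completeness, the renamed teste.cu is simply the original main file from the question with a .cu extension; that extension is what makes CMake route it through nvcc, which then also compiles the included hello_world.cu code:

#include <iostream>
#include "hello_world.cu"   // pulled in directly, so nvcc compiles the kernel too

using namespace std;

int main()
{
    teste();

    return 0;
}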

--- EDIT: follow-up question ---

If you want to keep the .cpp file, then you need to separate what g++ can do for you from what nvcc should do. To that end, you can add an additional hello_world.h file to your project:

#ifndef HELLO_WORLD_H
#define HELLO_WORLD_H

int teste();

#endif

Include it in your teste.cpp:

#include <iostream>
#include "hello_world.h"

using namespace std;

int main()
{

    teste();

    return 0;

}

Then your CMakeLists.txt looks like your original example:

...
cuda_add_executable(
    helloworld
    teste.cpp
    hello_world.cu
)

In this case, hello_world.cu will be compiled with nvcc, while the compilation and linking of teste.cpp will be done by g++ (which works here because there is no CUDA code in teste.cpp).
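For completeness, here is a minimal sketch of what hello_world.cu itself can look like in this split setup. It assumes the CUDA include directory is on the include path (so the absolute /usr/local/cuda-9.0/... paths from the question are no longer needed) and includes the new header so the declaration and definition of teste() stay in sync:

#include <stdio.h>

// CUDA runtime
#include <cuda_runtime.h>

#include "hello_world.h"   // declaration of teste(), shared with teste.cpp

__global__ void kernel(void)
{
    // Dummy kernel: it only reads the built-in thread/block indices,
    // which only nvcc understands -- hence the original errors under g++.
    const int tid = threadIdx.x;
    const int bid = blockIdx.x;
    (void)tid;
    (void)bid;
}

int teste(void)
{
    kernel<<<1, 1>>>();        // launch one block with one thread
    cudaDeviceSynchronize();   // wait for the kernel before printing
    printf("Hello, World!\n");
    return 0;
}

The key point is simply that every translation unit containing CUDA syntax (kernels, <<<...>>> launches, threadIdx and so on) gets a .cu extension and is listed in cuda_add_executable, while plain C++ files such as teste.cpp are left to g++.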