I am trying to interface the sparse cuSOLVER routine cusolverSpDcsrlsvqr() (>= CUDA 7.0) from PyCUDA and am facing some difficulties: I tried to wrap the routine the same way the dense cuSOLVER routines are wrapped in scikits-cuda (https://github.com/lebedov/scikits.cuda/blob/master/scikits/cuda/cusolver.py).
However, the code crashes with a segmentation fault when cusolverSpDcsrlsvqr() is called.
Debugging with cuda-gdb (cuda-gdb --args python -m pycuda.debug test.py; run; bt) yields the following stack trace:
#0  0x00007fffd9e3b71a in cusolverSpXcsrissymHost () from /usr/local/cuda/lib64/libcusolver.so
#1  0x00007fffd9df5237 in hsolverXcsrqr_zeroPivot () from /usr/local/cuda/lib64/libcusolver.so
#2  0x00007fffd9e0c764 in hsolverXcsrqr_analysis_coletree () from /usr/local/cuda/lib64/libcusolver.so
#3  0x00007fffd9f160a0 in cusolverXcsrqr_analysis () from /usr/local/cuda/lib64/libcusolver.so
#4  0x00007fffd9f28d78 in cusolverSpScsrlsvqr () from /usr/local/cuda/lib64/libcusolver.so
This is strange, because I am not calling cusolverSpScsrlsvqr() (note the S instead of the D), and I would not expect it to call a host function (cusolverSpXcsrissymHost) either.
Here is the code I am talking about - thanks for your help:
# ### Interface cuSOLVER PyCUDA
import pycuda.gpuarray as gpuarray
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
import scipy.sparse as sp
import ctypes
# #### wrap the cuSOLVER cusolverSpDcsrlsvqr() using ctypes
# cuSparse
_libcusparse = ctypes.cdll.LoadLibrary('libcusparse.so')
class cusparseMatDescr_t(ctypes.Structure):
    _fields_ = [
        ('MatrixType', ctypes.c_int),
        ('FillMode', ctypes.c_int),
        ('DiagType', ctypes.c_int),
        ('IndexBase', ctypes.c_int)
    ]
_libcusparse.cusparseCreate.restype = int
_libcusparse.cusparseCreate.argtypes = [ctypes.c_void_p]
_libcusparse.cusparseDestroy.restype = int
_libcusparse.cusparseDestroy.argtypes = [ctypes.c_void_p]
_libcusparse.cusparseCreateMatDescr.restype = int
_libcusparse.cusparseCreateMatDescr.argtypes = [ctypes.c_void_p]
# cuSOLVER
_libcusolver = ctypes.cdll.LoadLibrary('libcusolver.so')
_libcusolver.cusolverSpCreate.restype = int
_libcusolver.cusolverSpCreate.argtypes = [ctypes.c_void_p]
_libcusolver.cusolverSpDestroy.restype = int
_libcusolver.cusolverSpDestroy.argtypes = [ctypes.c_void_p]
_libcusolver.cusolverSpDcsrlsvqr.restype = int
_libcusolver.cusolverSpDcsrlsvqr.argtypes = [ctypes.c_void_p,
                                             ctypes.c_int,
                                             ctypes.c_int,
                                             cusparseMatDescr_t,
                                             ctypes.c_void_p,
                                             ctypes.c_void_p,
                                             ctypes.c_void_p,
                                             ctypes.c_void_p,
                                             ctypes.c_double,
                                             ctypes.c_int,
                                             ctypes.c_void_p,
                                             ctypes.c_void_p]
#### Prepare the matrix and parameters, copy to Device via gpuarray
# coo to csr
val = np.arange(1,5,dtype=np.float64)
col = np.arange(0,4,dtype=np.int32)
row = np.arange(0,4,dtype=np.int32)
A = sp.coo_matrix((val,(row,col))).todense()
Acsr = sp.csr_matrix(A)
b = np.ones(4)
x = np.empty(4)
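# A = diag(1,2,3,4) and b = ones(4), so the expected solution is x = [1, 1/2, 1/3, 1/4]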
print('A:' + str(A))
print('b: ' + str(b))
dcsrVal = gpuarray.to_gpu(Acsr.data)
dcsrColInd = gpuarray.to_gpu(Acsr.indices)
dcsrIndPtr = gpuarray.to_gpu(Acsr.indptr)
dx = gpuarray.to_gpu(x)
db = gpuarray.to_gpu(b)
m = ctypes.c_int(4)
nnz = ctypes.c_int(4)
descrA = cusparseMatDescr_t()
reorder = ctypes.c_int(0)
tol = ctypes.c_double(1e-10)
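# singularity is an output argument: set to -1 if A is invertible under tol,
# otherwise to the index of the first (near-)zero pivot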
singularity = ctypes.c_int(99)
#create cusparse handle
_cusp_handle = ctypes.c_void_p()
status = _libcusparse.cusparseCreate(ctypes.byref(_cusp_handle))
print('status: ' + str(status))
cusp_handle = _cusp_handle.value
#create MatDescriptor
status = _libcusparse.cusparseCreateMatDescr(ctypes.byref(descrA))
print('status: ' + str(status))
#create cusolver handle
_cuso_handle = ctypes.c_void_p()
status = _libcusolver.cusolverSpCreate(ctypes.byref(_cuso_handle))
print('status: ' + str(status))
cuso_handle = _cuso_handle.value
print('cusp handle: ' + str(cusp_handle))
print('cuso handle: ' + str(cuso_handle))
### Call solver
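# device pointers from the gpuarrays are passed as plain Python ints (int(x.gpudata)) to match ctypes.c_void_p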
_libcusolver.cusolverSpDcsrlsvqr(cuso_handle,
                                 m,
                                 nnz,
                                 descrA,
                                 int(dcsrVal.gpudata),
                                 int(dcsrIndPtr.gpudata),
                                 int(dcsrColInd.gpudata),
                                 int(db.gpudata),
                                 tol,
                                 reorder,
                                 int(dx.gpudata),
                                 ctypes.byref(singularity))
# destroy handles
status = _libcusolver.cusolverSpDestroy(cuso_handle)
print('status: ' + str(status))
status = _libcusparse.cusparseDestroy(cusp_handle)
print('status: ' + str(status))
Answer 0 (score: 3)
Setting descrA to ctypes.c_void_p() and replacing cusparseMatDescr_t with ctypes.c_void_p in the cusolverSpDcsrlsvqr wrapper solves the problem.
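For reference, a minimal sketch of what that change looks like, assuming the rest of the script above stays as posted (the parameter-name comments are taken from the cuSOLVER documentation):

# descrA becomes an opaque handle that cusparseCreateMatDescr() fills in,
# instead of a hand-built ctypes.Structure
descrA = ctypes.c_void_p()
status = _libcusparse.cusparseCreateMatDescr(ctypes.byref(descrA))

# the descriptor is then declared as a plain pointer in the wrapper signature
_libcusolver.cusolverSpDcsrlsvqr.argtypes = [ctypes.c_void_p,  # handle
                                             ctypes.c_int,     # m
                                             ctypes.c_int,     # nnz
                                             ctypes.c_void_p,  # descrA (was cusparseMatDescr_t)
                                             ctypes.c_void_p,  # csrValA
                                             ctypes.c_void_p,  # csrRowPtrA
                                             ctypes.c_void_p,  # csrColIndA
                                             ctypes.c_void_p,  # b
                                             ctypes.c_double,  # tol
                                             ctypes.c_int,     # reorder
                                             ctypes.c_void_p,  # x
                                             ctypes.c_void_p]  # singularity

The call site can then pass descrA unchanged. This works because cusparseMatDescr_t is an opaque pointer type in the cuSPARSE C API: cusparseCreateMatDescr() allocates the descriptor and returns a handle to it, so declaring it as a by-value four-int structure hands the library a corrupted descriptor, which is consistent with the crash inside the analysis routines.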