
LU factorization gives different results between LAPACK and cuBLAS/cuSOLVER

Stack Overflow user
Asked on 2019-11-11 10:56:46
1 answer · 520 views · 0 followers · 0 votes

I am testing some scenarios in which the dgetrf function returns different results when used through cuBLAS/cuSOLVER than when called directly from LAPACK. For example, I am looking at the LU factorization of the following matrix:

     2.0   4.0   1.0  -3.0   0.0
    -1.0  -2.0   2.0   4.0   0.0
     4.0   2.0  -3.0   5.0   0.0
     5.0  -4.0  -3.0   1.0   0.0
     0.0   0.0   0.0   0.0   0.0

I first tried calling dgetrf through cuBLAS/cuSOLVER as shown below (warning: ugly test code ahead!):

    #include <cblas.h>
    #include <time.h>
    #include <stdio.h>
    #include <string.h>
    #include <cuda_runtime.h>
    #include <cublas_v2.h>
    #include <cusolverDn.h>

    int main(int argc, char** argv){

        const int matrixSize = 5;

        int i, j;
        double arrA[matrixSize][matrixSize] = {
            {2.0, 4.0, 1.0, -3.0, 0.0},
            {-1.0, -2.0, 2.0, 4.0, 0.0},
            {4.0, 2.0, -3.0, 5.0, 0.0},
            {5.0, -4.0, -3.0, 1.0, 0.0},
            {0.0, 0.0, 0.0, 0.0, 0.0}
        };

        double *arrADev, *workArray;
        double **matrixArray;
        int *pivotArray;
        int *infoArray;
        double flat[matrixSize*matrixSize] = {0};
        cublasHandle_t cublasHandle;
        cublasStatus_t cublasStatus;
        cudaError_t error;

        cudaError cudaStatus;
        cusolverStatus_t cusolverStatus;
        cusolverDnHandle_t cusolverHandle;

        double *matrices[2];


        error = cudaMalloc(&arrADev,  sizeof(double) * matrixSize*matrixSize);
        if (error != cudaSuccess) fprintf(stderr,"\nError: %s\n",cudaGetErrorString(error));

        error = cudaMalloc(&matrixArray,  sizeof(double*) * 2);
        if (error != cudaSuccess) fprintf(stderr,"\nError: %s\n",cudaGetErrorString(error));

        error = cudaMalloc(&pivotArray,  sizeof(int) * matrixSize*matrixSize);
        if (error != cudaSuccess) fprintf(stderr,"\nError: %s\n",cudaGetErrorString(error));

        error = cudaMalloc(&infoArray,  sizeof(int) * matrixSize*matrixSize);
        if (error != cudaSuccess) fprintf(stderr,"\nError: %s\n",cudaGetErrorString(error));

        cublasStatus = cublasCreate(&cublasHandle);
        if (cublasStatus != CUBLAS_STATUS_SUCCESS) fprintf(stderr,"error %i\n",cublasStatus);

        //maps matrix to flat vector
        for(i=0; i<matrixSize; i++){
            for(j=0; j<matrixSize; j++){
                flat[i+j*matrixSize] = arrA[i][j];
            }
        }

        //copy matrix A to device
        cublasStatus = cublasSetMatrix(matrixSize, matrixSize, sizeof(double), flat, matrixSize, arrADev, matrixSize);
        if (cublasStatus != CUBLAS_STATUS_SUCCESS) fprintf(stderr,"error %i\n",cublasStatus);

        //save matrix address
        matrices[0] = arrADev;

        //copy matrices references to device
        error = cudaMemcpy(matrixArray, matrices, sizeof(double*)*1, cudaMemcpyHostToDevice);
        if (error != cudaSuccess) fprintf(stderr,"\nError: %s\n",cudaGetErrorString(error));

        int Lwork;
        // calculate buffer size for cuSOLVER LU factorization
        cusolverStatus = cusolverDnDgetrf_bufferSize(cusolverHandle, matrixSize, matrixSize, arrADev, matrixSize, &Lwork);
        cudaStatus = cudaMalloc((void**)&workArray, Lwork*sizeof(double));

        // cuBLAS LU factorization
        cublasStatus = cublasDgetrfBatched(cublasHandle, matrixSize, matrixArray, matrixSize, pivotArray, infoArray, 1);
        if (cublasStatus == CUBLAS_STATUS_SUCCESS)
            printf("cuBLAS DGETRF SUCCESSFUL! \n");
        else
            printf("cuBLAS DGETRF UNSUCCESSFUL! \n");

        // cuSOLVER LU factorization
        cusolverStatus = cusolverDnCreate(&cusolverHandle);
        cusolverStatus = cusolverDnDgetrf(cusolverHandle, matrixSize, matrixSize, arrADev, matrixSize, workArray, pivotArray, infoArray);
        if (cusolverStatus == CUSOLVER_STATUS_SUCCESS)
            printf("cuSOLVER DGETRF SUCCESSFUL! \n");
        else
            printf("cuSOLVER DGETRF UNSUCCESSFUL! \n");

        return 0;
    }

The output of the code above is:

    cuBLAS DGETRF SUCCESSFUL!
    cuSOLVER DGETRF SUCCESSFUL!

When I try to do the same thing with LAPACK (warning: even uglier code!):

    #include <iostream>
    #include <vector>

    using namespace std;

    extern "C" void dgetrf_(int* dim1, int* dim2, double* a, int* lda, int* ipiv, int* info);
    extern "C" void dgetrs_(char *TRANS, int *N, int *NRHS, double *A, int *LDA, int *IPIV, double *B, int *LDB, int *INFO );

    int main()
    {
       char trans = 'N';
       int dim = 5;
       int LDA = dim;
       int info;

       vector<double> a,b;

       a.push_back(2.0); a.push_back(4.0); a.push_back(1.0); a.push_back(-3.0); a.push_back(0.0);
       a.push_back(-1.0); a.push_back(-2.0); a.push_back(2.0); a.push_back(4.0); a.push_back(0.0);
       a.push_back(4.0); a.push_back(2.0); a.push_back(-3.0); a.push_back(5.0); a.push_back(0.0);
       a.push_back(5.0); a.push_back(-4.0); a.push_back(-3.0); a.push_back(1.0); a.push_back(0.0);
       a.push_back(0.0); a.push_back(0.0); a.push_back(0.0); a.push_back(0.0); a.push_back(0.0);

       int ipiv[5];
       dgetrf_(&dim, &dim, &*a.begin(), &LDA, ipiv, &info);
       if (info == 0)
           printf("dgetrf successful\n");
       else
           printf("dgetrf unsuccessful\n");

       return 0;
    }

the output I get is:

    dgetrf unsuccessful

I know these are different libraries, but is this behaviour to be expected?


1 Answer

Stack Overflow user

Accepted answer

Answered on 2019-11-11 15:48:18

When I compile your CUDA code, I get a warning that the cuSOLVER handle is being used before its value is set. You should not ignore such warnings, because your usage in the buffer-sizing function is incorrect. However, that is not the problem here.
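
The fix, applied in the full listing below, is simply to create the cuSOLVER handle before asking it for a workspace size:

    // create the handle first, then query the workspace size
    cusolverStatus = cusolverDnCreate(&cusolverHandle);
    cusolverStatus = cusolverDnDgetrf_bufferSize(cusolverHandle, matrixSize, matrixSize,
                                                 arrADev, matrixSize, &Lwork);
    cudaStatus = cudaMalloc((void**)&workArray, Lwork*sizeof(double));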

I do not think there is any difference between your two test cases. You appear to be interpreting the results incorrectly.

Looking at the netlib documentation, we see that an info value of 5 means that U(5,5) is zero, which will be problematic for future use. That does not mean the dgetrf factorization was "successful" or "unsuccessful" as you are printing out; instead it tells you something about your input data. In fact the factorization was completed, as the documentation clearly states. (A zero U(5,5) is expected here: the fifth row and fifth column of the input matrix are all zeros, so the matrix is singular.)
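
For reference, here is a minimal sketch of how dgetrf's info value is interpreted according to the netlib documentation (variable names follow the LAPACK code in the question):

    // after: dgetrf_(&dim, &dim, &*a.begin(), &LDA, ipiv, &info);
    if (info == 0)
        printf("factorization successful, U is nonsingular\n");
    else if (info < 0)
        printf("argument %d had an illegal value\n", -info);
    else  // info > 0
        printf("factorization completed, but U(%d,%d) is exactly zero;"
               " U is singular and cannot be used to solve a system\n", info, info);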

Likewise, we get no information about this condition merely by looking at the return status of the cuBLAS or cuSOLVER calls. To discover information similar to what LAPACK reports, you have to copy the info values back from the device and inspect them.
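
As a minimal sketch of that check for cusolverDnDgetrf, assuming a separate one-element devInfo allocation (the full listing below instead reuses the existing infoArray and prints all of its elements):

    // allocate a single device int for cuSOLVER's devInfo output
    int *devInfo, hInfo;
    cudaMalloc(&devInfo, sizeof(int));
    cusolverStatus = cusolverDnDgetrf(cusolverHandle, matrixSize, matrixSize,
                                      arrADev, matrixSize, workArray,
                                      pivotArray, devInfo);
    // copy it back and inspect it, just as LAPACK's info is inspected
    cudaMemcpy(&hInfo, devInfo, sizeof(int), cudaMemcpyDeviceToHost);
    printf("cusolverDnDgetrf devInfo = %d\n", hInfo);  // 5 for this matrix, matching LAPACK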

With those changes, your codes report the same thing (an info value of 5):

$ cat t1556.cu
    #include <time.h>
    #include <stdio.h>
    #include <string.h>
    #include <cuda_runtime.h>
    #include <cublas_v2.h>
    #include <cusolverDn.h>

    int main(int argc, char** argv){

        const int matrixSize = 5;

        int i, j;
        double arrA[matrixSize][matrixSize] = {
            {2.0, 4.0, 1.0, -3.0, 0.0},
            {-1.0, -2.0, 2.0, 4.0, 0.0},
            {4.0, 2.0, -3.0, 5.0, 0.0},
            {5.0, -4.0, -3.0, 1.0, 0.0},
            {0.0, 0.0, 0.0, 0.0, 0.0}
        };

        double *arrADev, *workArray;
        double **matrixArray;
        int *pivotArray;
        int *infoArray;
        double flat[matrixSize*matrixSize] = {0};
        cublasHandle_t cublasHandle;
        cublasStatus_t cublasStatus;
        cudaError_t error;

        cudaError cudaStatus;
        cusolverStatus_t cusolverStatus;
        cusolverDnHandle_t cusolverHandle;

        double *matrices[2];


        error = cudaMalloc(&arrADev,  sizeof(double) * matrixSize*matrixSize);
        if (error != cudaSuccess) fprintf(stderr,"\nError: %s\n",cudaGetErrorString(error));

        error = cudaMalloc(&matrixArray,  sizeof(double*) * 2);
        if (error != cudaSuccess) fprintf(stderr,"\nError: %s\n",cudaGetErrorString(error));

        error = cudaMalloc(&pivotArray,  sizeof(int) * matrixSize*matrixSize);
        if (error != cudaSuccess) fprintf(stderr,"\nError: %s\n",cudaGetErrorString(error));

        error = cudaMalloc(&infoArray,  sizeof(int) * matrixSize*matrixSize);
        if (error != cudaSuccess) fprintf(stderr,"\nError: %s\n",cudaGetErrorString(error));

        cublasStatus = cublasCreate(&cublasHandle);
        if (cublasStatus != CUBLAS_STATUS_SUCCESS) fprintf(stderr,"error %i\n",cublasStatus);

        //maps matrix to flat vector
        for(i=0; i<matrixSize; i++){
            for(j=0; j<matrixSize; j++){
                flat[i+j*matrixSize] = arrA[i][j];
            }
        }

        //copy matrix A to device
        cublasStatus = cublasSetMatrix(matrixSize, matrixSize, sizeof(double), flat, matrixSize, arrADev, matrixSize);
        if (cublasStatus != CUBLAS_STATUS_SUCCESS) fprintf(stderr,"error %i\n",cublasStatus);

        //save matrix address
        matrices[0] = arrADev;

        //copy matrices references to device
        error = cudaMemcpy(matrixArray, matrices, sizeof(double*)*1, cudaMemcpyHostToDevice);
        if (error != cudaSuccess) fprintf(stderr,"\nError: %s\n",cudaGetErrorString(error));

        int Lwork;
        // calculate buffer size for cuSOLVER LU factorization
        cusolverStatus = cusolverDnCreate(&cusolverHandle);
        cusolverStatus = cusolverDnDgetrf_bufferSize(cusolverHandle, matrixSize, matrixSize, arrADev, matrixSize, &Lwork);
        cudaStatus = cudaMalloc((void**)&workArray, Lwork*sizeof(double));

        // cuBLAS LU factorization
        cublasStatus = cublasDgetrfBatched(cublasHandle, matrixSize, matrixArray, matrixSize, pivotArray, infoArray, 1);
        if (cublasStatus == CUBLAS_STATUS_SUCCESS)
            printf("cuBLAS DGETRF SUCCESSFUL! \n");
        else
            printf("cuBLAS DGETRF UNSUCCESSFUL! \n");

        // cuSOLVER LU factorization
        cusolverStatus = cusolverDnDgetrf(cusolverHandle, matrixSize, matrixSize, arrADev, matrixSize, workArray, pivotArray, infoArray);
        if (cusolverStatus == CUSOLVER_STATUS_SUCCESS)
            printf("cuSOLVER DGETRF SUCCESSFUL! \n");
        else
            printf("cuSOLVER DGETRF UNSUCCESSFUL! \n");
        int *hinfoArray = (int *)malloc(matrixSize*matrixSize*sizeof(int));
        cudaMemcpy(hinfoArray, infoArray, matrixSize*matrixSize*sizeof(int), cudaMemcpyDeviceToHost);
        for (int i = 0; i < matrixSize*matrixSize; i++) printf("%d,", hinfoArray[i]);
        printf("\n");
        return 0;
    }
$ nvcc -o t1556 t1556.cu -lcublas -lcusolver
t1556.cu(30): warning: variable "cudaStatus" was set but never used

$ ./t1556
cuBLAS DGETRF SUCCESSFUL!
cuSOLVER DGETRF SUCCESSFUL!
5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
$ cat t1557.cpp
    #include <iostream>
    #include <vector>
    #include <lapacke/lapacke.h>
    using namespace std;

//    extern "C" void dgetrf_(int* dim1, int* dim2, double* a, int* lda, int* ipiv, int* info);
//    extern "C" void dgetrs_(char *TRANS, int *N, int *NRHS, double *A, int *LDA, int *IPIV, double *B, int *LDB, int *INFO );

    int main()
    {
       char trans = 'N';
       int dim = 5;
       int LDA = dim;
       int info;

       vector<double> a,b;

       a.push_back(2.0); a.push_back(4.0); a.push_back(1.0); a.push_back(-3.0); a.push_back(0.0);
       a.push_back(-1.0); a.push_back(-2.0); a.push_back(2.0); a.push_back(4.0); a.push_back(0.0);
       a.push_back(4.0); a.push_back(2.0); a.push_back(-3.0); a.push_back(5.0); a.push_back(0.0);
       a.push_back(5.0); a.push_back(-4.0); a.push_back(-3.0); a.push_back(1.0); a.push_back(0.0);
       a.push_back(0.0); a.push_back(0.0); a.push_back(0.0); a.push_back(0.0); a.push_back(0.0);

       int ipiv[5];
       LAPACK_dgetrf(&dim, &dim, &*a.begin(), &LDA, ipiv, &info);
       printf("info = %d\n", info);
       if (info == 0)
           printf("dgetrf successful\n");
       else
           printf("dgetrf unsuccessful\n");

       return 0;
    }
$ g++ t1557.cpp -o t1557 -llapack
$ ./t1557
info = 5
dgetrf unsuccessful
$

I am using the lapack installed from the CentOS repositories.

CentOS 7, CUDA 10.1.243, Tesla V100.

Votes: 2
The original content of this page is provided by Stack Overflow; translation supported by Tencent Cloud's IT-domain engine.
Original link:

https://stackoverflow.com/questions/58800010
