以下面這個例子作為教程,實現功能是element-wise add;
(在 PyTorch 中若想調用自定義 CUDA 模塊,仍需另外用 C 編寫接口腳本來橋接)
第一步:cuda編程的源文件和頭文件
// mathutil_cuda_kernel.cu
// CUDA side of the extension: kernel + host launcher for a broadcast add,
// adding the vector b into the x*y matrix a (a[j,k] += b[k]).
// Headers: <curand.h> is CUDA-specific; the rest are standard C.
#include <curand.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "mathutil_cuda_kernel.h"

// Map a flat element count n onto a (x, y, 1) grid of BLOCK-sized blocks.
// When the block count exceeds the 65535 per-dimension grid limit, split
// it across two grid dimensions.
dim3 cuda_gridsize(int n)
{
    int k = (n - 1) / BLOCK + 1;  // ceil(n / BLOCK)
    int x = k;
    int y = 1;
    if (x > 65535) {
        x = ceil(sqrt(k));
        y = (n - 1) / (x * BLOCK) + 1;
    }
    dim3 d(x, y, 1);
    return d;
}

// Device kernel: one thread per element. The flat thread id i is decomposed
// into (j, k) coordinates; b[k] is added to a[j, k]. Threads beyond `size`
// exit immediately (the grid rarely divides the data evenly).
__global__ void broadcast_sum_kernel(float *a, float *b, int x, int y, int size)
{
    int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= size) return;
    int j = i % x;
    i = i / x;
    int k = i % y;
    a[IDX2D(j, k, y)] += b[k];
}

// Host-side wrapper (the function the C interface file links against).
// Launches the kernel on `stream` and checks for launch errors.
// Note: a and b must be device pointers.
void broadcast_sum_cuda(float *a, float *b, int x, int y, cudaStream_t stream)
{
    int size = x * y;
    cudaError_t err;

    // Launch the kernel defined above.
    broadcast_sum_kernel<<<cuda_gridsize(size), BLOCK, 0, stream>>>(a, b, x, y, size);

    // Kernel launches do not return errors directly; query the runtime.
    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
// mathutil_cuda_kernel.h
// Shared header for the CUDA kernel file and the C interface file.
#ifndef _MATHUTIL_CUDA_KERNEL
#define _MATHUTIL_CUDA_KERNEL

// Row-major flat-index helpers. Arguments are parenthesized so expression
// arguments (e.g. IDX2D(i + 1, j, dj)) expand correctly.
#define IDX2D(i, j, dj) ((dj) * (i) + (j))
#define IDX3D(i, j, k, dj, dk) (IDX2D(IDX2D(i, j, dj), k, dk))

// Threads per block used by all kernel launches in this module.
#define BLOCK 512
#define MAX_STREAMS 512

#ifdef __cplusplus
extern "C" {
#endif

// Launch broadcast_sum_kernel on `stream`: a (x*y device floats) += b broadcast.
void broadcast_sum_cuda(float *a, float *b, int x, int y, cudaStream_t stream);

#ifdef __cplusplus
}
#endif

#endif  // _MATHUTIL_CUDA_KERNEL
第二步:C編程的源文件和頭文件(接口函數)
// mathutil_cuda.c// THC是pytorch底層GPU庫#include <THC/THC.h>#include "mathutil_cuda_kernel.h"extern THCState *state;int broadcast_sum(THCudaTensor *a_tensor, THCudaTensor *b_tensor, int x, int y){ float *a = THCudaTensor_data(state, a_tensor); float *b = THCudaTensor_data(state, b_tensor); cudaStream_t stream = THCState_getCurrentStream(state); // 這里調用之前在cuda中編寫的接口函數 broadcast_sum_cuda(a, b, x, y, stream); return 1;}
// mathutil_cuda.h
// C-side declaration of the FFI interface function; returns 1 on success.
int broadcast_sum(THCudaTensor *a_tensor, THCudaTensor *b_tensor, int x, int y);
第三步:編譯,先編譯cuda模塊,再編譯接口函數模塊(不能放在一起同時編譯)
# Compile the CUDA kernel into a position-independent object file first;
# -arch=sm_52 targets compute capability 5.2 — adjust for your GPU.
nvcc -c -o mathutil_cuda_kernel.cu.o mathutil_cuda_kernel.cu -x cu -Xcompiler -fPIC -arch=sm_52
# build.py
# Build script: compiles the C interface into a Python extension module
# (_ext.cuda_util) and links in the nvcc-precompiled CUDA object file.
# NOTE(review): torch.utils.ffi was removed in PyTorch >= 1.0; this script
# targets the legacy FFI workflow (modern code should use
# torch.utils.cpp_extension instead).
import os

import torch
from torch.utils.ffi import create_extension

# Directory containing this script (realpath resolves symlinks); the
# original assigned this_file twice — only the realpath version is kept.
this_file = os.path.dirname(os.path.realpath(__file__))

sources = []
headers = []
defines = []
with_cuda = False

# Only include the CUDA interface sources when a GPU is actually available.
if torch.cuda.is_available():
    print('Including CUDA code.')
    sources += ['src/mathutil_cuda.c']
    headers += ['src/mathutil_cuda.h']
    defines += [('WITH_CUDA', None)]
    with_cuda = True

# Location of the .o file built separately with nvcc (see the compile step).
extra_objects = ['src/mathutil_cuda_kernel.cu.o']
extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]

ffi = create_extension(
    '_ext.cuda_util',
    headers=headers,
    sources=sources,
    define_macros=defines,
    relative_to=__file__,
    with_cuda=with_cuda,
    extra_objects=extra_objects,
)

if __name__ == '__main__':
    ffi.build()
新聞熱點
疑難解答