Building on the basic CUDA programming knowledge introduced earlier, this article demonstrates the GPU's efficiency at data-parallel computation, using matrix multiplication as the example.
1. Matrix multiplication and its performance on the CPU.
The code for the Matrix multiplication operation on the CPU:
mat_mul.cc:
<span style= "Font-family:microsoft yahei;font-size:18px;" >//a[i]*b[i] + c[i] = D[i] #include <iostream> #include <vector> #include <map> #include <fstream > #include "wtime.h" using namespace std;const int N = 320;//Matrix There are two ways to express an int by using a two-dimensional matrix or a one-dimensional matrix a[n+1][n+1],b[n+1][n+1],c[n+ 1][n+1],d[n+1][n+1];int aa[(n+1) * (n+1)],bb[(n+1) * (n+1)],cc[(n+1) * (n+1)],dd[(n+1) * (n+1)];void init () {for (int i=0;i <n;i++) for (int j=0;j<n;j++) {A[i][j] = 1;b[i][j] = 2;c[i][j] = 3;}} void Init1 () {for (Int. i=0;i<n;i++) for (int j=0;j<n;j++) {Aa[i*n+j] = 1;bb[i*n+j] = 2;cc[i*n+j] = 3;}} void Mul () {for (int i=0;i<n;i++) for (int. j=0;j<n;j++) {for (int k=0;k<n;k++) {D[i][j] + = a[i][k] * b[k][j];} D[I][J] + = c[i][j]; }}void Mul1 () {for (Int. i=0;i<n;i++) for (int. j=0;j<n;j++) {for (int k=0;k<n;k++) {Dd[i*n+j] + = aa[i*n+k] * bb[k*n+j ];} DD[N*I+J] + = cc[n*i+j]; }}void print () {Ofstream fout;fout.open ("Result.txt"), if (!fout) {perror ("Can not open the file");} for (int i=0;i<n;i++) {for (int j=0; j<n;j++) {fout<<d[i][j]<< ""; } Fout<<endl;} Fout.close ();} int main () {init1 ();d ouble t = wtime (); mul1 (); t = Wtime ()-t;printf ("computation timing =%10.10f sec\n", t);//print (); return 0;} <strong></strong></span>
wtime.h:
/* wtime.h: wall-clock timer used by the matrix-multiply benchmarks.
 * Returns the time of day in seconds as a double. */
#ifndef _WTIME_
#define _WTIME_
double wtime();
#endif
wtime.cc:
#include <stdio.h>
#include <sys/time.h>
#include <iostream>
#include <cstdlib>

// Return the current wall-clock time in seconds (seconds + microseconds
// from gettimeofday, folded into one double).
// Fixed: the garbled source had a mismatched parenthesis in the
// now_time expression — "(double) etstart.tv_sec)".
double wtime(void)
{
    double now_time;
    struct timeval etstart;
    struct timezone tzp;
    if (gettimeofday(&etstart, &tzp) == -1) {
        perror("Error:calling gettimeofday () not successfully.\n");
    }
    now_time = ((double)etstart.tv_sec) + ((double)etstart.tv_usec) / 1000000.0;
    return now_time;
}

#if 0
int main()
{
    double time;
    time = wtime();
    printf("time of day = %10.4f\n", time);
    return 0;
}
#endif
Makefile
# Build and run the CPU matrix-multiply benchmark.
target:
	g++ mat_mul.cc wtime.cc
	./a.out
Results:
2. Matrix multiplication and its performance on the GPU.
Code:
CUDA_MAT_MUL_V1.CU:
// Matrix multiplication with global memory: dd = da * db + dc.
#include <iostream>
#include <fstream>
#include <cstdio>
#include <cstdlib>
#include "wtime.h"

using namespace std;

const int BLOCK_SIZE = 16;
const int GRID_SIZE  = 20;

// One thread per output element; launched on a 2-D grid of 2-D blocks,
// so gridDim*blockDim must cover the full N x N matrix (no bounds guard).
// Fixed: the B-matrix index was db[row*i+col]; the correct column walk
// down B is db[i*N+col].
__global__ void mat_mul(int *da, int *db, int *dc, int *dd, int N)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int sum = 0;
    for (int i = 0; i < N; i++) {
        sum += da[row*N + i] * db[i*N + col];
    }
    dd[row*N + col] = sum + dc[row*N + col];
}

int main()
{
    int N = BLOCK_SIZE * GRID_SIZE;
    int *ha, *hb, *hc, *hd;
    int *da, *db, *dc, *dd;
    double time;

    ha = new int[N*N];
    hb = new int[N*N];
    hc = new int[N*N];
    hd = new int[N*N];
    cudaError_t err;

    // initialize host inputs
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++) {
            ha[i*N+j] = 1;
            hb[i*N+j] = 2;
            hc[i*N+j] = 3;
        }

    // device allocations
    cudaMalloc(&da, N*N*sizeof(int));
    cudaMalloc(&db, N*N*sizeof(int));
    cudaMalloc(&dc, N*N*sizeof(int));
    err = cudaMalloc(&dd, N*N*sizeof(int));
    printf("cuda malloc c: %s\n", cudaGetErrorString(err));

    // host to device (note: the original also copied the uninitialized
    // output buffer hd to dd; that copy was pointless and is removed —
    // the kernel overwrites every element of dd)
    cudaMemcpy(da, ha, N*N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(db, hb, N*N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dc, hc, N*N*sizeof(int), cudaMemcpyHostToDevice);

    dim3 threadBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid(GRID_SIZE, GRID_SIZE);

    // kernel — launches are asynchronous, so synchronize before reading
    // the clock; without this the measurement is only the launch overhead
    // (which is what the microsecond-scale numbers in the table suggest).
    time = wtime();
    mat_mul<<<grid, threadBlock>>>(da, db, dc, dd, N);
    cudaDeviceSynchronize();
    printf("computation time is %10.10f\n", wtime() - time);

    // device to host
    cudaMemcpy(hd, dd, N*N*sizeof(int), cudaMemcpyDeviceToHost);

    // print result to file
    // NOTE(review): the scraped source is truncated here; the loop body is
    // reconstructed from the pattern of the CPU version's print().
    ofstream fout;
    fout.open("result_v1.txt");
    if (!fout) {
        cerr << "open the file error" << endl;
        exit(-1);
    }
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            fout << hd[i*N+j] << " ";
        }
        fout << endl;
    }
    fout.close();

    cudaFree(da); cudaFree(db); cudaFree(dc); cudaFree(dd);
    delete[] ha; delete[] hb; delete[] hc; delete[] hd;
    return 0;
}
Wtime.h:
/* wtime.h: wall-clock timer used by the matrix-multiply benchmarks.
 * Returns the time of day in seconds as a double. */
#ifndef _WTIME_
#define _WTIME_
double wtime();
#endif
cuda_wtime.cu:
#include <stdio.h>
#include <sys/time.h>
#include <iostream>
#include <cstdlib>

// Return the current wall-clock time in seconds (seconds + microseconds
// from gettimeofday, folded into one double).
// Fixed: the garbled source had a mismatched parenthesis in the
// now_time expression — "(double) etstart.tv_sec)".
double wtime(void)
{
    double now_time;
    struct timeval etstart;
    struct timezone tzp;
    if (gettimeofday(&etstart, &tzp) == -1) {
        perror("Error:calling gettimeofday () not successfully.\n");
    }
    now_time = ((double)etstart.tv_sec) + ((double)etstart.tv_usec) / 1000000.0;
    return now_time;
}

#if 0
int main()
{
    double time;
    time = wtime();
    printf("time of day = %10.4f\n", time);
    return 0;
}
#endif
Makefile
# Build and run the GPU matrix-multiply benchmark.
cu:
	nvcc cuda_mat_mul_v1.cu cuda_wtime.cu
	./a.out
Results:
3. Calculation Performance Comparison:
Matrix size |
1600*1600 |
1200*1200 |
800*800 |
320*320 |
Serial Time/S |
30.9 |
11.49865 |
2.597987 |
0.162311 |
Parallel time |
Grid=100/block=16 |
Grid=75/block=16 |
Grid=50/block=16 |
Grid=20/block=16 |
Kernel Execution Time/s |
0.0000319 |
0.0000309944 |
0.0000309944 |
0.0000231266 |
Total time in parallel calculation (allocate memory plus + data copy + Compute)/s |
0.70796 |
0.439213 |
0.310214 |
0.237676 |
It can be seen that as the matrix size grows, the GPU's powerful computing capability becomes very apparent.
Annotated Source: http://blog.csdn.net/lavorange/article/details/41896591
"Cuda parallel programming Four" matrix multiplication