CUDA Parallel Programming (4): Matrix Multiplication


The previous article introduced the basics of CUDA programming. In this article we look at how efficiently the GPU handles data computation, taking matrix multiplication as the example.
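Throughout, all the code computes the same operation on N x N integer matrices, D = A * B + C, elementwise:

	D[i][j] = (sum over k of A[i][k] * B[k][j]) + C[i][j]

Since the test matrices are filled with constants (A = 1, B = 2, C = 3), every element of D should equal 2N + 3, e.g. 643 for N = 320, which makes the result files easy to check.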


1. Matrix multiplication and performance on the CPU

Code for matrix multiplication on the CPU:

mat_mul.cc:

// mat_mul.cc: computes D = A * B + C on the CPU
#include <iostream>
#include <fstream>
#include <cstdio>
#include <cstdlib>
#include "wtime.h"
using namespace std;

const int N = 320;

// The matrices can be stored either as two-dimensional arrays or as
// flattened one-dimensional arrays; both layouts are shown below.
int a[N+1][N+1], b[N+1][N+1], c[N+1][N+1], d[N+1][N+1];
int aa[(N+1)*(N+1)], bb[(N+1)*(N+1)], cc[(N+1)*(N+1)], dd[(N+1)*(N+1)];

void init()                    // fill the 2-D matrices
{
	for (int i = 0; i < N; i++)
		for (int j = 0; j < N; j++) {
			a[i][j] = 1; b[i][j] = 2; c[i][j] = 3;
		}
}

void init1()                   // fill the flattened 1-D matrices
{
	for (int i = 0; i < N; i++)
		for (int j = 0; j < N; j++) {
			aa[i*N+j] = 1; bb[i*N+j] = 2; cc[i*N+j] = 3;
		}
}

void mul()                     // D = A * B + C, 2-D version
{
	for (int i = 0; i < N; i++)
		for (int j = 0; j < N; j++) {
			for (int k = 0; k < N; k++)
				d[i][j] += a[i][k] * b[k][j];
			d[i][j] += c[i][j];
		}
}

void mul1()                    // D = A * B + C, flattened 1-D version
{
	for (int i = 0; i < N; i++)
		for (int j = 0; j < N; j++) {
			for (int k = 0; k < N; k++)
				dd[i*N+j] += aa[i*N+k] * bb[k*N+j];
			dd[N*i+j] += cc[N*i+j];
		}
}

void print()                   // write the result matrix to a file
{
	ofstream fout;
	fout.open("result.txt");
	if (!fout) {
		perror("can not open the file");
		exit(-1);
	}
	for (int i = 0; i < N; i++) {
		for (int j = 0; j < N; j++)
			fout << dd[i*N+j] << " ";
		fout << endl;
	}
	fout.close();
}

int main()
{
	init1();

	double t = wtime();
	mul1();
	t = wtime() - t;

	printf("computation timing = %10.10f sec\n", t);
	print();
	return 0;
}
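The timing run exercises only the flattened mul1() path. As a quick correctness check, here is a minimal sketch (not in the original post) that could be appended to main() after mul1(); it assumes the globals and functions of mat_mul.cc above:

	// hypothetical check: run the 2-D version too and compare results
	init();
	mul();
	bool same = true;
	for (int i = 0; i < N && same; i++)
		for (int j = 0; j < N && same; j++)
			same = (d[i][j] == dd[i*N + j]);
	cout << (same ? "2-D and 1-D results match" : "results differ!") << endl;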
wtime.h:

#ifndef _WTIME_
#define _WTIME_

double wtime ();

#endif


wtime.cc:

#include <stdio.h>
#include <sys/time.h>
#include <iostream>
#include <cstdlib>

double wtime(void)
{
	double now_time;
	struct timeval etstart;
	struct timezone tzp;

	// gettimeofday() returns -1 on failure
	if (gettimeofday(&etstart, &tzp) == -1)
	{
		perror("error: calling gettimeofday() was not successful\n");
	}

	// seconds plus microseconds, as a double
	now_time = ((double)etstart.tv_sec) + ((double)etstart.tv_usec) / 1000000.0;

	return now_time;
}

#if 0
int main ()
{
	double time;
	time = wtime();

	printf("time = %10.4f\n", time);

	return 0;
}
#endif
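gettimeofday() is POSIX-specific, so wtime.cc will not build everywhere. A portable C++11 alternative with the same interface, offered here as a sketch rather than as part of the original post:

	#include <chrono>

	// returns seconds since an arbitrary epoch; the difference between two
	// calls gives elapsed wall-clock time, as with the gettimeofday version
	double wtime(void)
	{
		using namespace std::chrono;
		return duration<double>(steady_clock::now().time_since_epoch()).count();
	}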

Makefile:

Target:
	g++ mat_mul.cc wtime.cc
	./a.out

Results:

(The original post shows a screenshot of the timing output here; the measured times are collected in the table in section 3.)

2. Matrix multiplication and performance on the GPU

Code:

cuda_mat_mul_v1.cu:

// Matrix multiplication with global memory: D = A * B + C
#include <iostream>
#include <fstream>
#include <cstdio>
#include <cstdlib>
#include "wtime.h"
using namespace std;

const int BLOCK_SIZE = 16;
const int GRID_SIZE  = 20;

// each thread computes one element of D = A * B + C
__global__ void mat_mul(int *da, int *db, int *dc, int *dd, int N)
{
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	int sum = 0;
	for (int i = 0; i < N; i++) {
		sum += da[row*N + i] * db[i*N + col];
	}
	dd[row*N + col] = sum + dc[row*N + col];
}

int main()
{
	int N = BLOCK_SIZE * GRID_SIZE;
	int *ha, *hb, *hc, *hd;
	int *da, *db, *dc, *dd;
	double time;

	ha = new int[N*N];
	hb = new int[N*N];
	hc = new int[N*N];
	hd = new int[N*N];

	cudaError_t err;

	// initialize
	for (int i = 0; i < N; i++)
		for (int j = 0; j < N; j++) {
			ha[i*N+j] = 1; hb[i*N+j] = 2; hc[i*N+j] = 3;
		}

	// allocate device memory
	cudaMalloc(&da, N*N*sizeof(int));
	cudaMalloc(&db, N*N*sizeof(int));
	cudaMalloc(&dc, N*N*sizeof(int));
	err = cudaMalloc(&dd, N*N*sizeof(int));
	printf("cuda malloc D: %s\n", cudaGetErrorString(err));

	// host to device
	cudaMemcpy(da, ha, N*N*sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(db, hb, N*N*sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(dc, hc, N*N*sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(dd, hd, N*N*sizeof(int), cudaMemcpyHostToDevice);

	dim3 threadBlock(BLOCK_SIZE, BLOCK_SIZE);
	dim3 grid(GRID_SIZE, GRID_SIZE);

	// kernel; note that the launch is asynchronous, so this wtime()
	// interval measures launch overhead rather than full execution time
	time = wtime();
	mat_mul<<<grid, threadBlock>>>(da, db, dc, dd, N);
	printf("computation time is %10.10f\n", wtime() - time);

	// device to host (implicitly waits for the kernel to finish)
	cudaMemcpy(hd, dd, N*N*sizeof(int), cudaMemcpyDeviceToHost);

	// print result to file
	ofstream fout;
	fout.open("result_v1.txt");
	if (!fout) {
		cerr << "open the file error" << endl;
		exit(-1);
	}
	for (int i = 0; i < N; i++) {
		for (int j = 0; j < N; j++)
			fout << hd[i*N+j] << " ";
		fout << endl;
	}
	fout.close();
	return 0;
}
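One caveat with the timing above: a kernel launch returns to the host immediately, so the wtime() interval around mat_mul<<<...>>> captures mostly launch overhead, not the kernel's actual run time. Here is a sketch (not part of the original post) of timing the same launch with CUDA events, which do account for device-side execution:

	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);

	cudaEventRecord(start);
	mat_mul<<<grid, threadBlock>>>(da, db, dc, dd, N);
	cudaEventRecord(stop);
	cudaEventSynchronize(stop);               // block until the kernel has finished

	float ms = 0.0f;
	cudaEventElapsedTime(&ms, start, stop);   // elapsed time in milliseconds
	printf("kernel time is %10.10f sec\n", ms / 1000.0);

	cudaEventDestroy(start);
	cudaEventDestroy(stop);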
cuda_wtime.cu: 

// same timer as wtime.cc, provided as a .cu file for the nvcc build
#include <stdio.h>
#include <sys/time.h>
#include <iostream>
#include <cstdlib>

double wtime(void)
{
	double now_time;
	struct timeval etstart;
	struct timezone tzp;

	// gettimeofday() returns -1 on failure
	if (gettimeofday(&etstart, &tzp) == -1)
	{
		perror("error: calling gettimeofday() was not successful\n");
	}

	// seconds plus microseconds, as a double
	now_time = ((double)etstart.tv_sec) + ((double)etstart.tv_usec) / 1000000.0;

	return now_time;
}

#if 0
int main()
{
	double time;
	time = wtime();

	printf("time = %10.4f\n", time);

	return 0;
}
#endif
wtime.h:
#ifndef _WTIME_
#define _WTIME_

double wtime ();

#endif



Makefile

CU:
	nvcc cuda_mat_mul_v1.cu cuda_wtime.cu
	./a.out

Results:

(The original post shows a screenshot of the timing output here; the measured times are collected in the table in section 3.)

3. Performance comparison

Matrix size                      1600x1600      1200x1200      800x800        320x320
Serial time (s)                  30.9           11.49865       2.597987       0.162311
Parallel config (grid/block)     100/16         75/16          50/16          20/16
Kernel execution time (s)        0.0000319      0.0000309944   0.0000309944   0.0000231266
Total parallel time (s)          0.70796        0.439213       0.310214       0.237676

(Total parallel time includes device memory allocation, host-device data copies, and the computation itself. The matrix side is block_size * grid_size, so e.g. grid = 100 with block = 16 gives N = 1600.)

As the table shows, the GPU's advantage grows with the matrix size: for the 1600x1600 case, 0.70796 s of total GPU time against 30.9 s of serial time is roughly a 44x end-to-end speedup, even counting allocation and transfers. Note also that kernel launches are asynchronous, so the 'kernel execution time' row mainly reflects launch overhead, which is why it is nearly constant across matrix sizes.


Original source: http://blog.csdn.net/lavorange/article/details/41896591


