Parallel matrix multiplication is usually discussed in terms of Cannon's algorithm, but with shared memory I think Cannon's algorithm has no advantage.
Cannon's algorithm earns its higher degree of parallelism when each variable exists as a single global copy that must be shifted between processors. With shared memory there is no replication cost for the operands, so a direct band division avoids the barrier overhead between iterations and is more efficient, as sketched below.
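Under OpenMP a band division needs no special machinery; the sketch below (my illustration, not part of the original program, which follows after the heading) gives each thread one contiguous band of rows of c and synchronizes only once, at the end. A plain parallel for with schedule(static) produces the same partition.

[Cpp]
// Band division sketch: thread t computes one contiguous band of rows of c.
// a, b, c and size are assumed to be set up as in the full program below,
// with b already transposed.
#pragma omp parallel num_threads(16)
{
    int nthreads = omp_get_num_threads();
    int band = (size + nthreads - 1) / nthreads;           // rows per thread
    int first = omp_get_thread_num() * band;
    int last = first + band > size ? size : first + band;
    for (int i = first; i < last; ++i)                     // this thread's band
        for (int j = 0; j < size; ++j)
        {
            double sum = 0;
            for (int k = 0; k < size; ++k)
                sum += a[i][k] * b[j][k];                  // b transposed
            c[i][j] = sum;
        }
} // one implicit barrier here; none inside the computation

Cannon's algorithm, by contrast, would re-shift blocks of a and b between every one of its steps, each shift ending in a synchronization point.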
Matrix Multiplication under SMP
[Cpp]
# Include "stdafx. h"
# Include "matrixOperation. h"
# Include <omp. h>
Int _ tmain (int argc, _ TCHAR * argv [])
{
Const int size = 5000;
Double ** a, ** B, ** c;
A = new double * [size];
B = new double * [size];
C = new double * [size];
For (int I = 0; I <size; ++ I)
{
A [I] = new double [size];
B [I] = new double [size];
C [I] = new double [size];
}
Cout <"mem set" <endl;
// Read file
Cout <readMatrix ("matrix", a, size) <endl;
Cout <readMatrix ("matrix", B, size) <endl;
Cout <compareMatrix (a, B, size) <endl;
// For more cache hits
// Transposition B and place data needed in one cache block
MatrixTransposition (B, size );
Cout <"data prepared" <endl <"calculating" <endl;
Long start = time (0 );
// Omp_set_nested (true );
# Pragma omp parallel for num_threads (16) schedule (dynamic)
For (int I = 0; I <size; ++ I)
{
// # Pragma omp parallel for firstprivate (I) num_threads (4)
For (int j = 0; j <size; ++ j)
{
C [I] [j] = 0;
For (int k = 0; k <size; ++ k)
{
C [I] [j] + = a [I] [k] * B [j] [k]; // different from the original formulation
}
}
Cout <".";
}
Long end = time (0 );
Cout <end-start <"seconds" <endl;
WriteMatrix ("out", c, size );
For (int I = 0; I <size; ++ I)
{
Delete [] a [I];
Delete [] B [I];
Delete [] c [I];
}
Delete [];
Delete [] B;
Delete [] c;
Cin> start;
Return 0;
}
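The transposition is what makes the inner loop cache-friendly. With the row-pointer layout used here, the untransposed access b[k][j] lands in a different heap-allocated row on every step of k, while the transposed access streams through a single row. A minimal before/after sketch:

[Cpp]
// Untransposed: b[k][j] touches a different row of b on every k,
// so nearly every iteration pulls in a new cache line.
for (int k = 0; k < size; ++k)
    sum += a[i][k] * b[k][j];

// Transposed (as in the program above): a[i] and b[j] are both walked
// sequentially, so consecutive k values share cache lines.
for (int k = 0; k < size; ++k)
    sum += a[i][k] * b[j][k];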
# Include "stdafx. h"
# Include "matrixOperation. h"
# Include <omp. h>
Int _ tmain (int argc, _ TCHAR * argv [])
{
Const int size = 5000;
Double ** a, ** B, ** c;
A = new double * [size];
B = new double * [size];
C = new double * [size];
For (int I = 0; I <size; ++ I)
{
A [I] = new double [size];
B [I] = new double [size];
C [I] = new double [size];
}
Cout <"mem set" <endl;
// Read file
Cout <readMatrix ("matrix", a, size) <endl;
Cout <readMatrix ("matrix", B, size) <endl;
Cout <compareMatrix (a, B, size) <endl;
// For more cache hits
// Transposition B and place data needed in one cache block
MatrixTransposition (B, size );
Cout <"data prepared" <endl <"calculating" <endl;
Long start = time (0 );
// Omp_set_nested (true );
# Pragma omp parallel for num_threads (16) schedule (dynamic)
For (int I = 0; I <size; ++ I)
{
// # Pragma omp parallel for firstprivate (I) num_threads (4)
For (int j = 0; j <size; ++ j)
{
C [I] [j] = 0;
For (int k = 0; k <size; ++ k)
{
C [I] [j] + = a [I] [k] * B [j] [k]; // different from the original formulation
}
}
Cout <".";
}
Long end = time (0 );
Cout <end-start <"seconds" <endl;
WriteMatrix ("out", c, size );
For (int I = 0; I <size; ++ I)
{
Delete [] a [I];
Delete [] B [I];
Delete [] c [I];
}
Delete [];
Delete [] B;
Delete [] c;
Cin> start;
Return 0;
}
On an i7 2600 processor, the parameter settings above performed best for the 5000×5000 matrix multiplication; the pure computing time is about 126 seconds.
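For scale: the triple loop performs 2 × 5000³ ≈ 2.5 × 10¹¹ floating-point operations, so 126 seconds works out to roughly 2 GFLOP/s. That is well below the peak of an i7 2600, which is about what one should expect from an unblocked, unvectorized triple loop; the transposition buys cache hits, not peak throughput.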