Having nothing to do today, I suddenly wanted to test how efficient three commonly used languages, C, C#, and Java, are at floating-point arithmetic. Guess what the result was. First, the algorithm: it computes a 100*100 Mandelbrot set and, for timing, calls the computation 1000 times in a row. The algorithm comes from a JavaScript benchmark (a quick word of praise for V8, the fastest JS engine: it finishes this benchmark in 1.023 seconds; IE10 has improved to 1.163 seconds, not a big gap, but IE9 takes 9 seconds...). OK, back to the topic. Below is the code used for the tests. First up, C, the king of languages:
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int qset(float xx, float yy, float u, float v) {
    int n;
    float t, xsqr, ysqr;
    int lim = 100;
    float x = xx;
    float y = yy;
    xsqr = x * x;
    ysqr = y * y;
    /* float literals keep the arithmetic in single precision,
       matching the Java and C# versions below */
    for (n = 0; (n < lim) && (xsqr + ysqr < 4.0f); n++) {
        t = xsqr - ysqr + u;
        y = 2.0f * x * y + v;
        x = t;
        xsqr = t * t;
        ysqr = y * y;
    }
    return n;
}

int mb100(void) {
    int dots = 0;
    int res = 100;
    float a1 = -2.50f;
    float b1 = -1.75f;
    float s = 3.05f;
    float x = 0;
    float y = 0;
    float g = s / res;
    int i, j, k;
    float a, b;
    for (j = 0, b = b1; j < res; b += g, j++) {
        for (i = 0, a = a1; i < res; a += g, i++) {
            k = qset(x, y, a, b);
            if (k > 90) {
                dots++;
            }
        }
    }
    return dots;
}

int main(int argc, char **argv)
{
    clock_t start, finish;
    start = clock();
    for (int i = 0; i < 1000; i++) {
        mb100();
    }
    finish = clock();
    /* clock() counts ticks, so convert to milliseconds explicitly */
    long duration = (long)((finish - start) * 1000.0 / CLOCKS_PER_SEC);
    printf("%ld\n", duration); /* 1810 ms */
    getchar();
    return 0;
}
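A quick note on the timing here: clock() measures CPU time in implementation-defined ticks (on Windows a tick happens to be a millisecond, which is why the raw difference reads as 1810 ms). If you want wall-clock time instead, here is a minimal sketch assuming a POSIX system, reusing mb100 from the listing above:

#include <stdio.h>
#include <time.h>

extern int mb100(void); /* the function from the listing above */

int main(void) {
    struct timespec t0, t1;
    clock_gettime(CLOCK_MONOTONIC, &t0); /* monotonic wall clock */
    for (int i = 0; i < 1000; i++) {
        mb100();
    }
    clock_gettime(CLOCK_MONOTONIC, &t1);
    /* convert seconds + nanoseconds to milliseconds */
    long ms = (t1.tv_sec - t0.tv_sec) * 1000
            + (t1.tv_nsec - t0.tv_nsec) / 1000000;
    printf("%ld ms\n", ms);
    return 0;
}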
Next, the ever-popular Java:
public class Main {
    public static void main(String[] args) throws Exception {
        long start = System.currentTimeMillis();
        for (int i = 0; i < 1000; i++) {
            mb100();
        }
        long finish = System.currentTimeMillis();
        System.out.println(finish - start); // 865 ms
    }

    static int qset(float xx, float yy, float u, float v) {
        int n;
        float t, xsqr, ysqr;
        int lim = 100;
        float x = xx;
        float y = yy;
        xsqr = x * x;
        ysqr = y * y;
        for (n = 0; (n < lim) && (xsqr + ysqr < 4.0); n++) {
            t = xsqr - ysqr + u;
            y = 2.0f * x * y + v;
            x = t;
            xsqr = t * t;
            ysqr = y * y;
        }
        return n;
    }

    static int mb100() {
        int dots = 0;
        int res = 100;
        float a1 = -2.50f;
        float b1 = -1.75f;
        float s = 3.05f;
        float x = 0;
        float y = 0;
        float g = s / res;
        int i, j, k;
        float a, b;
        for (j = 0, b = b1; j < res; b += g, j++) {
            for (i = 0, a = a1; i < res; a += g, i++) {
                k = qset(x, y, a, b);
                if (k > 90) {
                    dots++;
                }
            }
        }
        return dots;
    }
}
Finally, C#, the most productive of the three to develop in:
#region Usings
using System;
using System.Diagnostics;
#endregion

namespace MBTest
{
    internal class Program
    {
        private static void Main(string[] args)
        {
            Stopwatch watch = new Stopwatch();
            watch.Start();
            for (int i = 0; i < 1000; i++)
            {
                MB100();
            }
            watch.Stop();
            Console.WriteLine(watch.ElapsedMilliseconds); // 1810 ms
            Console.ReadKey();
        }

        private static int QSet(float xx, float yy, float u, float v)
        {
            int n;
            float t, xsqr, ysqr;
            int lim = 100;
            float x = xx;
            float y = yy;
            xsqr = x * x;
            ysqr = y * y;
            for (n = 0; (n < lim) && (xsqr + ysqr < 4.0); n++)
            {
                t = xsqr - ysqr + u;
                y = 2.0f * x * y + v;
                x = t;
                xsqr = t * t;
                ysqr = y * y;
            }
            return n;
        }

        private static int MB100()
        {
            int dots = 0;
            int res = 100;
            float a1 = -2.50f;
            float b1 = -1.75f;
            float s = 3.05f;
            float x = 0;
            float y = 0;
            float g = s / res;
            int i, j, k;
            float a, b;
            for (j = 0, b = b1; j < res; b += g, j++)
            {
                for (i = 0, a = a1; i < res; a += g, i++)
                {
                    k = QSet(x, y, a, b);
                    if (k > 90)
                    {
                        dots++;
                    }
                }
            }
            return dots;
        }
    }
}
The timing results are in the comments in the code. What I did not expect is that Java is actually the fastest, and not just by a hair: it needs only about half the time of C and C#. Is the JRE specially optimized for this kind of computation? I compared SHA-512 hashing across these languages before, and Java was the fastest there too; I don't know whether to credit the JRE or the compiler, so perhaps an expert can explain. As for C and C#, their computational performance is almost identical as long as the C code hasn't been specially optimized. Then again, C is a language of experts rather than novices, and optimizing it well is genuinely difficult.
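If I had to guess, one big factor is JIT warm-up: after the first few calls, HotSpot compiles qset and mb100 down to optimized native code, so nearly all of the 1000 timed iterations run at full speed. A fairer harness would run the workload once before starting the clock. A minimal sketch, assuming the Main class above sits in the same package (WarmedMain is just an illustrative name):

public class WarmedMain {
    public static void main(String[] args) {
        // Warm-up: run the workload first so HotSpot has already
        // JIT-compiled qset() and mb100() when the clock starts.
        for (int i = 0; i < 100; i++) {
            Main.mb100();
        }
        long start = System.currentTimeMillis();
        for (int i = 0; i < 1000; i++) {
            Main.mb100();
        }
        long finish = System.currentTimeMillis();
        System.out.println(finish - start);
    }
}

The same caveat applies to the C number: without an optimizer flag like gcc's -O2, the C build is handicapped from the start, which lines up with my remark above that unoptimized C shows no advantage here.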