Image processing: a convolution algorithm implementation
Today I decided to write a pure version of the traditional convolution algorithm: just the algorithm logic, without any acceleration tricks.
After finishing it I found the performance is acceptable, so I'd like to share it here. If you optimize it further on top of this, even better.
The algorithm is simple:
#include <stdlib.h>
#include <string.h>

/* Branchless clamp of an int into the 0..255 byte range. */
static inline unsigned char Clamp2Byte(int n) {
    return (((255 - n) >> 31) | (n & ~(n >> 31)));
}

/* Plain 2D convolution with wrap-around border handling.
 * filter  : filterW x filterW kernel, row-major
 * cfactor : normalization divisor (typically the kernel sum for blur kernels)
 * bias    : added to each output sample (e.g. 128 for emboss) */
void Convolution2D(unsigned char *data, unsigned int width, unsigned int height,
                   unsigned int channels, int *filter, unsigned char filterW,
                   unsigned char cfactor, unsigned char bias) {
    unsigned char *tmpData = (unsigned char *)malloc(width * height * channels);
    int factor = 256 / cfactor;  /* fixed-point normalization, applied with >> 8 below */
    int halfW = filterW / 2;
    if (channels == 3 || channels == 4) {
        for (int y = 0; y < (int)height; y++) {
            int y1 = y - halfW + height;
            for (int x = 0; x < (int)width; x++) {
                int x1 = x - halfW + width;
                int r = 0;
                int g = 0;
                int b = 0;
                unsigned int p = (y * width + x) * channels;
                for (unsigned int fx = 0; fx < filterW; fx++) {
                    int dx = (x1 + fx) % width;
                    int fidx = fx * filterW;
                    for (unsigned int fy = 0; fy < filterW; fy++) {
                        int pos = (((y1 + fy) % height) * width + dx) * channels;
                        int *pfilter = &filter[fidx + fy];
                        r += data[pos] * (*pfilter);
                        g += data[pos + 1] * (*pfilter);
                        b += data[pos + 2] * (*pfilter);
                    }
                }
                tmpData[p] = Clamp2Byte(((factor * r) >> 8) + bias);
                tmpData[p + 1] = Clamp2Byte(((factor * g) >> 8) + bias);
                tmpData[p + 2] = Clamp2Byte(((factor * b) >> 8) + bias);
                if (channels == 4) {
                    /* keep the alpha channel untouched instead of copying uninitialized memory */
                    tmpData[p + 3] = data[p + 3];
                }
            }
        }
    } else if (channels == 1) {
        for (int y = 0; y < (int)height; y++) {
            int y1 = y - halfW + height;
            for (int x = 0; x < (int)width; x++) {
                int r = 0;
                unsigned int p = (y * width + x);
                int x1 = x - halfW + width;
                for (unsigned int fx = 0; fx < filterW; fx++) {
                    int dx = (x1 + fx) % width;
                    int fidx = fx * filterW;
                    for (unsigned int fy = 0; fy < filterW; fy++) {
                        int pos = ((y1 + fy) % height) * width + dx;
                        int szfilter = filter[fidx + fy];
                        r += data[pos] * szfilter;
                    }
                }
                tmpData[p] = Clamp2Byte(((factor * r) >> 8) + bias);
            }
        }
    }
    memcpy(data, tmpData, width * height * channels);
    free(tmpData);
}
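Clamp2Byte deserves a word: it clamps the accumulated value into 0..255 without any branch, relying on arithmetic right shift of signed ints (which every mainstream compiler provides). The stand-alone check below is my own sketch, not part of the original code, but it shows the three cases:

/* Minimal sanity check for the branchless clamp (assumes arithmetic right shift).
 * - n < 0   : n >> 31 is all ones, so n & ~(n >> 31) becomes 0
 * - n > 255 : (255 - n) >> 31 is all ones, the OR saturates to 0xFF
 * - else    : both masks are neutral and n passes through unchanged */
#include <assert.h>
#include <stdio.h>

static unsigned char Clamp2Byte(int n) {
    return (unsigned char)(((255 - n) >> 31) | (n & ~(n >> 31)));
}

int main(void) {
    assert(Clamp2Byte(-20) == 0);
    assert(Clamp2Byte(0)   == 0);
    assert(Clamp2Byte(128) == 128);
    assert(Clamp2Byte(255) == 255);
    assert(Clamp2Byte(300) == 255);
    puts("Clamp2Byte behaves as a 0..255 clamp");
    return 0;
}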
Call examples:
// Blur (5x5 diamond kernel, sum = 13)
int Blurfilter[25] = {
    0, 0, 1, 0, 0,
    0, 1, 1, 1, 0,
    1, 1, 1, 1, 1,
    0, 1, 1, 1, 0,
    0, 0, 1, 0, 0
};
Convolution2D(imgData, imgWidth, imgHeight, imgChannels, Blurfilter, 5, 13, 0);

// Motion blur (9x9 diagonal, sum = 9)
int MotionBlurfilter[81] = {
    1, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 1, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 1, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 1, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 1, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 1, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 1, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 1
};
Convolution2D(imgData, imgWidth, imgHeight, imgChannels, MotionBlurfilter, 9, 9, 0);

// Edge detection 1
int edges1filter[25] = {
    -1,  0,  0,  0,  0,
     0, -2,  0,  0,  0,
     0,  0,  6,  0,  0,
     0,  0,  0, -2,  0,
     0,  0,  0,  0, -1
};
Convolution2D(imgData, imgWidth, imgHeight, imgChannels, edges1filter, 5, 1, 0);

// Edge detection 2
int edges2filter[9] = {
    -1, -1, -1,
    -1,  8, -1,
    -1, -1, -1
};
Convolution2D(imgData, imgWidth, imgHeight, imgChannels, edges2filter, 3, 1, 0);

// Sharpen 1 (sum = 1)
int sharpen1filter[9] = {
    -1, -1, -1,
    -1,  9, -1,
    -1, -1, -1
};
Convolution2D(imgData, imgWidth, imgHeight, imgChannels, sharpen1filter, 3, 1, 0);

// Sharpen 2 (5x5, sum = 8)
int sharpen2filter[25] = {
    -1, -1, -1, -1, -1,
    -1,  2,  2,  2, -1,
    -1,  2,  8,  2, -1,
    -1,  2,  2,  2, -1,
    -1, -1, -1, -1, -1
};
Convolution2D(imgData, imgWidth, imgHeight, imgChannels, sharpen2filter, 5, 8, 0);

// Sharpen 3 (sum = 1)
int sharpen3filter[9] = {
     1,  1,  1,
     1, -7,  1,
     1,  1,  1
};
Convolution2D(imgData, imgWidth, imgHeight, imgChannels, sharpen3filter, 3, 1, 0);

// Emboss 1 (bias 128 keeps the result around mid-gray)
int Embossfilter[9] = {
    -1, -1,  0,
    -1,  0,  1,
     0,  1,  1
};
Convolution2D(imgData, imgWidth, imgHeight, imgChannels, Embossfilter, 3, 1, 128);

// Emboss 2
int emboss2filter[25] = {
    -1, -1, -1, -1,  0,
    -1, -1, -1,  0,  1,
    -1, -1,  0,  1,  1,
    -1,  0,  1,  1,  1,
     0,  1,  1,  1,  1
};
Convolution2D(imgData, imgWidth, imgHeight, imgChannels, emboss2filter, 5, 1, 128);

// Mean blur 1 (3x3, sum = 9)
int meanfilter[9] = {
    1, 1, 1,
    1, 1, 1,
    1, 1, 1
};
Convolution2D(imgData, imgWidth, imgHeight, imgChannels, meanfilter, 3, 9, 0);

// Mean blur 2 (9x9, sum = 81)
int mean2filter[81] = {
    1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1
};
Convolution2D(imgData, imgWidth, imgHeight, imgChannels, mean2filter, 9, 81, 0);
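If you want to try this end to end, a rough driver could look like the sketch below. It assumes the single-header stb_image / stb_image_write libraries for loading and saving; the file names and the choice of the mean-blur kernel are placeholders of mine, not part of the original post.

/* Hypothetical driver: load an image, run one kernel, save the result.
 * Assumes stb_image.h / stb_image_write.h are available and that
 * Convolution2D from above is linked in. Note it only handles 1-, 3-
 * and 4-channel images. */
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"

void Convolution2D(unsigned char *data, unsigned int width, unsigned int height,
                   unsigned int channels, int *filter, unsigned char filterW,
                   unsigned char cfactor, unsigned char bias);

int main(void) {
    int w, h, c;
    unsigned char *img = stbi_load("input.png", &w, &h, &c, 0);
    if (!img) return 1;

    /* 3x3 mean blur: nine ones, normalized by cfactor = 9 */
    int meanfilter[9] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 };
    Convolution2D(img, (unsigned int)w, (unsigned int)h, (unsigned int)c,
                  meanfilter, 3, 9, 0);

    stbi_write_png("output.png", w, h, c, img, w * c);
    stbi_image_free(img);
    return 0;
}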
Running the edge detection kernel below on a 960x1280 image took about 100 milliseconds on my virtual machine.
// Edge detection 1
int edges1filter[25] = {
    -1,  0,  0,  0,  0,
     0, -2,  0,  0,  0,
     0,  0,  6,  0,  0,
     0,  0,  0, -2,  0,
     0,  0,  0,  0, -1
};
Convolution2D(imgData, imgWidth, imgHeight, imgChannels, edges1filter, 5, 1, 0);
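To reproduce a rough timing of your own, you can wrap the call with clock(). The harness below is a sketch under my own assumptions (a synthetic 960x1280 RGB buffer, CPU time rather than wall time), so the numbers will differ from the measurement above:

/* Rough timing sketch: synthetic 960x1280 RGB data, edge detection kernel. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

void Convolution2D(unsigned char *data, unsigned int width, unsigned int height,
                   unsigned int channels, int *filter, unsigned char filterW,
                   unsigned char cfactor, unsigned char bias);

int main(void) {
    const unsigned int w = 960, h = 1280, ch = 3;
    unsigned char *img = (unsigned char *)malloc(w * h * ch);
    for (unsigned int i = 0; i < w * h * ch; i++) img[i] = (unsigned char)(i % 256);

    int edges1filter[25] = {
        -1,  0,  0,  0,  0,
         0, -2,  0,  0,  0,
         0,  0,  6,  0,  0,
         0,  0,  0, -2,  0,
         0,  0,  0,  0, -1
    };

    clock_t t0 = clock();
    Convolution2D(img, w, h, ch, edges1filter, 5, 1, 0);
    clock_t t1 = clock();
    printf("Convolution2D took %.1f ms\n", 1000.0 * (double)(t1 - t0) / CLOCKS_PER_SEC);

    free(img);
    return 0;
}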
For more background on these convolution kernels, see the usual encyclopedia articles on the topic.
Keywords: convolution