Opencv - how does the filter2D() method actually work?
我确实在寻找 filter2D 的源代码，但找不到它，连 Visual C++ 里也找不到。
这里有关于filter2D算法的专家吗?我知道它应该如何工作,但不知道它实际上如何工作。我制作了自己的filter2d()函数来测试事物,结果与opencvs filter2D()完全不同。这是我的代码:
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 | Mat myfilter2d(Mat input, Mat filter){ Mat dst = input.clone(); cout <<" filter data successfully found. Rows:" << filter.rows <<" cols:" << filter.cols <<" channels:" << filter.channels() <<"\ "; cout <<" input data successfully found. Rows:" << input.rows <<" cols:" << input.cols <<" channels:" << input.channels() <<"\ "; for (int i = 0-(filter.rows/2);i<input.rows-(filter.rows/2);i++){ for (int j = 0-(filter.cols/2);j<input.cols-(filter.cols/2);j++){ //adding k and l to i and j will make up the difference and allow us to process the whole image float filtertotal = 0; for (int k = 0; k < filter.rows;k++){ for (int l = 0; l < filter.rows;l++){ if(i+k >= 0 && i+k < input.rows && j+l >= 0 && j+l < input.cols){ //don't try to process pixels off the endge of the map float a = input.at<uchar>(i+k,j+l); float b = filter.at<float>(k,l); float product = a * b; filtertotal += product; } } } //filter all proccessed for this pixel, write it to dst st.at<uchar>(i+(filter.rows/2),j+(filter.cols/2)) = filtertotal; } } return dst; } |
有人看到我的实现有问题吗? (除了很慢)
这是我的调用（执行）代码：
// Convert the source to grayscale, then run both my implementation and
// OpenCV's filter2D() on it so the two results can be compared side by side.
cvtColor(src, src_grey, CV_BGR2GRAY);
Mat dst = myfilter2d(src_grey, filter);
imshow("myfilter2d", dst);
filter2D(src_grey, dst2, -1, filter);
imshow("filter2d", dst2);
这是我的内核:
1 2 3 4 5 6 7 | float megapixelarray[basesize][basesize] = { {1,1,-1,1,1}, {1,1,-1,1,1}, {1,1,1,1,1}, {1,1,-1,1,1}, {1,1,-1,1,1} }; |
这是(基本上不同的)结果:
有什么想法吗?
编辑:感谢Brians的回答,我添加了以下代码:
1 2 3 4 | //normalize the kernel so its sum = 1 Scalar mysum = sum(dst); dst = dst / mysum[0]; //make sure its not 0 dst = dst * -1; //show negetive |
和filter2d效果更好。某些过滤器会给出完全匹配的结果,而其他过滤器(例如Sobel)会惨败。
我已经接近实际算法了,但是还没有。还有其他任何想法吗?
我认为问题可能出在比例尺上:如果您输入的图像是8位图像,则在大多数情况下,卷积会产生一个溢出最大值255的值。
在您的实现中,看起来好像您获得了环绕值,但是大多数OpenCV函数通过将最大值设置为最大值(或最小值)来处理溢出。这就解释了为什么OpenCV函数的大部分输出都是白色的,以及为什么在输出中也得到同心的形状。
要解决这个问题，请将滤波器中的每个系数除以滤波器系数的总和，从而把滤波器归一化：
例如,代替此过滤器(总和= 10):
1 2 3 | 1 1 1 1 2 1 1 1 1 |
尝试使用此过滤器(总和= 1):
1 2 3 | 0.1 0.1 0.1 0.1 0.2 0.1 0.1 0.1 0.1 |
这是我手动创建filter2D的解决方案:
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 | #include <iostream> #include <opencv2/opencv.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <opencv2/highgui/highgui.hpp> using namespace cv; using namespace std; int main(int argc, const char * argv[]) { Mat img; Mat img_conv; Mat my_kernel; Mat my_conv; // Controlling if the image is loaded correctly img = imread("my_image.jpg",CV_LOAD_IMAGE_COLOR); if(! img.data ) { cout << "Could not open or find the image" << std::endl ; return -1; } imshow("original image", img); img.convertTo(img, CV_64FC3); int kernel_size; // permitted sizes: 3, 5, 7, 9 etc cout <<"Select the size of kernel (it should be an odd number from 3 onwards): \ " << endl; cin >> kernel_size; // Defining the kernel here int selection; cout <<"Select the type of kernel:\ " <<"1. Identity Operator \ 2. Mean Filter \ 3. Spatial shift \ 4. 
Sharpening\ ->"; cin >> selection; switch (selection){ case 1: my_kernel = (Mat_<double>(kernel_size,kernel_size) << 0, 0, 0, 0, 1, 0, 0, 0, 0); break; case 2: my_kernel = (Mat_<double>(kernel_size,kernel_size) << 1, 1, 1, 1, 1, 1, 1, 1, 1) / ( kernel_size * kernel_size); break; case 3: my_kernel = (Mat_<double>(kernel_size,kernel_size) << 0, 0, 0, 0, 0, 1, 0, 0, 0); break; case 4: my_kernel = (Mat_<double>(kernel_size,kernel_size) << -1, -1, -1, -1, 17, -1, -1, -1, -1) / ( kernel_size * kernel_size); break; default: cerr <<"Invalid selection"; return 1; break; } cout <<"my kernel:\ "<<my_kernel << endl; // Adding the countour of nulls around the original image, to avoid border problems during convolution img_conv = Mat::Mat(img.rows + my_kernel.rows - 1, img.cols + my_kernel.cols - 1, CV_64FC3, CV_RGB(0,0,0)); for (int x=0; x<img.rows; x++) { for (int y=0; y<img.cols; y++) { img_conv.at<Vec3d>(x+1,y+1)[0] = img.at<Vec3d>(x,y)[0]; img_conv.at<Vec3d>(x+1,y+1)[1] = img.at<Vec3d>(x,y)[1]; img_conv.at<Vec3d>(x+1,y+1)[2] = img.at<Vec3d>(x,y)[2]; } } //Performing the convolution my_conv = Mat::Mat(img.rows, img.cols, CV_64FC3, CV_RGB(0,0,0)); for (int x=(my_kernel.rows-1)/2; x<img_conv.rows-((my_kernel.rows-1)/2); x++) { for (int y=(my_kernel.cols-1)/2; y<img_conv.cols-((my_kernel.cols-1)/2); y++) { double comp_1=0; double comp_2=0; double comp_3=0; for (int u=-(my_kernel.rows-1)/2; u<=(my_kernel.rows-1)/2; u++) { for (int v=-(my_kernel.cols-1)/2; v<=(my_kernel.cols-1)/2; v++) { comp_1 = comp_1 + ( img_conv.at<Vec3d>(x+u,y+v)[0] * my_kernel.at<double>(u + ((my_kernel.rows-1)/2) ,v + ((my_kernel.cols-1)/2))); comp_2 = comp_2 + ( img_conv.at<Vec3d>(x+u,y+v)[1] * my_kernel.at<double>(u + ((my_kernel.rows-1)/2),v + ((my_kernel.cols-1)/2))); comp_3 = comp_3 + ( img_conv.at<Vec3d>(x+u,y+v)[2] * my_kernel.at<double>(u + ((my_kernel.rows-1)/2),v + ((my_kernel.cols-1)/2))); } } my_conv.at<Vec3d>(x-((my_kernel.rows-1)/2),y-(my_kernel.cols-1)/2)[0] = comp_1; 
my_conv.at<Vec3d>(x-((my_kernel.rows-1)/2),y-(my_kernel.cols-1)/2)[1] = comp_2; my_conv.at<Vec3d>(x-((my_kernel.rows-1)/2),y-(my_kernel.cols-1)/2)[2] = comp_3; } } my_conv.convertTo(my_conv, CV_8UC3); imshow("convolution - manual", my_conv); // Performing the filtering using the opencv funtions Mat dst; filter2D(img, dst, -1 , my_kernel, Point( -1, -1 ), 0, BORDER_DEFAULT ); dst.convertTo(dst, CV_8UC3); imshow("convlution - opencv", dst); waitKey(); return 0; } |