原发于我的个人博客 个人博客网站
本文主要是针对高斯模糊算法进行优化,最终在线性时间内实现高斯模糊效果。当然,该算法并非本人原创,实现过程中也借鉴了一些文章和论文,相关链接都在文末贴出
为方便搭配阅读,我写了一个简单的Demo,Demo链接,Demo代码地址,可以在Demo中测试各种模糊效果及其耗时
高斯模糊
高斯模糊可以实现非常平滑的模糊效果,对于每一个像素,它会取它本身及其周围一定范围内的像素,去计算模糊后的效果,当然,每个像素在计算过程中所占的权重并不相同,距离其本身越近,权重越高——这当然是合理的。权重的计算公式如下:
公式中的(sigma)是人为设定的,值越大,生成的图像越模糊
当然,并非所有像素都会相互影响,以当前像素为圆心,取一个半径,在这个半径以内的像素才会影响该像素的模糊效果。这个半径与sigma成正比,不过没有统一的公式,在这个提问下,有回答说NVidia采用的是radius=sigma*3的计算方式,我在这也使用了这个公式
下面是其基本实现,时间复杂度为O(nrr),n为图片大小
/**
 * Naive 2-D Gaussian blur, O(n * r^2) with r = round(3 * sigma).
 *
 * @param {number[]|Uint8ClampedArray} src - source pixels (single channel), row-major
 * @param {number[]|Uint8ClampedArray} dest - destination buffer, same length as src
 * @param {number} width - image width in pixels
 * @param {number} height - image height in pixels
 * @param {number} sigma - Gaussian standard deviation; larger => blurrier
 */
function gaussianBlur(src, dest, width, height, sigma) {
  const radius = Math.round(sigma * 3) // common cutoff: weights beyond 3*sigma are negligible
  const twoSigmaSq = 2 * sigma * sigma
  // Precompute the (2r+1)^2 kernel once: the weight depends only on (dx, dy),
  // not on the pixel position, so calling Math.exp per pixel is wasted work.
  const size = 2 * radius + 1
  const kernel = new Float64Array(size * size)
  for (let dx = -radius; dx <= radius; dx++) {
    for (let dy = -radius; dy <= radius; dy++) {
      kernel[(dx + radius) * size + (dy + radius)] =
        Math.exp(-(dx * dx + dy * dy) / twoSigmaSq) / (Math.PI * twoSigmaSq)
    }
  }
  for (let i = 0; i < width; i++) {
    for (let j = 0; j < height; j++) {
      let accumulation = 0
      let weightSum = 0
      for (let dx = -radius; dx <= radius; dx++) {
        for (let dy = -radius; dy <= radius; dy++) {
          // Clamp sample coordinates: pixels outside the image replicate the edge.
          const x = Math.min(width - 1, Math.max(0, i + dx))
          const y = Math.min(height - 1, Math.max(0, j + dy))
          const weight = kernel[(dx + radius) * size + (dy + radius)]
          accumulation += src[y * width + x] * weight
          weightSum += weight
        }
      }
      // Dividing by weightSum renormalizes the truncated kernel so overall
      // brightness is preserved.
      dest[j * width + i] = Math.round(accumulation / weightSum)
    }
  }
}
效果如下,也可以去 Demo 中尝试(高斯模糊可能耗时较久,耐心等待吧)
原图:
高斯模糊后(sigma=5),5411ms(MBP, 13-inch, 2017, 8G):
方框模糊
最简单的方框模糊(box blur)没有权重,只要在半径内,权重都是相同的
/**
 * Plain box blur: every pixel inside the square window of the given radius
 * contributes with equal weight; samples outside the image are clamped to the
 * nearest edge pixel. Runs in O(n * r^2).
 */
function simpleBoxBlur(src, dest, width, height, radius) {
  const windowArea = (2 * radius + 1) * (2 * radius + 1)
  for (let col = 0; col < width; col++) {
    for (let row = 0; row < height; row++) {
      let total = 0
      for (let ox = -radius; ox <= radius; ox++) {
        const sx = Math.min(width - 1, Math.max(0, col + ox))
        for (let oy = -radius; oy <= radius; oy++) {
          const sy = Math.min(height - 1, Math.max(0, row + oy))
          total += src[sy * width + sx]
        }
      }
      dest[row * width + col] = Math.round(total / windowArea)
    }
  }
}
radius=15的效果(775ms):
可以看出,虽然耗时较高斯模糊短,但其效果是很不平滑的。那有没有办法让方框模糊拥有高斯模糊一样的质量呢,这篇论文提出了方案
根据论文中所述,可以通过多次的方框模糊实现高斯模糊,而这多次方框模糊的核由以下步骤得出:
- 由下图公式(n为模糊次数)计算得出wideal,再计算wl和wu,wl为第一个小于等于wideal的奇数,wu是第一个大于等于wideal的奇数(很明显wu = wl+2)(要求奇数的原因是size=radius*2+1)
- 计算m:
- 前m次用wl作核的大小,其余的n-m次用wu作核的大小
化作代码:
/**
 * Compute n box-blur kernel sizes whose repeated application approximates a
 * Gaussian blur of the given sigma (Kovesi, "Fast Almost-Gaussian Filtering").
 * Returns an array of n odd kernel sizes (size = 2 * radius + 1).
 */
function genKernelsForGaussian(sigma, n) {
  const variance = sigma * sigma
  // Ideal averaging-filter width for n passes.
  const idealWidth = Math.sqrt((12 * variance) / n + 1)
  // First odd width <= ideal.
  let lower = Math.floor(idealWidth)
  if (lower % 2 === 0) {
    lower -= 1
  }
  const upper = lower + 2 // first odd width >= ideal
  // How many of the n passes should use the lower width.
  const mExact =
    (12 * variance - n * lower * lower - 4 * n * lower - 3 * n) /
    (-4 * lower - 4)
  const m = Math.round(mExact)
  return Array.from({ length: n }, (_, pass) => (pass < m ? lower : upper))
}
/**
 * Approximate Gaussian blur via three successive box blurs.
 * Note: src is used as scratch space for the middle pass, so src and dest are
 * swapped there — each pass reads the previous pass's output.
 */
function boxBlur(src, dest, width, height, sigma) {
  const kernels = genKernelsForGaussian(sigma, 3)
  // radius * 2 + 1 = kernel size
  simpleBoxBlur(src, dest, width, height, (kernels[0] - 1) / 2)
  simpleBoxBlur(dest, src, width, height, (kernels[1] - 1) / 2)
  simpleBoxBlur(src, dest, width, height, (kernels[2] - 1) / 2)
}
效果和高斯模糊基本一致(227ms):
虽然时间复杂度与高斯模糊相同,但得到的半径更小,在sigma=5,n=3的情况下,radius=[4, 4, 5],高斯模糊则是15;同时还不用计算复杂的weight,所以从实际结果上看,速度要优于高斯模糊
水平&垂直模糊
在这篇文章中提到 方框模糊可以用水平模糊+垂直模糊代替实现。要实现水平/垂直模糊很简单, 只需考虑水平/垂直线上的像素即可:
// Horizontal motion blur: average the 2r+1 horizontally adjacent pixels,
// replicating edge pixels via clamping.
function hMotionBlur(src, dest, width, height, radius) {
  const span = 2 * radius + 1
  for (let x = 0; x < width; x++) {
    for (let y = 0; y < height; y++) {
      let total = 0
      for (let offset = -radius; offset <= radius; offset++) {
        const sx = Math.min(width - 1, Math.max(0, x + offset))
        total += src[y * width + sx]
      }
      dest[y * width + x] = Math.round(total / span)
    }
  }
}
// Vertical motion blur: same idea along each column.
function vMotionBlur(src, dest, width, height, radius) {
  const span = 2 * radius + 1
  for (let x = 0; x < width; x++) {
    for (let y = 0; y < height; y++) {
      let total = 0
      for (let offset = -radius; offset <= radius; offset++) {
        const sy = Math.min(height - 1, Math.max(0, y + offset))
        total += src[sy * width + x]
      }
      dest[y * width + x] = Math.round(total / span)
    }
  }
}
利用此优化的模糊算法,时间复杂度可以优化到O(nr):
// One box-blur pass built from a horizontal pass followed by a vertical pass.
// Note the argument order: dest is blurred into src horizontally, then src
// back into dest vertically, so dest ends up holding the result.
function _mutantBoxBlur(src, dest, width, height, radius) {
  hMotionBlur(dest, src, width, height, radius)
  vMotionBlur(src, dest, width, height, radius)
}
// Approximate Gaussian blur with three separable box-blur passes, O(n * r).
function mutantBoxBlur(src, dest, width, height, sigma) {
  const boxes = genKernelsForGaussian(sigma, 3)
  // Seed dest with a copy of src so the first horizontal pass reads the input.
  for (let k = src.length - 1; k >= 0; k--) {
    dest[k] = src[k]
  }
  for (const size of boxes) {
    _mutantBoxBlur(src, dest, width, height, (size - 1) / 2)
  }
}
效果如下(82ms):
终极优化
以水平模糊为例(这里为了方便,用二维数组表示)
而
则
那很明显,我们可以通过此优化将时间复杂度由O(nr)降到O(n),最终的代码:
// Horizontal box blur in O(width) per row: keep a sliding-window sum and
// update it with one addition and one subtraction per pixel. Samples outside
// the row are clamped, which replicates the edge pixel.
function hFastMotionBlur(src, dest, width, height, radius) {
  const span = 2 * radius + 1
  for (let row = 0; row < height; row++) {
    const base = row * width
    // Window sum for the first pixel: radius virtual copies of the left edge
    // pixel plus the first radius + 1 real pixels.
    let sum = radius * src[base]
    for (let k = 0; k <= radius; k++) {
      sum += src[base + k]
    }
    dest[base] = Math.round(sum / span)
    for (let col = 1; col < width; col++) {
      // Element leaving the window (clamped to the left edge) and element
      // entering it (clamped to the right edge).
      const leaving = Math.max(0, col - radius - 1)
      const entering = Math.min(width - 1, col + radius)
      sum += src[base + entering] - src[base + leaving]
      dest[base + col] = Math.round(sum / span)
    }
  }
}
// Vertical counterpart of hFastMotionBlur, sliding the window down a column.
function vFastMotionBlur(src, dest, width, height, radius) {
  const span = 2 * radius + 1
  for (let col = 0; col < width; col++) {
    let sum = radius * src[col]
    for (let k = 0; k <= radius; k++) {
      sum += src[k * width + col]
    }
    dest[col] = Math.round(sum / span)
    for (let row = 1; row < height; row++) {
      const leaving = Math.max(0, row - radius - 1)
      const entering = Math.min(height - 1, row + radius)
      sum += src[entering * width + col] - src[leaving * width + col]
      dest[row * width + col] = Math.round(sum / span)
    }
  }
}
// One separable box-blur pass; dest must hold the current image on entry.
function _fastBlur(src, dest, width, height, radius) {
  hFastMotionBlur(dest, src, width, height, radius)
  vFastMotionBlur(src, dest, width, height, radius)
}
// Linear-time Gaussian blur approximation: three separable sliding-window box
// blurs whose kernel sizes are chosen to match the requested sigma.
function fastBlur(src, dest, width, height, sigma) {
  const boxes = genKernelsForGaussian(sigma, 3)
  for (let k = src.length - 1; k >= 0; k--) {
    dest[k] = src[k]
  }
  for (const size of boxes) {
    _fastBlur(src, dest, width, height, (size - 1) / 2)
  }
}
效果(20ms):
总结
先后通过多次方框模糊、水平+垂直模糊代替方框模糊、对水平/垂直模糊计算过程的优化,逐步将高斯模糊的耗时降低,最后得到了只与图片大小相关的O(n)时间复杂度算法
参考
- http://elynxsdk.free.fr/ext-d...
- http://blog.ivank.net/fastest...
- https://www.peterkovesi.com/p...
- https://stackoverflow.com/que...
- https://blog.demofox.org/2015...