Performance optimization for impulse_nr on multi-core systems (Issue 1671)

Ingo
2013-01-22 19:00:36 +01:00
parent ce43113575
commit 7b0cda5c61


@@ -62,9 +62,9 @@ void ImProcFunctions::impulse_nr (LabImage* lab, double thresh) {
    int i1, j1;
    //rangeblur<unsigned short, unsigned int> (lab->L, lpf, impish /*used as buffer here*/, width, height, thresh, false);
#ifdef _OPENMP
#pragma omp parallel
#endif
    {
        AlignedBufferMP<double> buffer(max(width,height));
@@ -75,7 +75,11 @@ void ImProcFunctions::impulse_nr (LabImage* lab, double thresh) {
    //%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    float impthr = max(1.0,5.5-thresh);
    float impthrDiv24 = impthr / 24.0f; // Issue 1671: moved the division outside the loop; impthr could be optimized out too, but it is left in the code for the moment
#ifdef _OPENMP
#pragma omp parallel for private(hpfabs, hfnbrave,i1,j1)
#endif
    for (int i=0; i < height; i++)
        for (int j=0; j < width; j++) {
@@ -85,11 +89,19 @@ void ImProcFunctions::impulse_nr (LabImage* lab, double thresh) {
            for (j1=max(0,j-2); j1<=min(j+2,width-1); j1++ ) {
                hfnbrave += fabs(lab->L[i1][j1]-lpf[i1][j1]);
            }
-           hfnbrave = (hfnbrave-hpfabs)/24;
-           hpfabs>(hfnbrave*impthr) ? impish[i][j]=1 : impish[i][j]=0;
+           impish[i][j] = (hpfabs>((hfnbrave-hpfabs)*impthrDiv24));
        }//now impulsive values have been identified
    // Issue 1671:
    // Often the noise isn't evenly distributed, e.g. only a few noisy pixels in the bright sky but many in the dark
    // foreground, so it's better to schedule dynamically and let every thread process only 16 rows at a time; this
    // avoids some threads running out of work while others are still busy on the expensive rows.
    // Measured: this in fact gives better performance than the same loop without schedule(dynamic,16). Of course,
    // there could be a better choice for the chunk_size than 16.
    // Race conditions are avoided by the impish array.
#ifdef _OPENMP
#pragma omp parallel for private(wtdsum,norm,dirwt,i1,j1) schedule(dynamic,16)
#endif
    for (int i=0; i < height; i++)
        for (int j=0; j < width; j++) {
            if (!impish[i][j]) continue;
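
A minimal standalone sketch of the pattern the detection loop above introduces, assuming plain std::vector images instead of the LabImage/lpf/impish buffers: the division by 24 is loop-invariant, so it is computed once before the parallel loop and every pixel only pays for a multiplication, and the loop is parallelised over rows with OpenMP. The names here (detect_impulses, lum, lowpass, flag) are hypothetical stand-ins, not RawTherapee identifiers.

#include <algorithm>
#include <cmath>
#include <vector>

// Hypothetical sketch, not RawTherapee code: flag impulsive pixels by comparing a
// pixel's high-pass magnitude against the summed high-pass magnitude of its 5x5
// neighbourhood, mirroring the detection loop in the commit above.
void detect_impulses(const std::vector<std::vector<float>>& lum,      // luminance
                     const std::vector<std::vector<float>>& lowpass,  // blurred luminance
                     std::vector<std::vector<char>>& flag,            // output: 1 = impulsive pixel
                     int width, int height, double thresh)
{
    const float impthr      = static_cast<float>(std::max(1.0, 5.5 - thresh));
    const float impthrDiv24 = impthr / 24.0f;  // hoisted: one division total instead of one per pixel

#ifdef _OPENMP
    #pragma omp parallel for
#endif
    for (int i = 0; i < height; i++)
        for (int j = 0; j < width; j++) {
            const float hpfabs = std::fabs(lum[i][j] - lowpass[i][j]);
            float hfnbrave = 0.0f;
            for (int i1 = std::max(0, i - 2); i1 <= std::min(i + 2, height - 1); i1++)
                for (int j1 = std::max(0, j - 2); j1 <= std::min(j + 2, width - 1); j1++)
                    hfnbrave += std::fabs(lum[i1][j1] - lowpass[i1][j1]);
            // same test as the commit, written with the precomputed constant impthrDiv24 = impthr / 24
            flag[i][j] = (hpfabs > (hfnbrave - hpfabs) * impthrDiv24);
        }
}

Because hpfabs and hfnbrave are declared inside the loop body in this sketch, every thread automatically gets its own copies; the commit needs the explicit private(hpfabs, hfnbrave,i1,j1) clause only because those variables are declared once at function scope.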
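
The comment block before the second pragma carries the actual scheduling argument, and a small self-contained sketch may make it concrete: with schedule(dynamic,16) rows are handed out to threads in chunks of 16 on demand, so threads that finish cheap rows early pull the next chunk instead of idling while others grind through the expensive ones. Everything below (process_row, the quadratic cost curve, the image size) is invented for illustration and only mimics the uneven work distribution described in the comment.

#include <cstdio>
#include <vector>

// Hypothetical stand-in for the replacement loop: per-row cost grows towards the
// bottom of the image, mimicking noise concentrated in a dark foreground.
static double process_row(int row, int width)
{
    double acc = 0.0;
    const int cost = 1 + (row * row) / 1024;   // uneven, row-dependent amount of work
    for (int j = 0; j < width * cost; j++)
        acc += 1e-9 * j;
    return acc;
}

int main()
{
    const int height = 1024, width = 1024;
    std::vector<double> result(height);

#ifdef _OPENMP
    // Chunks of 16 rows, handed out on demand; a static schedule would give every
    // thread one contiguous block of rows, so the thread holding the expensive bottom
    // block finishes last while the others sit idle.
    // Each iteration writes only its own result[i], so there is no race to worry
    // about, analogous to the impish array in the commit.
    #pragma omp parallel for schedule(dynamic, 16)
#endif
    for (int i = 0; i < height; i++)
        result[i] = process_row(i, width);

    double total = 0.0;
    for (int i = 0; i < height; i++)
        total += result[i];
    std::printf("checksum: %.3f\n", total);
    return 0;
}

The chunk size is the trade-off the commit comment itself flags: very small chunks send threads back to the scheduler more often, very large chunks reintroduce the imbalance, so 16 rows is a measured compromise rather than anything fundamental.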