From b8af63bb047f97a30d78f97818a67ca99cd7f1c0 Mon Sep 17 00:00:00 2001
From: heckflosse
Date: Mon, 5 Nov 2018 19:50:24 +0100
Subject: [PATCH] Use vclampf(value, low, high) wherever possible, #4942

---
 rtengine/CA_correct_RT.cc  |  2 +-
 rtengine/LUT.h             | 10 +++++-----
 rtengine/curves.h          | 12 ++++++------
 rtengine/demosaic_algos.cc | 16 ++++++++--------
 rtengine/iplabregions.cc   |  6 +++---
 rtengine/ipretinex.cc      |  4 ++--
 rtengine/sleefsseavx.c     |  5 +++--
 7 files changed, 28 insertions(+), 27 deletions(-)

diff --git a/rtengine/CA_correct_RT.cc b/rtengine/CA_correct_RT.cc
index 22ad77e63..2fa589110 100644
--- a/rtengine/CA_correct_RT.cc
+++ b/rtengine/CA_correct_RT.cc
@@ -1252,7 +1252,7 @@ float* RawImageSource::CA_correct_RT(
                     vfloat factors = oldvals / newvals;
                     factors = vself(vmaskf_le(newvals, onev), onev, factors);
                     factors = vself(vmaskf_le(oldvals, onev), onev, factors);
-                    STVFU((*nonGreen)[i/2][j/2], LIMV(factors, zd5v, twov));
+                    STVFU((*nonGreen)[i/2][j/2], vclampf(factors, zd5v, twov));
                 }
 #endif
                 for (; j < W - 2 * cb; j += 2) {
diff --git a/rtengine/LUT.h b/rtengine/LUT.h
index fedc20ca2..de668cca8 100644
--- a/rtengine/LUT.h
+++ b/rtengine/LUT.h
@@ -320,7 +320,7 @@ public:
 
         // Clamp and convert to integer values. Extract out of SSE register because all
         // lookup operations use regular addresses.
-        vfloat clampedIndexes = vmaxf(vminf(maxsv, indexv), ZEROV); // this automagically uses ZEROV in case indexv is NaN
+        vfloat clampedIndexes = vclampf(indexv, ZEROV, maxsv); // this automagically uses ZEROV in case indexv is NaN
         vint indexes = _mm_cvttps_epi32(clampedIndexes);
         int indexArray[4];
         _mm_storeu_si128(reinterpret_cast<__m128i*>(&indexArray[0]), indexes);
@@ -352,7 +352,7 @@ public:
 
         // Clamp and convert to integer values. Extract out of SSE register because all
         // lookup operations use regular addresses.
-        vfloat clampedIndexes = vmaxf(vminf(maxsv, indexv), ZEROV); // this automagically uses ZEROV in case indexv is NaN
+        vfloat clampedIndexes = vclampf(indexv, ZEROV, maxsv); // this automagically uses ZEROV in case indexv is NaN
         vint indexes = _mm_cvttps_epi32(clampedIndexes);
         int indexArray[4];
         _mm_storeu_si128(reinterpret_cast<__m128i*>(&indexArray[0]), indexes);
@@ -372,7 +372,7 @@ public:
         vfloat lower = _mm_castsi128_ps(_mm_unpacklo_epi64(temp0, temp1));
         vfloat upper = _mm_castsi128_ps(_mm_unpackhi_epi64(temp0, temp1));
 
-        vfloat diff = vmaxf(vminf(sizev, indexv), ZEROV) - _mm_cvtepi32_ps(indexes); // this automagically uses ZEROV in case indexv is NaN
+        vfloat diff = vclampf(indexv, ZEROV, sizev) - _mm_cvtepi32_ps(indexes); // this automagically uses ZEROV in case indexv is NaN
         return vintpf(diff, upper, lower);
     }
@@ -383,7 +383,7 @@ public:
 
         // Clamp and convert to integer values. Extract out of SSE register because all
         // lookup operations use regular addresses.
-        vfloat clampedIndexes = vmaxf(vminf(maxsv, indexv), ZEROV); // this automagically uses ZEROV in case indexv is NaN
+        vfloat clampedIndexes = vclampf(indexv, ZEROV, maxsv); // this automagically uses ZEROV in case indexv is NaN
         vint indexes = _mm_cvttps_epi32(clampedIndexes);
         int indexArray[4];
         _mm_storeu_si128(reinterpret_cast<__m128i*>(&indexArray[0]), indexes);
@@ -421,7 +421,7 @@ public:
     vfloat operator[](vint idxv) const
     {
         // convert to float because SSE2 has no min/max for 32bit integers
-        vfloat tempv = vmaxf(vminf(sizev, _mm_cvtepi32_ps(idxv)), ZEROV); // this automagically uses ZEROV in case idxv is NaN (which will never happen because it is a vector of int)
+        vfloat tempv = vclampf(_mm_cvtepi32_ps(idxv), ZEROV, sizev); // this automagically uses ZEROV in case idxv is NaN (which will never happen because it is a vector of int)
         idxv = _mm_cvttps_epi32(tempv);
         // access the LUT 4 times. Trust the compiler. It generates good code here, better than hand written SSE code
         return _mm_setr_ps(data[_mm_cvtsi128_si32(idxv)],
diff --git a/rtengine/curves.h b/rtengine/curves.h
index 30fb01102..c66c19a27 100644
--- a/rtengine/curves.h
+++ b/rtengine/curves.h
@@ -1157,9 +1157,9 @@ inline void WeightedStdToneCurve::BatchApply(const size_t start, const size_t en
     float tmpb[4] ALIGNED16;
 
     for (; i + 3 < end; i += 4) {
-        vfloat r_val = LIMV(LVF(r[i]), ZEROV, c65535v);
-        vfloat g_val = LIMV(LVF(g[i]), ZEROV, c65535v);
-        vfloat b_val = LIMV(LVF(b[i]), ZEROV, c65535v);
+        vfloat r_val = vclampf(LVF(r[i]), ZEROV, c65535v);
+        vfloat g_val = vclampf(LVF(g[i]), ZEROV, c65535v);
+        vfloat b_val = vclampf(LVF(b[i]), ZEROV, c65535v);
         vfloat r1 = lutToneCurve[r_val];
         vfloat g1 = Triangle(r_val, r1, g_val);
         vfloat b1 = Triangle(r_val, r1, b_val);
@@ -1172,9 +1172,9 @@ inline void WeightedStdToneCurve::BatchApply(const size_t start, const size_t en
         vfloat r3 = Triangle(b_val, b3, r_val);
         vfloat g3 = Triangle(b_val, b3, g_val);
 
-        STVF(tmpr[0], LIMV(r1 * zd5v + r2 * zd25v + r3 * zd25v, ZEROV, c65535v));
-        STVF(tmpg[0], LIMV(g1 * zd25v + g2 * zd5v + g3 * zd25v, ZEROV, c65535v));
-        STVF(tmpb[0], LIMV(b1 * zd25v + b2 * zd25v + b3 * zd5v, ZEROV, c65535v));
+        STVF(tmpr[0], vclampf(r1 * zd5v + r2 * zd25v + r3 * zd25v, ZEROV, c65535v));
+        STVF(tmpg[0], vclampf(g1 * zd25v + g2 * zd5v + g3 * zd25v, ZEROV, c65535v));
+        STVF(tmpb[0], vclampf(b1 * zd25v + b2 * zd25v + b3 * zd5v, ZEROV, c65535v));
 
         for (int j = 0; j < 4; ++j) {
             setUnlessOOG(r[i+j], g[i+j], b[i+j], tmpr[j], tmpg[j], tmpb[j]);
         }
diff --git a/rtengine/demosaic_algos.cc b/rtengine/demosaic_algos.cc
index 10ef0dd0e..a324a7ca6 100644
--- a/rtengine/demosaic_algos.cc
+++ b/rtengine/demosaic_algos.cc
@@ -1399,7 +1399,7 @@ void RawImageSource::lmmse_interpolate_omp(int winw, int winh, array2D<float> &r
 // Adapted to RawTherapee by Jacques Desmis 3/2013
 // SSE version by Ingo Weyrich 5/2013
 #ifdef __SSE2__
-#define CLIPV(a) LIMV(a,zerov,c65535v)
+#define CLIPV(a) vclampf(a,zerov,c65535v)
 void RawImageSource::igv_interpolate(int winw, int winh)
 {
     static const float eps = 1e-5f, epssq = 1e-5f; //mod epssq -10f =>-5f Jacques 3/2013 to prevent artifact (divide by zero)
@@ -1513,10 +1513,10 @@ void RawImageSource::igv_interpolate(int winw, int winh)
                 //N,E,W,S Hamilton Adams Interpolation
                 // (48.f * 65535.f) = 3145680.f
                 tempv = c40v * LVFU(rgb[0][indx1]);
-                nvv = LIMV(((c23v * LVFU(rgb[1][(indx - v1) >> 1]) + c23v * LVFU(rgb[1][(indx - v3) >> 1]) + LVFU(rgb[1][(indx - v5) >> 1]) + LVFU(rgb[1][(indx + v1) >> 1]) + tempv - c32v * LVFU(rgb[0][(indx1 - v1)]) - c8v * LVFU(rgb[0][(indx1 - v2)]))) / c3145680v, zerov, onev);
-                evv = LIMV(((c23v * LVFU(rgb[1][(indx + h1) >> 1]) + c23v * LVFU(rgb[1][(indx + h3) >> 1]) + LVFU(rgb[1][(indx + h5) >> 1]) + LVFU(rgb[1][(indx - h1) >> 1]) + tempv - c32v * LVFU(rgb[0][(indx1 + h1)]) - c8v * LVFU(rgb[0][(indx1 + h2)]))) / c3145680v, zerov, onev);
-                wvv = LIMV(((c23v * LVFU(rgb[1][(indx - h1) >> 1]) + c23v * LVFU(rgb[1][(indx - h3) >> 1]) + LVFU(rgb[1][(indx - h5) >> 1]) + LVFU(rgb[1][(indx + h1) >> 1]) + tempv - c32v * LVFU(rgb[0][(indx1 - h1)]) - c8v * LVFU(rgb[0][(indx1 - h2)]))) / c3145680v, zerov, onev);
-                svv = LIMV(((c23v * LVFU(rgb[1][(indx + v1) >> 1]) + c23v * LVFU(rgb[1][(indx + v3) >> 1]) + LVFU(rgb[1][(indx + v5) >> 1]) + LVFU(rgb[1][(indx - v1) >> 1]) + tempv - c32v * LVFU(rgb[0][(indx1 + v1)]) - c8v * LVFU(rgb[0][(indx1 + v2)]))) / c3145680v, zerov, onev);
+                nvv = vclampf(((c23v * LVFU(rgb[1][(indx - v1) >> 1]) + c23v * LVFU(rgb[1][(indx - v3) >> 1]) + LVFU(rgb[1][(indx - v5) >> 1]) + LVFU(rgb[1][(indx + v1) >> 1]) + tempv - c32v * LVFU(rgb[0][(indx1 - v1)]) - c8v * LVFU(rgb[0][(indx1 - v2)]))) / c3145680v, zerov, onev);
+                evv = vclampf(((c23v * LVFU(rgb[1][(indx + h1) >> 1]) + c23v * LVFU(rgb[1][(indx + h3) >> 1]) + LVFU(rgb[1][(indx + h5) >> 1]) + LVFU(rgb[1][(indx - h1) >> 1]) + tempv - c32v * LVFU(rgb[0][(indx1 + h1)]) - c8v * LVFU(rgb[0][(indx1 + h2)]))) / c3145680v, zerov, onev);
+                wvv = vclampf(((c23v * LVFU(rgb[1][(indx - h1) >> 1]) + c23v * LVFU(rgb[1][(indx - h3) >> 1]) + LVFU(rgb[1][(indx - h5) >> 1]) + LVFU(rgb[1][(indx + h1) >> 1]) + tempv - c32v * LVFU(rgb[0][(indx1 - h1)]) - c8v * LVFU(rgb[0][(indx1 - h2)]))) / c3145680v, zerov, onev);
+                svv = vclampf(((c23v * LVFU(rgb[1][(indx + v1) >> 1]) + c23v * LVFU(rgb[1][(indx + v3) >> 1]) + LVFU(rgb[1][(indx + v5) >> 1]) + LVFU(rgb[1][(indx - v1) >> 1]) + tempv - c32v * LVFU(rgb[0][(indx1 + v1)]) - c8v * LVFU(rgb[0][(indx1 + v2)]))) / c3145680v, zerov, onev);
                 //Horizontal and vertical color differences
                 tempv = LVFU( rgb[0][indx1] ) / c65535v;
                 _mm_storeu_ps( &vdif[indx1], (sgv * nvv + ngv * svv) / (ngv + sgv) - tempv );
@@ -1561,9 +1561,9 @@ void RawImageSource::igv_interpolate(int winw, int winh)
            for (col = 7 + (FC(row, 1) & 1), indx1 = (row * width + col) >> 1, d = FC(row, col) / 2; col < width - 14; col += 8, indx1 += 4) {
                //H&V integrated gaussian vector over variance on color differences
                //Mod Jacques 3/2013
-               ngv = LIMV(epssqv + c78v * SQRV(LVFU(vdif[indx1])) + c69v * (SQRV(LVFU(vdif[indx1 - v1])) + SQRV(LVFU(vdif[indx1 + v1]))) + c51v * (SQRV(LVFU(vdif[indx1 - v2])) + SQRV(LVFU(vdif[indx1 + v2]))) + c21v * (SQRV(LVFU(vdif[indx1 - v3])) + SQRV(LVFU(vdif[indx1 + v3]))) - c6v * SQRV(LVFU(vdif[indx1 - v1]) + LVFU(vdif[indx1]) + LVFU(vdif[indx1 + v1]))
+               ngv = vclampf(epssqv + c78v * SQRV(LVFU(vdif[indx1])) + c69v * (SQRV(LVFU(vdif[indx1 - v1])) + SQRV(LVFU(vdif[indx1 + v1]))) + c51v * (SQRV(LVFU(vdif[indx1 - v2])) + SQRV(LVFU(vdif[indx1 + v2]))) + c21v * (SQRV(LVFU(vdif[indx1 - v3])) + SQRV(LVFU(vdif[indx1 + v3]))) - c6v * SQRV(LVFU(vdif[indx1 - v1]) + LVFU(vdif[indx1]) + LVFU(vdif[indx1 + v1]))
                      - c10v * (SQRV(LVFU(vdif[indx1 - v2]) + LVFU(vdif[indx1 - v1]) + LVFU(vdif[indx1])) + SQRV(LVFU(vdif[indx1]) + LVFU(vdif[indx1 + v1]) + LVFU(vdif[indx1 + v2]))) - c7v * (SQRV(LVFU(vdif[indx1 - v3]) + LVFU(vdif[indx1 - v2]) + LVFU(vdif[indx1 - v1])) + SQRV(LVFU(vdif[indx1 + v1]) + LVFU(vdif[indx1 + v2]) + LVFU(vdif[indx1 + v3]))), zerov, onev);
-               egv = LIMV(epssqv + c78v * SQRV(LVFU(hdif[indx1])) + c69v * (SQRV(LVFU(hdif[indx1 - h1])) + SQRV(LVFU(hdif[indx1 + h1]))) + c51v * (SQRV(LVFU(hdif[indx1 - h2])) + SQRV(LVFU(hdif[indx1 + h2]))) + c21v * (SQRV(LVFU(hdif[indx1 - h3])) + SQRV(LVFU(hdif[indx1 + h3]))) - c6v * SQRV(LVFU(hdif[indx1 - h1]) + LVFU(hdif[indx1]) + LVFU(hdif[indx1 + h1]))
+               egv = vclampf(epssqv + c78v * SQRV(LVFU(hdif[indx1])) + c69v * (SQRV(LVFU(hdif[indx1 - h1])) + SQRV(LVFU(hdif[indx1 + h1]))) + c51v * (SQRV(LVFU(hdif[indx1 - h2])) + SQRV(LVFU(hdif[indx1 + h2]))) + c21v * (SQRV(LVFU(hdif[indx1 - h3])) + SQRV(LVFU(hdif[indx1 + h3]))) - c6v * SQRV(LVFU(hdif[indx1 - h1]) + LVFU(hdif[indx1]) + LVFU(hdif[indx1 + h1]))
                      - c10v * (SQRV(LVFU(hdif[indx1 - h2]) + LVFU(hdif[indx1 - h1]) + LVFU(hdif[indx1])) + SQRV(LVFU(hdif[indx1]) + LVFU(hdif[indx1 + h1]) + LVFU(hdif[indx1 + h2]))) - c7v * (SQRV(LVFU(hdif[indx1 - h3]) + LVFU(hdif[indx1 - h2]) + LVFU(hdif[indx1 - h1])) + SQRV(LVFU(hdif[indx1 + h1]) + LVFU(hdif[indx1 + h2]) + LVFU(hdif[indx1 + h3]))), zerov, onev);
                //Limit chrominance using H/V neighbourhood
                nvv = median(d725v * LVFU(vdif[indx1]) + d1375v * LVFU(vdif[indx1 - v1]) + d1375v * LVFU(vdif[indx1 + v1]), LVFU(vdif[indx1 - v1]), LVFU(vdif[indx1 + v1]));
@@ -2114,7 +2114,7 @@ void RawImageSource::nodemosaic(bool bw)
 */
 
 #ifdef __SSE2__
-#define CLIPV(a) LIMV(a,ZEROV,c65535v)
+#define CLIPV(a) vclampf(a,ZEROV,c65535v)
 #endif
 void RawImageSource::refinement(int PassCount)
 {
diff --git a/rtengine/iplabregions.cc b/rtengine/iplabregions.cc
index d5bbd6302..d2380494a 100644
--- a/rtengine/iplabregions.cc
+++ b/rtengine/iplabregions.cc
@@ -204,9 +204,9 @@ BENCHFUN
                 for (int i = 0; i < n; ++i) {
                     vfloat blendv = LVFU(abmask[i][y][x]);
                     vfloat sv = F2V(rs[i]);
-                    vfloat a_newv = LIMV(sv * (av + F2V(abca[i])), cm42000v, c42000v);
-                    vfloat b_newv = LIMV(sv * (bv + F2V(abcb[i])), cm42000v, c42000v);
-                    vfloat l_newv = LIMV(lv * F2V(rl[i]), ZEROV, c32768v);
+                    vfloat a_newv = vclampf(sv * (av + F2V(abca[i])), cm42000v, c42000v);
+                    vfloat b_newv = vclampf(sv * (bv + F2V(abcb[i])), cm42000v, c42000v);
+                    vfloat l_newv = vclampf(lv * F2V(rl[i]), ZEROV, c32768v);
                     lv = vintpf(LVFU(Lmask[i][y][x]), l_newv, lv);
                     av = vintpf(blendv, a_newv, av);
                     bv = vintpf(blendv, b_newv, bv);
diff --git a/rtengine/ipretinex.cc b/rtengine/ipretinex.cc
index e79ee52a3..fa961fbbb 100644
--- a/rtengine/ipretinex.cc
+++ b/rtengine/ipretinex.cc
@@ -504,11 +504,11 @@ void RawImageSource::MSR(float** luminance, float** originalLuminance, float **e
 
                 if(useHslLin) {
                     for (; j < W_L - 3; j += 4) {
-                        _mm_storeu_ps(&luminance[i][j], LVFU(luminance[i][j]) + pondv * (LIMV(LVFU(src[i][j]) / LVFU(out[i][j]), limMinv, limMaxv) ));
+                        _mm_storeu_ps(&luminance[i][j], LVFU(luminance[i][j]) + pondv * (vclampf(LVFU(src[i][j]) / LVFU(out[i][j]), limMinv, limMaxv) ));
                     }
                 } else {
                     for (; j < W_L - 3; j += 4) {
-                        _mm_storeu_ps(&luminance[i][j], LVFU(luminance[i][j]) + pondv * xlogf(LIMV(LVFU(src[i][j]) / LVFU(out[i][j]), limMinv, limMaxv) ));
+                        _mm_storeu_ps(&luminance[i][j], LVFU(luminance[i][j]) + pondv * xlogf(vclampf(LVFU(src[i][j]) / LVFU(out[i][j]), limMinv, limMaxv) ));
                     }
                 }
diff --git a/rtengine/sleefsseavx.c b/rtengine/sleefsseavx.c
index c68f11ae0..83d937bd1 100644
--- a/rtengine/sleefsseavx.c
+++ b/rtengine/sleefsseavx.c
@@ -1368,8 +1368,9 @@ static INLINE vfloat xcbrtf(vfloat d) {
   return y;
 }
 
-static INLINE vfloat LIMV( vfloat a, vfloat b, vfloat c ) {
-return vmaxf( b, vminf(a,c));
+static INLINE vfloat vclampf(vfloat value, vfloat low, vfloat high) {
+    // clamps value in [low;high], returns low if value is NaN
+    return vmaxf(vminf(high, value), low);
 }
 
 static INLINE vfloat SQRV(vfloat a){
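
Reviewer note, not part of the patch: the "automagically uses ZEROV in case indexv is NaN" comments in LUT.h depend on the operand order inside the clamp. SSE minps/maxps return their second operand whenever either operand is NaN, so vminf(high, NaN) yields NaN and vmaxf(NaN, low) then yields low. The old LIMV(a, b, c) = vmaxf(b, vminf(a, c)) instead forwarded a NaN input to the upper bound c, so call sites converted from LIMV now send NaN lanes to the lower bound. The standalone C sketch below makes this visible; the vfloat/vminf/vmaxf shims merely mirror the sleefsseavx.c definitions, and main() is hypothetical test scaffolding, not RawTherapee code.

/*
 * Minimal sketch: NaN behaviour of the new vclampf versus the old LIMV.
 * SSE minps/maxps return their SECOND operand when either operand is NaN.
 * Build: cc -msse2 clamp_nan_demo.c -o clamp_nan_demo
 */
#include <stdio.h>
#include <math.h>
#include <xmmintrin.h>

typedef __m128 vfloat;

static inline vfloat vminf(vfloat x, vfloat y) { return _mm_min_ps(x, y); }
static inline vfloat vmaxf(vfloat x, vfloat y) { return _mm_max_ps(x, y); }

// new helper, as defined in the patch
static inline vfloat vclampf(vfloat value, vfloat low, vfloat high) {
    return vmaxf(vminf(high, value), low); // NaN lane -> low
}

// old helper, kept here for comparison (a = value, b = low, c = high)
static inline vfloat LIMV(vfloat a, vfloat b, vfloat c) {
    return vmaxf(b, vminf(a, c)); // NaN lane -> c (the upper bound)
}

int main(void) {
    // lanes: below range, inside range, above range, NaN
    vfloat value = _mm_setr_ps(-1.0f, 0.5f, 2.0f, nanf(""));
    vfloat low = _mm_set1_ps(0.0f), high = _mm_set1_ps(1.0f);

    float n[4], o[4];
    _mm_storeu_ps(n, vclampf(value, low, high));
    _mm_storeu_ps(o, LIMV(value, low, high));
    printf("vclampf: %g %g %g %g\n", n[0], n[1], n[2], n[3]); // 0 0.5 1 0
    printf("LIMV:    %g %g %g %g\n", o[0], o[1], o[2], o[3]); // 0 0.5 1 1
    return 0;
}

Expected output is "vclampf: 0 0.5 1 0" versus "LIMV: 0 0.5 1 1": ordinary lanes clamp identically, and only the NaN lane differs, landing on the lower bound with the new helper, which is exactly what the LUT.h comments already assumed.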