diff --git a/rtengine/improccoordinator.cc b/rtengine/improccoordinator.cc
index a7c2144fb..a2d107e76 100644
--- a/rtengine/improccoordinator.cc
+++ b/rtengine/improccoordinator.cc
@@ -712,17 +712,29 @@ void ImProcCoordinator::updatePreviewImage (int todo, Crop* cropCall)
  * 2017 Jacques Desmis
  */
+        bool isascii = true;
+        Glib::ustring datainterm = imgsrc->getFileName() + ".mip";
+        ofstream finterm (datainterm, ios::out);
+
+        if (finterm.fail()) {
+            printf ("Non ascii Mip file..switch to Profiles\n");
+            isascii = false;
+        } else {
+            printf ("ascii Mip file!\n");
+        }
+
+        finterm.close();
-        // Glib::ustring datalab = imgsrc->getFileName() + ".mip";
         // printf("mip file=%s \n", datalab.c_str());
         Glib::ustring pop = options.getUserProfilePath() + "/";
         Glib::ustring datal;
 
-        if (options.mip == MI_opt) {
+
+        if (options.mip == MI_opt || !isascii) {
             datal = pop + Glib::path_get_basename (imgsrc->getFileName () + ".mip");
         }
 
-        if (options.mip == MI_prev) {
+        if (options.mip == MI_prev && isascii) {
             datal = imgsrc->getFileName() + ".mip";
         }
 
diff --git a/rtengine/rawimagesource.cc b/rtengine/rawimagesource.cc
index 3f713c6ad..126ec4dea 100644
--- a/rtengine/rawimagesource.cc
+++ b/rtengine/rawimagesource.cc
@@ -48,24 +48,24 @@ namespace
 
 void rotateLine (const float* const line, rtengine::PlanarPtr &channel, const int tran, const int i, const int w, const int h)
 {
-    switch(tran & TR_ROT) {
+    switch (tran & TR_ROT) {
         case TR_R180:
             for (int j = 0; j < w; j++) {
-                channel(h - 1 - i, w - 1 - j) = line[j];
+                channel (h - 1 - i, w - 1 - j) = line[j];
             }
 
             break;
 
         case TR_R90:
             for (int j = 0; j < w; j++) {
-                channel(j, h - 1 - i) = line[j];
+                channel (j, h - 1 - i) = line[j];
            }
 
             break;
 
         case TR_R270:
             for (int j = 0; j < w; j++) {
-                channel(w - 1 - j, i) = line[j];
+                channel (w - 1 - j, i) = line[j];
             }
 
             break;
@@ -73,7 +73,7 @@ void rotateLine (const float* const line, rtengine::PlanarPtr &channel, c
         case TR_NONE:
         default:
             for (int j = 0; j < w; j++) {
-                channel(i, j) = line[j];
+                channel (i, j) = line[j];
             }
     }
 }
@@ -90,21 +90,21 @@ void transLineFuji (const float* const red, const float* const green, const floa
 {
 
     // Fuji SuperCCD rotation + coarse rotation
-    int start = ABS(fw - i);
+    int start = ABS (fw - i);
     int w = fw * 2 + 1;
     int h = (imheight - fw) * 2 + 1;
-    int end = min(h + fw - i, w - fw + i);
+    int end = min (h + fw - i, w - fw + i);
 
-    switch(tran & TR_ROT) {
+    switch (tran & TR_ROT) {
         case TR_R180:
             for (int j = start; j < end; j++) {
                 int y = i + j - fw;
                 int x = fw - i + j;
 
                 if (x >= 0 && y < image->height && y >= 0 && x < image->width) {
-                    image->r(image->height - 1 - y, image->width - 1 - x) = red[j];
-                    image->g(image->height - 1 - y, image->width - 1 - x) = green[j];
-                    image->b(image->height - 1 - y, image->width - 1 - x) = blue[j];
+                    image->r (image->height - 1 - y, image->width - 1 - x) = red[j];
+                    image->g (image->height - 1 - y, image->width - 1 - x) = green[j];
+                    image->b (image->height - 1 - y, image->width - 1 - x) = blue[j];
                 }
             }
 
@@ -116,9 +116,9 @@ void transLineFuji (const float* const red, const float* const green, const floa
                 int x = fw - i + j;
 
                 if (x >= 0 && x < image->height && y >= 0 && y < image->width) {
-                    image->r(image->height - 1 - x, y) = red[j];
-                    image->g(image->height - 1 - x, y) = green[j];
-                    image->b(image->height - 1 - x, y) = blue[j];
+                    image->r (image->height - 1 - x, y) = red[j];
+                    image->g (image->height - 1 - x, y) = green[j];
+                    image->b (image->height - 1 - x, y) = blue[j];
                 }
             }
 
@@ -130,9 +130,9 @@ void transLineFuji (const float* const red, const float* const green, const
floa int x = fw - i + j; if (x >= 0 && y < image->width && y >= 0 && x < image->height) { - image->r(x, image->width - 1 - y) = red[j]; - image->g(x, image->width - 1 - y) = green[j]; - image->b(x, image->width - 1 - y) = blue[j]; + image->r (x, image->width - 1 - y) = red[j]; + image->g (x, image->width - 1 - y) = green[j]; + image->b (x, image->width - 1 - y) = blue[j]; } } @@ -145,9 +145,9 @@ void transLineFuji (const float* const red, const float* const green, const floa int x = fw - i + j; if (x >= 0 && y < image->height && y >= 0 && x < image->width) { - image->r(y, x) = red[j]; - image->g(y, x) = green[j]; - image->b(y, x) = blue[j]; + image->r (y, x) = red[j]; + image->g (y, x) = green[j]; + image->b (y, x) = blue[j]; } } } @@ -160,19 +160,19 @@ void transLineD1x (const float* const red, const float* const green, const float // We have to do vertical interpolation for the 'missing' rows // We do that in combination with coarse rotation - switch(tran & TR_ROT) { + switch (tran & TR_ROT) { case TR_R180: // rotate 180 degree for (int j = 0; j < imwidth; j++) { - image->r(2 * (imheight - 1 - i), imwidth - 1 - j) = red[j]; - image->g(2 * (imheight - 1 - i), imwidth - 1 - j) = green[j]; - image->b(2 * (imheight - 1 - i), imwidth - 1 - j) = blue[j]; + image->r (2 * (imheight - 1 - i), imwidth - 1 - j) = red[j]; + image->g (2 * (imheight - 1 - i), imwidth - 1 - j) = green[j]; + image->b (2 * (imheight - 1 - i), imwidth - 1 - j) = blue[j]; } if (i == 0) { for (int j = 0; j < imwidth; j++) { - image->r(2 * imheight - 1, imwidth - 1 - j) = red[j]; - image->g(2 * imheight - 1, imwidth - 1 - j) = green[j]; - image->b(2 * imheight - 1, imwidth - 1 - j) = blue[j]; + image->r (2 * imheight - 1, imwidth - 1 - j) = red[j]; + image->g (2 * imheight - 1, imwidth - 1 - j) = green[j]; + image->b (2 * imheight - 1, imwidth - 1 - j) = blue[j]; } } @@ -181,19 +181,19 @@ void transLineD1x (const float* const red, const float* const green, const float for (int j = 0; j < imwidth; j++) { int col = imwidth - 1 - j; - image->r(row, col) = (red[j] + image->r(row + 1, col)) / 2; - image->g(row, col) = (green[j] + image->g(row + 1, col)) / 2; - image->b(row, col) = (blue[j] + image->b(row + 1, col)) / 2; + image->r (row, col) = (red[j] + image->r (row + 1, col)) / 2; + image->g (row, col) = (green[j] + image->g (row + 1, col)) / 2; + image->b (row, col) = (blue[j] + image->b (row + 1, col)) / 2; } - if(i == 2 && oddHeight) { + if (i == 2 && oddHeight) { int row = 2 * imheight; for (int j = 0; j < imwidth; j++) { int col = imwidth - 1 - j; - image->r(row, col) = (red[j] + image->r(row - 2, col)) / 2; - image->g(row, col) = (green[j] + image->g(row - 2, col)) / 2; - image->b(row, col) = (blue[j] + image->b(row - 2, col)) / 2; + image->r (row, col) = (red[j] + image->r (row - 2, col)) / 2; + image->g (row, col) = (green[j] + image->g (row - 2, col)) / 2; + image->b (row, col) = (blue[j] + image->b (row - 2, col)) / 2; } } } else if (i == imheight - 1 || i == imheight - 2) { @@ -201,32 +201,32 @@ void transLineD1x (const float* const red, const float* const green, const float for (int j = 0; j < imwidth; j++) { int col = imwidth - 1 - j; - image->r(row, col) = (red[j] + image->r(row + 1, col)) / 2; - image->g(row, col) = (green[j] + image->g(row + 1, col)) / 2; - image->b(row, col) = (blue[j] + image->b(row + 1, col)) / 2; + image->r (row, col) = (red[j] + image->r (row + 1, col)) / 2; + image->g (row, col) = (green[j] + image->g (row + 1, col)) / 2; + image->b (row, col) = (blue[j] + image->b (row + 1, col)) / 2; } 
row = 2 * imheight - 1 - 2 * i + 2; for (int j = 0; j < imwidth; j++) { int col = imwidth - 1 - j; - image->r(row, col) = (red[j] + image->r(row + 1, col)) / 2; - image->g(row, col) = (green[j] + image->g(row + 1, col)) / 2; - image->b(row, col) = (blue[j] + image->b(row + 1, col)) / 2; + image->r (row, col) = (red[j] + image->r (row + 1, col)) / 2; + image->g (row, col) = (green[j] + image->g (row + 1, col)) / 2; + image->b (row, col) = (blue[j] + image->b (row + 1, col)) / 2; } } else if (i > 2 && i < imheight - 1) { // vertical bicubic interpolation int row = 2 * imheight - 1 - 2 * i + 2; for (int j = 0; j < imwidth; j++) { int col = imwidth - 1 - j; - image->r(row, col) = MAX(0.f, -0.0625f * (red[j] + image->r(row + 3, col)) + 0.5625f * (image->r(row - 1, col) + image->r(row + 1, col))); - image->g(row, col) = MAX(0.f, -0.0625f * (green[j] + image->g(row + 3, col)) + 0.5625f * (image->g(row - 1, col) + image->g(row + 1, col))); - image->b(row, col) = MAX(0.f, -0.0625f * (blue[j] + image->b(row + 3, col)) + 0.5625f * (image->b(row - 1, col) + image->b(row + 1, col))); + image->r (row, col) = MAX (0.f, -0.0625f * (red[j] + image->r (row + 3, col)) + 0.5625f * (image->r (row - 1, col) + image->r (row + 1, col))); + image->g (row, col) = MAX (0.f, -0.0625f * (green[j] + image->g (row + 3, col)) + 0.5625f * (image->g (row - 1, col) + image->g (row + 1, col))); + image->b (row, col) = MAX (0.f, -0.0625f * (blue[j] + image->b (row + 3, col)) + 0.5625f * (image->b (row - 1, col) + image->b (row + 1, col))); - if(clip) { - image->r(row, col) = MIN(image->r(row, col), rtengine::MAXVALF); - image->g(row, col) = MIN(image->g(row, col), rtengine::MAXVALF); - image->b(row, col) = MIN(image->b(row, col), rtengine::MAXVALF); + if (clip) { + image->r (row, col) = MIN (image->r (row, col), rtengine::MAXVALF); + image->g (row, col) = MIN (image->g (row, col), rtengine::MAXVALF); + image->b (row, col) = MIN (image->b (row, col), rtengine::MAXVALF); } } } @@ -234,62 +234,62 @@ void transLineD1x (const float* const red, const float* const green, const float break; case TR_R90: // rotate right - if( i == 0) { + if ( i == 0) { for (int j = 0; j < imwidth; j++) { - image->r(j, 2 * imheight - 1) = red[j]; - image->g(j, 2 * imheight - 1) = green[j]; - image->b(j, 2 * imheight - 1) = blue[j]; + image->r (j, 2 * imheight - 1) = red[j]; + image->g (j, 2 * imheight - 1) = green[j]; + image->b (j, 2 * imheight - 1) = blue[j]; } } for (int j = 0; j < imwidth; j++) { - image->r(j, 2 * (imheight - 1 - i)) = red[j]; - image->g(j, 2 * (imheight - 1 - i)) = green[j]; - image->b(j, 2 * (imheight - 1 - i)) = blue[j]; + image->r (j, 2 * (imheight - 1 - i)) = red[j]; + image->g (j, 2 * (imheight - 1 - i)) = green[j]; + image->b (j, 2 * (imheight - 1 - i)) = blue[j]; } if (i == 1 || i == 2) { // linear interpolation int col = 2 * imheight - 1 - 2 * i; for (int j = 0; j < imwidth; j++) { - image->r(j, col) = (red[j] + image->r(j, col + 1)) / 2; - image->g(j, col) = (green[j] + image->g(j, col + 1)) / 2; - image->b(j, col) = (blue[j] + image->b(j, col + 1)) / 2; + image->r (j, col) = (red[j] + image->r (j, col + 1)) / 2; + image->g (j, col) = (green[j] + image->g (j, col + 1)) / 2; + image->b (j, col) = (blue[j] + image->b (j, col + 1)) / 2; - if(oddHeight && i == 2) { - image->r(j, 2 * imheight) = (red[j] + image->r(j, 2 * imheight - 2)) / 2; - image->g(j, 2 * imheight) = (green[j] + image->g(j, 2 * imheight - 2)) / 2; - image->b(j, 2 * imheight) = (blue[j] + image->b(j, 2 * imheight - 2)) / 2; + if (oddHeight && i == 2) { + 
image->r (j, 2 * imheight) = (red[j] + image->r (j, 2 * imheight - 2)) / 2; + image->g (j, 2 * imheight) = (green[j] + image->g (j, 2 * imheight - 2)) / 2; + image->b (j, 2 * imheight) = (blue[j] + image->b (j, 2 * imheight - 2)) / 2; } } } else if (i == imheight - 1) { int col = 2 * imheight - 1 - 2 * i; for (int j = 0; j < imwidth; j++) { - image->r(j, col) = (red[j] + image->r(j, col + 1)) / 2; - image->g(j, col) = (green[j] + image->g(j, col + 1)) / 2; - image->b(j, col) = (blue[j] + image->b(j, col + 1)) / 2; + image->r (j, col) = (red[j] + image->r (j, col + 1)) / 2; + image->g (j, col) = (green[j] + image->g (j, col + 1)) / 2; + image->b (j, col) = (blue[j] + image->b (j, col + 1)) / 2; } col = 2 * imheight - 1 - 2 * i + 2; for (int j = 0; j < imwidth; j++) { - image->r(j, col) = (red[j] + image->r(j, col + 1)) / 2; - image->g(j, col) = (green[j] + image->g(j, col + 1)) / 2; - image->b(j, col) = (blue[j] + image->b(j, col + 1)) / 2; + image->r (j, col) = (red[j] + image->r (j, col + 1)) / 2; + image->g (j, col) = (green[j] + image->g (j, col + 1)) / 2; + image->b (j, col) = (blue[j] + image->b (j, col + 1)) / 2; } } else if (i > 2 && i < imheight - 1) { // vertical bicubic interpolation int col = 2 * imheight - 1 - 2 * i + 2; for (int j = 0; j < imwidth; j++) { - image->r(j, col) = MAX(0.f, -0.0625f * (red[j] + image->r(j, col + 3)) + 0.5625f * (image->r(j, col - 1) + image->r(j, col + 1))); - image->g(j, col) = MAX(0.f, -0.0625f * (green[j] + image->g(j, col + 3)) + 0.5625f * (image->g(j, col - 1) + image->g(j, col + 1))); - image->b(j, col) = MAX(0.f, -0.0625f * (blue[j] + image->b(j, col + 3)) + 0.5625f * (image->b(j, col - 1) + image->b(j, col + 1))); + image->r (j, col) = MAX (0.f, -0.0625f * (red[j] + image->r (j, col + 3)) + 0.5625f * (image->r (j, col - 1) + image->r (j, col + 1))); + image->g (j, col) = MAX (0.f, -0.0625f * (green[j] + image->g (j, col + 3)) + 0.5625f * (image->g (j, col - 1) + image->g (j, col + 1))); + image->b (j, col) = MAX (0.f, -0.0625f * (blue[j] + image->b (j, col + 3)) + 0.5625f * (image->b (j, col - 1) + image->b (j, col + 1))); - if(clip) { - image->r(j, col) = MIN(image->r(j, col), rtengine::MAXVALF); - image->g(j, col) = MIN(image->g(j, col), rtengine::MAXVALF); - image->b(j, col) = MIN(image->b(j, col), rtengine::MAXVALF); + if (clip) { + image->r (j, col) = MIN (image->r (j, col), rtengine::MAXVALF); + image->g (j, col) = MIN (image->g (j, col), rtengine::MAXVALF); + image->b (j, col) = MIN (image->b (j, col), rtengine::MAXVALF); } } } @@ -299,57 +299,57 @@ void transLineD1x (const float* const red, const float* const green, const float case TR_R270: // rotate left if (i == 0) { for (int j = imwidth - 1, row = 0; j >= 0; j--, row++) { - image->r(row, 2 * i) = red[j]; - image->g(row, 2 * i) = green[j]; - image->b(row, 2 * i) = blue[j]; + image->r (row, 2 * i) = red[j]; + image->g (row, 2 * i) = green[j]; + image->b (row, 2 * i) = blue[j]; } } else if (i == 1 || i == 2) { // linear interpolation for (int j = imwidth - 1, row = 0; j >= 0; j--, row++) { - image->r(row, 2 * i) = red[j]; - image->g(row, 2 * i) = green[j]; - image->b(row, 2 * i) = blue[j]; - image->r(row, 2 * i - 1) = (red[j] + image->r(row, 2 * i - 2)) * 0.5f; - image->g(row, 2 * i - 1) = (green[j] + image->g(row, 2 * i - 2)) * 0.5f; - image->b(row, 2 * i - 1) = (blue[j] + image->b(row, 2 * i - 2)) * 0.5f; + image->r (row, 2 * i) = red[j]; + image->g (row, 2 * i) = green[j]; + image->b (row, 2 * i) = blue[j]; + image->r (row, 2 * i - 1) = (red[j] + image->r (row, 2 * i - 2)) * 
0.5f; + image->g (row, 2 * i - 1) = (green[j] + image->g (row, 2 * i - 2)) * 0.5f; + image->b (row, 2 * i - 1) = (blue[j] + image->b (row, 2 * i - 2)) * 0.5f; } } else if (i > 0 && i < imheight) { // vertical bicubic interpolation for (int j = imwidth - 1, row = 0; j >= 0; j--, row++) { - image->r(row, 2 * i - 3) = MAX(0.f, -0.0625f * (red[j] + image->r(row, 2 * i - 6)) + 0.5625f * (image->r(row, 2 * i - 2) + image->r(row, 2 * i - 4))); - image->g(row, 2 * i - 3) = MAX(0.f, -0.0625f * (green[j] + image->g(row, 2 * i - 6)) + 0.5625f * (image->g(row, 2 * i - 2) + image->g(row, 2 * i - 4))); - image->b(row, 2 * i - 3) = MAX(0.f, -0.0625f * (blue[j] + image->b(row, 2 * i - 6)) + 0.5625f * (image->b(row, 2 * i - 2) + image->b(row, 2 * i - 4))); + image->r (row, 2 * i - 3) = MAX (0.f, -0.0625f * (red[j] + image->r (row, 2 * i - 6)) + 0.5625f * (image->r (row, 2 * i - 2) + image->r (row, 2 * i - 4))); + image->g (row, 2 * i - 3) = MAX (0.f, -0.0625f * (green[j] + image->g (row, 2 * i - 6)) + 0.5625f * (image->g (row, 2 * i - 2) + image->g (row, 2 * i - 4))); + image->b (row, 2 * i - 3) = MAX (0.f, -0.0625f * (blue[j] + image->b (row, 2 * i - 6)) + 0.5625f * (image->b (row, 2 * i - 2) + image->b (row, 2 * i - 4))); - if(clip) { - image->r(row, 2 * i - 3) = MIN(image->r(row, 2 * i - 3), rtengine::MAXVALF); - image->g(row, 2 * i - 3) = MIN(image->g(row, 2 * i - 3), rtengine::MAXVALF); - image->b(row, 2 * i - 3) = MIN(image->b(row, 2 * i - 3), rtengine::MAXVALF); + if (clip) { + image->r (row, 2 * i - 3) = MIN (image->r (row, 2 * i - 3), rtengine::MAXVALF); + image->g (row, 2 * i - 3) = MIN (image->g (row, 2 * i - 3), rtengine::MAXVALF); + image->b (row, 2 * i - 3) = MIN (image->b (row, 2 * i - 3), rtengine::MAXVALF); } - image->r(row, 2 * i) = red[j]; - image->g(row, 2 * i) = green[j]; - image->b(row, 2 * i) = blue[j]; + image->r (row, 2 * i) = red[j]; + image->g (row, 2 * i) = green[j]; + image->b (row, 2 * i) = blue[j]; } } if (i == imheight - 1) { for (int j = imwidth - 1, row = 0; j >= 0; j--, row++) { - image->r(row, 2 * i - 1) = MAX(0.f, -0.0625f * (red[j] + image->r(row, 2 * i - 4)) + 0.5625f * (image->r(row, 2 * i) + image->r(row, 2 * i - 2))); - image->g(row, 2 * i - 1) = MAX(0.f, -0.0625f * (green[j] + image->g(row, 2 * i - 4)) + 0.5625f * (image->g(row, 2 * i) + image->g(row, 2 * i - 2))); - image->b(row, 2 * i - 1) = MAX(0.f, -0.0625f * (blue[j] + image->b(row, 2 * i - 4)) + 0.5625f * (image->b(row, 2 * i) + image->b(row, 2 * i - 2))); + image->r (row, 2 * i - 1) = MAX (0.f, -0.0625f * (red[j] + image->r (row, 2 * i - 4)) + 0.5625f * (image->r (row, 2 * i) + image->r (row, 2 * i - 2))); + image->g (row, 2 * i - 1) = MAX (0.f, -0.0625f * (green[j] + image->g (row, 2 * i - 4)) + 0.5625f * (image->g (row, 2 * i) + image->g (row, 2 * i - 2))); + image->b (row, 2 * i - 1) = MAX (0.f, -0.0625f * (blue[j] + image->b (row, 2 * i - 4)) + 0.5625f * (image->b (row, 2 * i) + image->b (row, 2 * i - 2))); - if(clip) { - image->r(j, 2 * i - 1) = MIN(image->r(j, 2 * i - 1), rtengine::MAXVALF); - image->g(j, 2 * i - 1) = MIN(image->g(j, 2 * i - 1), rtengine::MAXVALF); - image->b(j, 2 * i - 1) = MIN(image->b(j, 2 * i - 1), rtengine::MAXVALF); + if (clip) { + image->r (j, 2 * i - 1) = MIN (image->r (j, 2 * i - 1), rtengine::MAXVALF); + image->g (j, 2 * i - 1) = MIN (image->g (j, 2 * i - 1), rtengine::MAXVALF); + image->b (j, 2 * i - 1) = MIN (image->b (j, 2 * i - 1), rtengine::MAXVALF); } - image->r(row, 2 * i + 1) = (red[j] + image->r(row, 2 * i - 1)) / 2; - image->g(row, 2 * i + 1) = (green[j] + 
image->g(row, 2 * i - 1)) / 2; - image->b(row, 2 * i + 1) = (blue[j] + image->b(row, 2 * i - 1)) / 2; + image->r (row, 2 * i + 1) = (red[j] + image->r (row, 2 * i - 1)) / 2; + image->g (row, 2 * i + 1) = (green[j] + image->g (row, 2 * i - 1)) / 2; + image->b (row, 2 * i + 1) = (blue[j] + image->b (row, 2 * i - 1)) / 2; if (oddHeight) { - image->r(row, 2 * i + 2) = (red[j] + image->r(row, 2 * i - 2)) / 2; - image->g(row, 2 * i + 2) = (green[j] + image->g(row, 2 * i - 2)) / 2; - image->b(row, 2 * i + 2) = (blue[j] + image->b(row, 2 * i - 2)) / 2; + image->r (row, 2 * i + 2) = (red[j] + image->r (row, 2 * i - 2)) / 2; + image->g (row, 2 * i + 2) = (green[j] + image->g (row, 2 * i - 2)) / 2; + image->b (row, 2 * i + 2) = (blue[j] + image->b (row, 2 * i - 2)) / 2; } } } @@ -364,44 +364,44 @@ void transLineD1x (const float* const red, const float* const green, const float if (i == 1 || i == 2) { // linear interpolation for (int j = 0; j < imwidth; j++) { - image->r(2 * i - 1, j) = (red[j] + image->r(2 * i - 2, j)) / 2; - image->g(2 * i - 1, j) = (green[j] + image->g(2 * i - 2, j)) / 2; - image->b(2 * i - 1, j) = (blue[j] + image->b(2 * i - 2, j)) / 2; + image->r (2 * i - 1, j) = (red[j] + image->r (2 * i - 2, j)) / 2; + image->g (2 * i - 1, j) = (green[j] + image->g (2 * i - 2, j)) / 2; + image->b (2 * i - 1, j) = (blue[j] + image->b (2 * i - 2, j)) / 2; } } else if (i > 2 && i < imheight) { // vertical bicubic interpolation for (int j = 0; j < imwidth; j++) { - image->r(2 * i - 3, j) = MAX(0.f, -0.0625f * (red[j] + image->r(2 * i - 6, j)) + 0.5625f * (image->r(2 * i - 2, j) + image->r(2 * i - 4, j))); - image->g(2 * i - 3, j) = MAX(0.f, -0.0625f * (green[j] + image->g(2 * i - 6, j)) + 0.5625f * (image->g(2 * i - 2, j) + image->g(2 * i - 4, j))); - image->b(2 * i - 3, j) = MAX(0.f, -0.0625f * (blue[j] + image->b(2 * i - 6, j)) + 0.5625f * (image->b(2 * i - 2, j) + image->b(2 * i - 4, j))); + image->r (2 * i - 3, j) = MAX (0.f, -0.0625f * (red[j] + image->r (2 * i - 6, j)) + 0.5625f * (image->r (2 * i - 2, j) + image->r (2 * i - 4, j))); + image->g (2 * i - 3, j) = MAX (0.f, -0.0625f * (green[j] + image->g (2 * i - 6, j)) + 0.5625f * (image->g (2 * i - 2, j) + image->g (2 * i - 4, j))); + image->b (2 * i - 3, j) = MAX (0.f, -0.0625f * (blue[j] + image->b (2 * i - 6, j)) + 0.5625f * (image->b (2 * i - 2, j) + image->b (2 * i - 4, j))); - if(clip) { - image->r(2 * i - 3, j) = MIN(image->r(2 * i - 3, j), rtengine::MAXVALF); - image->g(2 * i - 3, j) = MIN(image->g(2 * i - 3, j), rtengine::MAXVALF); - image->b(2 * i - 3, j) = MIN(image->b(2 * i - 3, j), rtengine::MAXVALF); + if (clip) { + image->r (2 * i - 3, j) = MIN (image->r (2 * i - 3, j), rtengine::MAXVALF); + image->g (2 * i - 3, j) = MIN (image->g (2 * i - 3, j), rtengine::MAXVALF); + image->b (2 * i - 3, j) = MIN (image->b (2 * i - 3, j), rtengine::MAXVALF); } } } if (i == imheight - 1) { for (int j = 0; j < imwidth; j++) { - image->r(2 * i - 1, j) = MAX(0.f, -0.0625f * (red[j] + image->r(2 * i - 4, j)) + 0.5625f * (image->r(2 * i, j) + image->r(2 * i - 2, j))); - image->g(2 * i - 1, j) = MAX(0.f, -0.0625f * (green[j] + image->g(2 * i - 4, j)) + 0.5625f * (image->g(2 * i, j) + image->g(2 * i - 2, j))); - image->b(2 * i - 1, j) = MAX(0.f, -0.0625f * (blue[j] + image->b(2 * i - 4, j)) + 0.5625f * (image->b(2 * i, j) + image->b(2 * i - 2, j))); + image->r (2 * i - 1, j) = MAX (0.f, -0.0625f * (red[j] + image->r (2 * i - 4, j)) + 0.5625f * (image->r (2 * i, j) + image->r (2 * i - 2, j))); + image->g (2 * i - 1, j) = MAX (0.f, -0.0625f * (green[j] 
+ image->g (2 * i - 4, j)) + 0.5625f * (image->g (2 * i, j) + image->g (2 * i - 2, j))); + image->b (2 * i - 1, j) = MAX (0.f, -0.0625f * (blue[j] + image->b (2 * i - 4, j)) + 0.5625f * (image->b (2 * i, j) + image->b (2 * i - 2, j))); - if(clip) { - image->r(2 * i - 1, j) = MIN(image->r(2 * i - 1, j), rtengine::MAXVALF); - image->g(2 * i - 1, j) = MIN(image->g(2 * i - 1, j), rtengine::MAXVALF); - image->b(2 * i - 1, j) = MIN(image->b(2 * i - 1, j), rtengine::MAXVALF); + if (clip) { + image->r (2 * i - 1, j) = MIN (image->r (2 * i - 1, j), rtengine::MAXVALF); + image->g (2 * i - 1, j) = MIN (image->g (2 * i - 1, j), rtengine::MAXVALF); + image->b (2 * i - 1, j) = MIN (image->b (2 * i - 1, j), rtengine::MAXVALF); } - image->r(2 * i + 1, j) = (red[j] + image->r(2 * i - 1, j)) / 2; - image->g(2 * i + 1, j) = (green[j] + image->g(2 * i - 1, j)) / 2; - image->b(2 * i + 1, j) = (blue[j] + image->b(2 * i - 1, j)) / 2; + image->r (2 * i + 1, j) = (red[j] + image->r (2 * i - 1, j)) / 2; + image->g (2 * i + 1, j) = (green[j] + image->g (2 * i - 1, j)) / 2; + image->b (2 * i + 1, j) = (blue[j] + image->b (2 * i - 1, j)) / 2; if (oddHeight) { - image->r(2 * i + 2, j) = (red[j] + image->r(2 * i - 2, j)) / 2; - image->g(2 * i + 2, j) = (green[j] + image->g(2 * i - 2, j)) / 2; - image->b(2 * i + 2, j) = (blue[j] + image->b(2 * i - 2, j)) / 2; + image->r (2 * i + 2, j) = (red[j] + image->r (2 * i - 2, j)) / 2; + image->g (2 * i + 2, j) = (green[j] + image->g (2 * i - 2, j)) / 2; + image->b (2 * i + 2, j) = (blue[j] + image->b (2 * i - 2, j)) / 2; } } } @@ -423,24 +423,24 @@ extern const Settings* settings; RawImageSource::RawImageSource () : ImageSource() - , W(0), H(0) - , plistener(nullptr) - , border(4) - , ri(nullptr) - , cache(nullptr) - , rawData(0, 0) - , green(0, 0) - , red(0, 0) - , blue(0, 0) - , lc00(0.0) - , lc01(0.0) - , lc02(0.0) - , lc10(0.0) - , lc11(0.0) - , lc12(0.0) - , lc20(0.0) - , lc21(0.0) - , lc22(0.0) + , W (0), H (0) + , plistener (nullptr) + , border (4) + , ri (nullptr) + , cache (nullptr) + , rawData (0, 0) + , green (0, 0) + , red (0, 0) + , blue (0, 0) + , lc00 (0.0) + , lc01 (0.0) + , lc02 (0.0) + , lc10 (0.0) + , lc11 (0.0) + , lc12 (0.0) + , lc20 (0.0) + , lc21 (0.0) + , lc22 (0.0) , hlmax{} , clmax{} , chmax{} @@ -449,19 +449,19 @@ RawImageSource::RawImageSource () , c_white{} , cblacksom{} , ref_pre_mul{} - , refwb_red(0.0) - , refwb_green(0.0) - , refwb_blue(0.0) + , refwb_red (0.0) + , refwb_green (0.0) + , refwb_blue (0.0) , rgb_cam{} , cam_rgb{} , xyz_cam{} , cam_xyz{} - , fuji(false) - , d1x(false) - , initialGain(0.0) - , camInitialGain(0.0) - , defGain(0.0) - , threshold(0) + , fuji (false) + , d1x (false) + , initialGain (0.0) + , camInitialGain (0.0) + , defGain (0.0) + , threshold (0) { camProfile = nullptr; embProfile = nullptr; @@ -482,7 +482,7 @@ RawImageSource::~RawImageSource () flushRGB(); flushRawData(); - if( cache ) { + if ( cache ) { delete [] cache; } @@ -527,11 +527,11 @@ void RawImageSource::transformRect (PreviewProps pp, int tran, int &ssx1, int &s sh = w; } - if( pp.w > sw - 2 * border) { + if ( pp.w > sw - 2 * border) { pp.w = sw - 2 * border; } - if( pp.h > sh - 2 * border) { + if ( pp.h > sh - 2 * border) { pp.h = sh - 2 * border; } @@ -547,24 +547,24 @@ void RawImageSource::transformRect (PreviewProps pp, int tran, int &ssx1, int &s int sx1 = ppx; // assuming it's >=0 int sy1 = ppy; // assuming it's >=0 - int sx2 = max(ppx + pp.w, w - 1); - int sy2 = max(ppy + pp.h, h - 1); + int sx2 = max (ppx + pp.w, w - 1); + int sy2 = max (ppy + pp.h, h 
- 1); if ((tran & TR_ROT) == TR_R180) { - sx1 = max(w - ppx - pp.w, 0); - sy1 = max(h - ppy - pp.h, 0); - sx2 = min(sx1 + pp.w, w - 1); - sy2 = min(sy1 + pp.h, h - 1); + sx1 = max (w - ppx - pp.w, 0); + sy1 = max (h - ppy - pp.h, 0); + sx2 = min (sx1 + pp.w, w - 1); + sy2 = min (sy1 + pp.h, h - 1); } else if ((tran & TR_ROT) == TR_R90) { sx1 = ppy; - sy1 = max(h - ppx - pp.w, 0); - sx2 = min(sx1 + pp.h, w - 1); - sy2 = min(sy1 + pp.w, h - 1); + sy1 = max (h - ppx - pp.w, 0); + sx2 = min (sx1 + pp.h, w - 1); + sy2 = min (sy1 + pp.w, h - 1); } else if ((tran & TR_ROT) == TR_R270) { - sx1 = max(w - ppy - pp.h, 0); + sx1 = max (w - ppy - pp.h, 0); sy1 = ppx; - sx2 = min(sx1 + pp.h, w - 1); - sy2 = min(sy1 + pp.w, h - 1); + sx2 = min (sx1 + pp.h, w - 1); + sy2 = min (sy1 + pp.w, h - 1); } if (fuji) { @@ -585,7 +585,7 @@ void RawImageSource::transformRect (PreviewProps pp, int tran, int &ssx1, int &s } } -float calculate_scale_mul(float scale_mul[4], const float pre_mul_[4], const float c_white[4], const float c_black[4], bool isMono, int colors) +float calculate_scale_mul (float scale_mul[4], const float pre_mul_[4], const float c_white[4], const float c_black[4], bool isMono, int colors) { if (isMono || colors == 1) { for (int c = 0; c < 4; c++) { @@ -602,20 +602,20 @@ float calculate_scale_mul(float scale_mul[4], const float pre_mul_[4], const flo pre_mul[3] = pre_mul[1]; // G2 == G1 } - float maxpremul = max(pre_mul[0], pre_mul[1], pre_mul[2], pre_mul[3]); + float maxpremul = max (pre_mul[0], pre_mul[1], pre_mul[2], pre_mul[3]); for (int c = 0; c < 4; c++) { scale_mul[c] = (pre_mul[c] / maxpremul) * 65535.0 / (c_white[c] - c_black[c]); } } - float gain = max(scale_mul[0], scale_mul[1], scale_mul[2], scale_mul[3]) / min(scale_mul[0], scale_mul[1], scale_mul[2], scale_mul[3]); + float gain = max (scale_mul[0], scale_mul[1], scale_mul[2], scale_mul[3]) / min (scale_mul[0], scale_mul[1], scale_mul[2], scale_mul[3]); return gain; } void RawImageSource::getImage (const ColorTemp &ctemp, int tran, Imagefloat* image, const PreviewProps &pp, const ToneCurveParams &hrp, const ColorManagementParams &cmp, const RAWParams &raw ) { - MyMutex::MyLock lock(getImageMutex); + MyMutex::MyLock lock (getImageMutex); tran = defTransform (tran); @@ -625,9 +625,9 @@ void RawImageSource::getImage (const ColorTemp &ctemp, int tran, Imagefloat* ima if (ctemp.getTemp() < 0) { // no white balance, ie revert the pre-process white balance to restore original unbalanced raw camera color - rm = ri->get_pre_mul(0); - gm = ri->get_pre_mul(1); - bm = ri->get_pre_mul(2); + rm = ri->get_pre_mul (0); + gm = ri->get_pre_mul (1); + bm = ri->get_pre_mul (2); } else { ctemp.getMultipliers (r, g, b); rm = imatrices.cam_rgb[0][0] * r + imatrices.cam_rgb[0][1] * g + imatrices.cam_rgb[0][2] * b; @@ -637,12 +637,12 @@ void RawImageSource::getImage (const ColorTemp &ctemp, int tran, Imagefloat* ima if (true) { // adjust gain so the maximum raw value of the least scaled channel just hits max - const float new_pre_mul[4] = { ri->get_pre_mul(0) / rm, ri->get_pre_mul(1) / gm, ri->get_pre_mul(2) / bm, ri->get_pre_mul(3) / gm }; + const float new_pre_mul[4] = { ri->get_pre_mul (0) / rm, ri->get_pre_mul (1) / gm, ri->get_pre_mul (2) / bm, ri->get_pre_mul (3) / gm }; float new_scale_mul[4]; bool isMono = (ri->getSensorType() == ST_FUJI_XTRANS && raw.xtranssensor.method == RAWParams::XTransSensor::methodstring[RAWParams::XTransSensor::mono]) || (ri->getSensorType() == ST_BAYER && raw.bayersensor.method == 
RAWParams::BayerSensor::methodstring[RAWParams::BayerSensor::mono]); - float gain = calculate_scale_mul(new_scale_mul, new_pre_mul, c_white, cblacksom, isMono, ri->get_colors()); + float gain = calculate_scale_mul (new_scale_mul, new_pre_mul, c_white, cblacksom, isMono, ri->get_colors()); rm = new_scale_mul[0] / scale_mul[0] * gain; gm = new_scale_mul[1] / scale_mul[1] * gain; bm = new_scale_mul[2] / scale_mul[2] * gain; @@ -655,7 +655,7 @@ void RawImageSource::getImage (const ColorTemp &ctemp, int tran, Imagefloat* ima gm = refwb_green / gm; bm = refwb_blue / bm; // normalize so larger multiplier becomes 1.0 - float minval = min(rm, gm, bm); + float minval = min (rm, gm, bm); rm /= minval; gm /= minval; bm /= minval; @@ -756,9 +756,9 @@ void RawImageSource::getImage (const ColorTemp &ctemp, int tran, Imagefloat* ima // are not clipped. We have to do that though as we only check pixel by pixel // and don't know if this will transition into a clipped area, if so we need // to clip also surrounding to make a good colour transition - rtot = CLIP(rtot); - gtot = CLIP(gtot); - btot = CLIP(btot); + rtot = CLIP (rtot); + gtot = CLIP (gtot); + btot = CLIP (btot); } line_red[j] = rtot; @@ -776,9 +776,9 @@ void RawImageSource::getImage (const ColorTemp &ctemp, int tran, Imagefloat* ima for (int m = 0; m < skip; m++) for (int n = 0; n < skip; n++) { - rtot += rawData[i + m][(jx + n) * 3 + 0]; - gtot += rawData[i + m][(jx + n) * 3 + 1]; - btot += rawData[i + m][(jx + n) * 3 + 2]; + rtot += rawData[i + m][ (jx + n) * 3 + 0]; + gtot += rawData[i + m][ (jx + n) * 3 + 1]; + btot += rawData[i + m][ (jx + n) * 3 + 2]; } rtot *= rm; @@ -786,9 +786,9 @@ void RawImageSource::getImage (const ColorTemp &ctemp, int tran, Imagefloat* ima btot *= bm; if (doClip) { - rtot = CLIP(rtot); - gtot = CLIP(gtot); - btot = CLIP(btot); + rtot = CLIP (rtot); + gtot = CLIP (gtot); + btot = CLIP (btot); } line_red[j] = rtot; @@ -803,9 +803,9 @@ void RawImageSource::getImage (const ColorTemp &ctemp, int tran, Imagefloat* ima hlRecovery (hrp.method, line_red, line_grn, line_blue, imwidth, hlmax); } - if(d1x) { + if (d1x) { transLineD1x (line_red, line_grn, line_blue, ix, image, tran, imwidth, imheight, d1xHeightOdd, doClip); - } else if(fuji) { + } else if (fuji) { transLineFuji (line_red, line_grn, line_blue, ix, image, tran, imwidth, imheight, fw); } else { transLineStandard (line_red, line_grn, line_blue, ix, image, tran, imwidth, imheight); @@ -822,36 +822,36 @@ void RawImageSource::getImage (const ColorTemp &ctemp, int tran, Imagefloat* ima // first row for (int j = 1 + a; j < image->width - 1; j += 2) { - image->r(0, j) = (image->r(1, j) + image->r(0, j + 1) + image->r(0, j - 1)) / 3; - image->g(0, j) = (image->g(1, j) + image->g(0, j + 1) + image->g(0, j - 1)) / 3; - image->b(0, j) = (image->b(1, j) + image->b(0, j + 1) + image->b(0, j - 1)) / 3; + image->r (0, j) = (image->r (1, j) + image->r (0, j + 1) + image->r (0, j - 1)) / 3; + image->g (0, j) = (image->g (1, j) + image->g (0, j + 1) + image->g (0, j - 1)) / 3; + image->b (0, j) = (image->b (1, j) + image->b (0, j + 1) + image->b (0, j - 1)) / 3; } // other rows for (int i = 1; i < image->height - 1; i++) { for (int j = 2 - (a + i + 1) % 2; j < image->width - 1; j += 2) { // edge-adaptive interpolation - double dh = (ABS(image->r(i, j + 1) - image->r(i, j - 1)) + ABS(image->g(i, j + 1) - image->g(i, j - 1)) + ABS(image->b(i, j + 1) - image->b(i, j - 1))) / 1.0; - double dv = (ABS(image->r(i + 1, j) - image->r(i - 1, j)) + ABS(image->g(i + 1, j) - image->g(i - 1, j)) 
+ ABS(image->b(i + 1, j) - image->b(i - 1, j))) / 1.0; + double dh = (ABS (image->r (i, j + 1) - image->r (i, j - 1)) + ABS (image->g (i, j + 1) - image->g (i, j - 1)) + ABS (image->b (i, j + 1) - image->b (i, j - 1))) / 1.0; + double dv = (ABS (image->r (i + 1, j) - image->r (i - 1, j)) + ABS (image->g (i + 1, j) - image->g (i - 1, j)) + ABS (image->b (i + 1, j) - image->b (i - 1, j))) / 1.0; double eh = 1.0 / (1.0 + dh); double ev = 1.0 / (1.0 + dv); - image->r(i, j) = (eh * (image->r(i, j + 1) + image->r(i, j - 1)) + ev * (image->r(i + 1, j) + image->r(i - 1, j))) / (2.0 * (eh + ev)); - image->g(i, j) = (eh * (image->g(i, j + 1) + image->g(i, j - 1)) + ev * (image->g(i + 1, j) + image->g(i - 1, j))) / (2.0 * (eh + ev)); - image->b(i, j) = (eh * (image->b(i, j + 1) + image->b(i, j - 1)) + ev * (image->b(i + 1, j) + image->b(i - 1, j))) / (2.0 * (eh + ev)); + image->r (i, j) = (eh * (image->r (i, j + 1) + image->r (i, j - 1)) + ev * (image->r (i + 1, j) + image->r (i - 1, j))) / (2.0 * (eh + ev)); + image->g (i, j) = (eh * (image->g (i, j + 1) + image->g (i, j - 1)) + ev * (image->g (i + 1, j) + image->g (i - 1, j))) / (2.0 * (eh + ev)); + image->b (i, j) = (eh * (image->b (i, j + 1) + image->b (i, j - 1)) + ev * (image->b (i + 1, j) + image->b (i - 1, j))) / (2.0 * (eh + ev)); } // first pixel if (2 - (a + i + 1) % 2 == 2) { - image->r(i, 0) = (image->r(i + 1, 0) + image->r(i - 1, 0) + image->r(i, 1)) / 3; - image->g(i, 0) = (image->g(i + 1, 0) + image->g(i - 1, 0) + image->g(i, 1)) / 3; - image->b(i, 0) = (image->b(i + 1, 0) + image->b(i - 1, 0) + image->b(i, 1)) / 3; + image->r (i, 0) = (image->r (i + 1, 0) + image->r (i - 1, 0) + image->r (i, 1)) / 3; + image->g (i, 0) = (image->g (i + 1, 0) + image->g (i - 1, 0) + image->g (i, 1)) / 3; + image->b (i, 0) = (image->b (i + 1, 0) + image->b (i - 1, 0) + image->b (i, 1)) / 3; } // last pixel if (2 - (a + i + image->width) % 2 == 2) { - image->r(i, image->width - 1) = (image->r(i + 1, image->width - 1) + image->r(i - 1, image->width - 1) + image->r(i, image->width - 2)) / 3; - image->g(i, image->width - 1) = (image->g(i + 1, image->width - 1) + image->g(i - 1, image->width - 1) + image->g(i, image->width - 2)) / 3; - image->b(i, image->width - 1) = (image->b(i + 1, image->width - 1) + image->b(i - 1, image->width - 1) + image->b(i, image->width - 2)) / 3; + image->r (i, image->width - 1) = (image->r (i + 1, image->width - 1) + image->r (i - 1, image->width - 1) + image->r (i, image->width - 2)) / 3; + image->g (i, image->width - 1) = (image->g (i + 1, image->width - 1) + image->g (i - 1, image->width - 1) + image->g (i, image->width - 2)) / 3; + image->b (i, image->width - 1) = (image->b (i + 1, image->width - 1) + image->b (i - 1, image->width - 1) + image->b (i, image->width - 2)) / 3; } } @@ -859,9 +859,9 @@ void RawImageSource::getImage (const ColorTemp &ctemp, int tran, Imagefloat* ima int b = (a == 1 && image->height % 2) || (a == 0 && image->height % 2 == 0); for (int j = 1 + b; j < image->width - 1; j += 2) { - image->r(image->height - 1, j) = (image->r(image->height - 2, j) + image->r(image->height - 1, j + 1) + image->r(image->height - 1, j - 1)) / 3; - image->g(image->height - 1, j) = (image->g(image->height - 2, j) + image->g(image->height - 1, j + 1) + image->g(image->height - 1, j - 1)) / 3; - image->b(image->height - 1, j) = (image->b(image->height - 2, j) + image->b(image->height - 1, j + 1) + image->b(image->height - 1, j - 1)) / 3; + image->r (image->height - 1, j) = (image->r (image->height - 2, j) + image->r 
(image->height - 1, j + 1) + image->r (image->height - 1, j - 1)) / 3; + image->g (image->height - 1, j) = (image->g (image->height - 2, j) + image->g (image->height - 1, j + 1) + image->g (image->height - 1, j - 1)) / 3; + image->b (image->height - 1, j) = (image->b (image->height - 2, j) + image->b (image->height - 1, j + 1) + image->b (image->height - 1, j - 1)) / 3; } } @@ -875,8 +875,8 @@ void RawImageSource::getImage (const ColorTemp &ctemp, int tran, Imagefloat* ima } // Colour correction (only when running on full resolution) - if(pp.skip == 1) { - switch(ri->getSensorType()) { + if (pp.skip == 1) { + switch (ri->getSensorType()) { case ST_BAYER: processFalseColorCorrection (image, raw.bayersensor.ccSteps); break; @@ -887,24 +887,24 @@ void RawImageSource::getImage (const ColorTemp &ctemp, int tran, Imagefloat* ima } } -DCPProfile *RawImageSource::getDCP(const ColorManagementParams &cmp, ColorTemp &wb, DCPProfile::ApplyState &as) +DCPProfile *RawImageSource::getDCP (const ColorManagementParams &cmp, ColorTemp &wb, DCPProfile::ApplyState &as) { DCPProfile *dcpProf = nullptr; cmsHPROFILE dummy; - findInputProfile(cmp.input, nullptr, (static_cast(getMetaData()))->getCamera(), &dcpProf, dummy); + findInputProfile (cmp.input, nullptr, (static_cast (getMetaData()))->getCamera(), &dcpProf, dummy); if (dcpProf == nullptr) { return nullptr; } - dcpProf->setStep2ApplyState(cmp.working, cmp.toneCurve, cmp.applyLookTable, cmp.applyBaselineExposureOffset, as); + dcpProf->setStep2ApplyState (cmp.working, cmp.toneCurve, cmp.applyLookTable, cmp.applyBaselineExposureOffset, as); return dcpProf; } -void RawImageSource::convertColorSpace(Imagefloat* image, const ColorManagementParams &cmp, const ColorTemp &wb) +void RawImageSource::convertColorSpace (Imagefloat* image, const ColorManagementParams &cmp, const ColorTemp &wb) { - double pre_mul[3] = { ri->get_pre_mul(0), ri->get_pre_mul(1), ri->get_pre_mul(2) }; - colorSpaceConversion (image, cmp, wb, pre_mul, embProfile, camProfile, imatrices.xyz_cam, (static_cast(getMetaData()))->getCamera()); + double pre_mul[3] = { ri->get_pre_mul (0), ri->get_pre_mul (1), ri->get_pre_mul (2) }; + colorSpaceConversion (image, cmp, wb, pre_mul, embProfile, camProfile, imatrices.xyz_cam, (static_cast (getMetaData()))->getCamera()); } //%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -912,7 +912,7 @@ void RawImageSource::convertColorSpace(Imagefloat* image, const ColorManagementP /* interpolateBadPixelsBayer: correct raw pixels looking at the bitmap * takes into consideration if there are multiple bad pixels in the neighbourhood */ -int RawImageSource::interpolateBadPixelsBayer( PixelsMap &bitmapBads ) +int RawImageSource::interpolateBadPixelsBayer ( PixelsMap &bitmapBads ) { static const float eps = 1.f; int counter = 0; @@ -920,23 +920,23 @@ int RawImageSource::interpolateBadPixelsBayer( PixelsMap &bitmapBads ) #pragma omp parallel for reduction(+:counter) schedule(dynamic,16) #endif - for( int row = 2; row < H - 2; row++ ) { - for(int col = 2; col < W - 2; col++ ) { - int sk = bitmapBads.skipIfZero(col, row); //optimization for a stripe all zero + for ( int row = 2; row < H - 2; row++ ) { + for (int col = 2; col < W - 2; col++ ) { + int sk = bitmapBads.skipIfZero (col, row); //optimization for a stripe all zero - if( sk ) { + if ( sk ) { col += sk - 1; //-1 is because of col++ in cycle continue; } - if(!bitmapBads.get(col, row)) { + if (!bitmapBads.get (col, row)) { continue; } float wtdsum = 0.f, norm = 0.f; // diagonal interpolation - if(FC(row, col) == 1) { + if (FC 
(row, col) == 1) { // green channel. We can use closer pixels than for red or blue channel. Distance to centre pixel is sqrt(2) => weighting is 0.70710678 // For green channel following pixels will be used for interpolation. Pixel to be interpolated is in centre. // 1 means that pixel is used in this step, if itself and his counterpart are not marked bad @@ -945,12 +945,12 @@ int RawImageSource::interpolateBadPixelsBayer( PixelsMap &bitmapBads ) // 0 0 0 0 0 // 0 1 0 1 0 // 0 0 0 0 0 - for( int dx = -1; dx <= 1; dx += 2) { - if( bitmapBads.get(col + dx, row - 1) || bitmapBads.get(col - dx, row + 1)) { + for ( int dx = -1; dx <= 1; dx += 2) { + if ( bitmapBads.get (col + dx, row - 1) || bitmapBads.get (col - dx, row + 1)) { continue; } - float dirwt = 0.70710678f / ( fabsf( rawData[row - 1][col + dx] - rawData[row + 1][col - dx]) + eps); + float dirwt = 0.70710678f / ( fabsf ( rawData[row - 1][col + dx] - rawData[row + 1][col - dx]) + eps); wtdsum += dirwt * (rawData[row - 1][col + dx] + rawData[row + 1][col - dx]); norm += dirwt; } @@ -963,12 +963,12 @@ int RawImageSource::interpolateBadPixelsBayer( PixelsMap &bitmapBads ) // 0 0 0 0 0 // 0 0 0 0 0 // 1 0 0 0 1 - for( int dx = -2; dx <= 2; dx += 4) { - if( bitmapBads.get(col + dx, row - 2) || bitmapBads.get(col - dx, row + 2)) { + for ( int dx = -2; dx <= 2; dx += 4) { + if ( bitmapBads.get (col + dx, row - 2) || bitmapBads.get (col - dx, row + 2)) { continue; } - float dirwt = 0.35355339f / ( fabsf( rawData[row - 2][col + dx] - rawData[row + 2][col - dx]) + eps); + float dirwt = 0.35355339f / ( fabsf ( rawData[row - 2][col + dx] - rawData[row + 2][col - dx]) + eps); wtdsum += dirwt * (rawData[row - 2][col + dx] + rawData[row + 2][col - dx]); norm += dirwt; } @@ -984,29 +984,29 @@ int RawImageSource::interpolateBadPixelsBayer( PixelsMap &bitmapBads ) // 0 0 1 0 0 // horizontal interpolation - if(!(bitmapBads.get(col - 2, row) || bitmapBads.get(col + 2, row))) { - float dirwt = 0.5f / ( fabsf( rawData[row][col - 2] - rawData[row][col + 2]) + eps); + if (! (bitmapBads.get (col - 2, row) || bitmapBads.get (col + 2, row))) { + float dirwt = 0.5f / ( fabsf ( rawData[row][col - 2] - rawData[row][col + 2]) + eps); wtdsum += dirwt * (rawData[row][col - 2] + rawData[row][col + 2]); norm += dirwt; } // vertical interpolation - if(!(bitmapBads.get(col, row - 2) || bitmapBads.get(col, row + 2))) { - float dirwt = 0.5f / ( fabsf( rawData[row - 2][col] - rawData[row + 2][col]) + eps); + if (! (bitmapBads.get (col, row - 2) || bitmapBads.get (col, row + 2))) { + float dirwt = 0.5f / ( fabsf ( rawData[row - 2][col] - rawData[row + 2][col]) + eps); wtdsum += dirwt * (rawData[row - 2][col] + rawData[row + 2][col]); norm += dirwt; } - if (LIKELY(norm > 0.f)) { // This means, we found at least one pair of valid pixels in the steps above, likelihood of this case is about 99.999% + if (LIKELY (norm > 0.f)) { // This means, we found at least one pair of valid pixels in the steps above, likelihood of this case is about 99.999% rawData[row][col] = wtdsum / (2.f * norm); //gradient weighted average, Factor of 2.f is an optimization to avoid multiplications in former steps counter++; } else { //backup plan -- simple average. Same method for all channels. 
We could improve this, but it's really unlikely that this case happens int tot = 0; float sum = 0; - for( int dy = -2; dy <= 2; dy += 2) { - for( int dx = -2; dx <= 2; dx += 2) { - if(bitmapBads.get(col + dx, row + dy)) { + for ( int dy = -2; dy <= 2; dy += 2) { + for ( int dx = -2; dx <= 2; dx += 2) { + if (bitmapBads.get (col + dx, row + dy)) { continue; } @@ -1029,7 +1029,7 @@ int RawImageSource::interpolateBadPixelsBayer( PixelsMap &bitmapBads ) /* interpolateBadPixels3Colours: correct raw pixels looking at the bitmap * takes into consideration if there are multiple bad pixels in the neighbourhood */ -int RawImageSource::interpolateBadPixelsNColours( PixelsMap &bitmapBads, const int colours ) +int RawImageSource::interpolateBadPixelsNColours ( PixelsMap &bitmapBads, const int colours ) { static const float eps = 1.f; int counter = 0; @@ -1037,16 +1037,16 @@ int RawImageSource::interpolateBadPixelsNColours( PixelsMap &bitmapBads, const i #pragma omp parallel for reduction(+:counter) schedule(dynamic,16) #endif - for( int row = 2; row < H - 2; row++ ) { - for(int col = 2; col < W - 2; col++ ) { - int sk = bitmapBads.skipIfZero(col, row); //optimization for a stripe all zero + for ( int row = 2; row < H - 2; row++ ) { + for (int col = 2; col < W - 2; col++ ) { + int sk = bitmapBads.skipIfZero (col, row); //optimization for a stripe all zero - if( sk ) { + if ( sk ) { col += sk - 1; //-1 is because of col++ in cycle continue; } - if(!bitmapBads.get(col, row)) { + if (!bitmapBads.get (col, row)) { continue; } @@ -1057,38 +1057,38 @@ int RawImageSource::interpolateBadPixelsNColours( PixelsMap &bitmapBads, const i } // diagonal interpolation - for( int dx = -1; dx <= 1; dx += 2) { - if( bitmapBads.get(col + dx, row - 1) || bitmapBads.get(col - dx, row + 1)) { + for ( int dx = -1; dx <= 1; dx += 2) { + if ( bitmapBads.get (col + dx, row - 1) || bitmapBads.get (col - dx, row + 1)) { continue; } - for(int c = 0; c < colours; c++) { - float dirwt = 0.70710678f / ( fabsf( rawData[row - 1][(col + dx) * colours + c] - rawData[row + 1][(col - dx) * colours + c]) + eps); - wtdsum[c] += dirwt * (rawData[row - 1][(col + dx) * colours + c] + rawData[row + 1][(col - dx) * colours + c]); + for (int c = 0; c < colours; c++) { + float dirwt = 0.70710678f / ( fabsf ( rawData[row - 1][ (col + dx) * colours + c] - rawData[row + 1][ (col - dx) * colours + c]) + eps); + wtdsum[c] += dirwt * (rawData[row - 1][ (col + dx) * colours + c] + rawData[row + 1][ (col - dx) * colours + c]); norm[c] += dirwt; } } // horizontal interpolation - if(!(bitmapBads.get(col - 1, row) || bitmapBads.get(col + 1, row))) { - for(int c = 0; c < colours; c++) { - float dirwt = 1.f / ( fabsf( rawData[row][(col - 1) * colours + c] - rawData[row][(col + 1) * colours + c]) + eps); - wtdsum[c] += dirwt * (rawData[row][(col - 1) * colours + c] + rawData[row][(col + 1) * colours + c]); + if (! (bitmapBads.get (col - 1, row) || bitmapBads.get (col + 1, row))) { + for (int c = 0; c < colours; c++) { + float dirwt = 1.f / ( fabsf ( rawData[row][ (col - 1) * colours + c] - rawData[row][ (col + 1) * colours + c]) + eps); + wtdsum[c] += dirwt * (rawData[row][ (col - 1) * colours + c] + rawData[row][ (col + 1) * colours + c]); norm[c] += dirwt; } } // vertical interpolation - if(!(bitmapBads.get(col, row - 1) || bitmapBads.get(col, row + 1))) { - for(int c = 0; c < colours; c++) { - float dirwt = 1.f / ( fabsf( rawData[row - 1][col * colours + c] - rawData[row + 1][col * colours + c]) + eps); + if (! 
(bitmapBads.get (col, row - 1) || bitmapBads.get (col, row + 1))) { + for (int c = 0; c < colours; c++) { + float dirwt = 1.f / ( fabsf ( rawData[row - 1][col * colours + c] - rawData[row + 1][col * colours + c]) + eps); wtdsum[c] += dirwt * (rawData[row - 1][col * colours + c] + rawData[row + 1][col * colours + c]); norm[c] += dirwt; } } - if (LIKELY(norm[0] > 0.f)) { // This means, we found at least one pair of valid pixels in the steps above, likelihood of this case is about 99.999% - for(int c = 0; c < colours; c++) { + if (LIKELY (norm[0] > 0.f)) { // This means, we found at least one pair of valid pixels in the steps above, likelihood of this case is about 99.999% + for (int c = 0; c < colours; c++) { rawData[row][col * colours + c] = wtdsum[c] / (2.f * norm[c]); //gradient weighted average, Factor of 2.f is an optimization to avoid multiplications in former steps } @@ -1101,14 +1101,14 @@ int RawImageSource::interpolateBadPixelsNColours( PixelsMap &bitmapBads, const i sum[i] = 0.f; } - for( int dy = -2; dy <= 2; dy += 2) { - for( int dx = -2; dx <= 2; dx += 2) { - if(bitmapBads.get(col + dx, row + dy)) { + for ( int dy = -2; dy <= 2; dy += 2) { + for ( int dx = -2; dx <= 2; dx += 2) { + if (bitmapBads.get (col + dx, row + dy)) { continue; } - for(int c = 0; c < colours; c++) { - sum[c] += rawData[row + dy][(col + dx) * colours + c]; + for (int c = 0; c < colours; c++) { + sum[c] += rawData[row + dy][ (col + dx) * colours + c]; } tot++; @@ -1116,7 +1116,7 @@ int RawImageSource::interpolateBadPixelsNColours( PixelsMap &bitmapBads, const i } if (tot > 0) { - for(int c = 0; c < colours; c++) { + for (int c = 0; c < colours; c++) { rawData[row][col * colours + c] = sum[c] / tot; } @@ -1131,7 +1131,7 @@ int RawImageSource::interpolateBadPixelsNColours( PixelsMap &bitmapBads, const i /* interpolateBadPixelsXtrans: correct raw pixels looking at the bitmap * takes into consideration if there are multiple bad pixels in the neighbourhood */ -int RawImageSource::interpolateBadPixelsXtrans( PixelsMap &bitmapBads ) +int RawImageSource::interpolateBadPixelsXtrans ( PixelsMap &bitmapBads ) { static const float eps = 1.f; int counter = 0; @@ -1139,26 +1139,26 @@ int RawImageSource::interpolateBadPixelsXtrans( PixelsMap &bitmapBads ) #pragma omp parallel for reduction(+:counter) schedule(dynamic,16) #endif - for( int row = 2; row < H - 2; row++ ) { - for(int col = 2; col < W - 2; col++ ) { - int skip = bitmapBads.skipIfZero(col, row); //optimization for a stripe all zero + for ( int row = 2; row < H - 2; row++ ) { + for (int col = 2; col < W - 2; col++ ) { + int skip = bitmapBads.skipIfZero (col, row); //optimization for a stripe all zero - if( skip ) { + if ( skip ) { col += skip - 1; //-1 is because of col++ in cycle continue; } - if(!bitmapBads.get(col, row)) { + if (!bitmapBads.get (col, row)) { continue; } float wtdsum = 0.f, norm = 0.f; - int pixelColor = ri->XTRANSFC(row, col); + int pixelColor = ri->XTRANSFC (row, col); float oldval = rawData[row][col]; - if(pixelColor == 1) { + if (pixelColor == 1) { // green channel. A green pixel can either be a solitary green pixel or a member of a 2x2 square of green pixels - if(ri->XTRANSFC(row, col - 1) == ri->XTRANSFC(row, col + 1)) { + if (ri->XTRANSFC (row, col - 1) == ri->XTRANSFC (row, col + 1)) { // If left and right neighbour have same colour, then this is a solitary green pixel // For these the following pixels will be used for interpolation. Pixel to be interpolated is in centre and marked with a P. 
// Pairs of pixels used in this step are numbered. A pair will be used if none of the pixels of the pair is marked bad @@ -1168,32 +1168,32 @@ int RawImageSource::interpolateBadPixelsXtrans( PixelsMap &bitmapBads ) // 0 0 P 0 0 // 4 6 0 5 3 // 0 2 0 1 0 - for( int dx = -1; dx <= 1; dx += 2) { // pixels marked 5 or 6 in above example. Distance to P is sqrt(2) => weighting is 0.70710678f - if( bitmapBads.get(col + dx, row - 1) || bitmapBads.get(col - dx, row + 1)) { + for ( int dx = -1; dx <= 1; dx += 2) { // pixels marked 5 or 6 in above example. Distance to P is sqrt(2) => weighting is 0.70710678f + if ( bitmapBads.get (col + dx, row - 1) || bitmapBads.get (col - dx, row + 1)) { continue; } - float dirwt = 0.70710678f / ( fabsf( rawData[row - 1][col + dx] - rawData[row + 1][col - dx]) + eps); + float dirwt = 0.70710678f / ( fabsf ( rawData[row - 1][col + dx] - rawData[row + 1][col - dx]) + eps); wtdsum += dirwt * (rawData[row - 1][col + dx] + rawData[row + 1][col - dx]); norm += dirwt; } - for( int dx = -1; dx <= 1; dx += 2) { // pixels marked 1 or 2 on above example. Distance to P is sqrt(5) => weighting is 0.44721359f - if( bitmapBads.get(col + dx, row - 2) || bitmapBads.get(col - dx, row + 2)) { + for ( int dx = -1; dx <= 1; dx += 2) { // pixels marked 1 or 2 on above example. Distance to P is sqrt(5) => weighting is 0.44721359f + if ( bitmapBads.get (col + dx, row - 2) || bitmapBads.get (col - dx, row + 2)) { continue; } - float dirwt = 0.44721359f / ( fabsf( rawData[row - 2][col + dx] - rawData[row + 2][col - dx]) + eps); + float dirwt = 0.44721359f / ( fabsf ( rawData[row - 2][col + dx] - rawData[row + 2][col - dx]) + eps); wtdsum += dirwt * (rawData[row - 2][col + dx] + rawData[row + 2][col - dx]); norm += dirwt; } - for( int dx = -2; dx <= 2; dx += 4) { // pixels marked 3 or 4 on above example. Distance to P is sqrt(5) => weighting is 0.44721359f - if( bitmapBads.get(col + dx, row - 1) || bitmapBads.get(col - dx, row + 1)) { + for ( int dx = -2; dx <= 2; dx += 4) { // pixels marked 3 or 4 on above example. Distance to P is sqrt(5) => weighting is 0.44721359f + if ( bitmapBads.get (col + dx, row - 1) || bitmapBads.get (col - dx, row + 1)) { continue; } - float dirwt = 0.44721359f / ( fabsf( rawData[row - 1][col + dx] - rawData[row + 1][col - dx]) + eps); + float dirwt = 0.44721359f / ( fabsf ( rawData[row - 1][col + dx] - rawData[row + 1][col - dx]) + eps); wtdsum += dirwt * (rawData[row - 1][col + dx] + rawData[row + 1][col - dx]); norm += dirwt; } @@ -1208,20 +1208,20 @@ int RawImageSource::interpolateBadPixelsXtrans( PixelsMap &bitmapBads ) // 3 0 0 0 // pixels marked 1 in above example. Distance to P is sqrt(2) => weighting is 0.70710678f - int offset1 = ri->XTRANSFC(row - 1, col - 1) == ri->XTRANSFC(row + 1, col + 1) ? 1 : -1; + int offset1 = ri->XTRANSFC (row - 1, col - 1) == ri->XTRANSFC (row + 1, col + 1) ? 1 : -1; - if( !(bitmapBads.get(col - offset1, row - 1) || bitmapBads.get(col + offset1, row + 1))) { - float dirwt = 0.70710678f / ( fabsf( rawData[row - 1][col - offset1] - rawData[row + 1][col + offset1]) + eps); + if ( ! (bitmapBads.get (col - offset1, row - 1) || bitmapBads.get (col + offset1, row + 1))) { + float dirwt = 0.70710678f / ( fabsf ( rawData[row - 1][col - offset1] - rawData[row + 1][col + offset1]) + eps); wtdsum += dirwt * (rawData[row - 1][col - offset1] + rawData[row + 1][col + offset1]); norm += dirwt; } // pixels marked 2 in above example. Distance to P is 1 => weighting is 1.f - int offsety = (ri->XTRANSFC(row - 1, col) != 1 ? 
1 : -1); + int offsety = (ri->XTRANSFC (row - 1, col) != 1 ? 1 : -1); int offsetx = offset1 * offsety; - if( !(bitmapBads.get(col + offsetx, row) || bitmapBads.get(col, row + offsety))) { - float dirwt = 1.f / ( fabsf( rawData[row][col + offsetx] - rawData[row + offsety][col]) + eps); + if ( ! (bitmapBads.get (col + offsetx, row) || bitmapBads.get (col, row + offsety))) { + float dirwt = 1.f / ( fabsf ( rawData[row][col + offsetx] - rawData[row + offsety][col]) + eps); wtdsum += dirwt * (rawData[row][col + offsetx] + rawData[row + offsety][col]); norm += dirwt; } @@ -1232,8 +1232,8 @@ int RawImageSource::interpolateBadPixelsXtrans( PixelsMap &bitmapBads ) offsety *= 2; // pixels marked 3 in above example. Distance to P is sqrt(5) => weighting is 0.44721359f - if( !(bitmapBads.get(col + offsetx, row + offsety2) || bitmapBads.get(col + offsetx2, row + offsety))) { - float dirwt = 0.44721359f / ( fabsf( rawData[row + offsety2][col + offsetx] - rawData[row + offsety][col + offsetx2]) + eps); + if ( ! (bitmapBads.get (col + offsetx, row + offsety2) || bitmapBads.get (col + offsetx2, row + offsety))) { + float dirwt = 0.44721359f / ( fabsf ( rawData[row + offsety2][col + offsetx] - rawData[row + offsety][col + offsetx2]) + eps); wtdsum += dirwt * (rawData[row + offsety2][col + offsetx] + rawData[row + offsety][col + offsetx2]); norm += dirwt; } @@ -1256,11 +1256,11 @@ int RawImageSource::interpolateBadPixelsXtrans( PixelsMap &bitmapBads ) // When we find a knight pixel, we get its counterpart, which has distance (+-3,+-3), where the signs of distance depend on the corner of the found knight pixel. // These pixels are marked 1 or 2 in above examples. Distance to P is sqrt(5) => weighting is 0.44721359f // The following loop simply scans the four possible places. To keep things simple, it does not stop after finding two knight pixels, because it will not find more than two - for(int d1 = -2, offsety = 3; d1 <= 2; d1 += 4, offsety -= 6) { - for(int d2 = -1, offsetx = 3; d2 < 1; d2 += 2, offsetx -= 6) { - if(ri->XTRANSFC(row + d1, col + d2) == pixelColor) { - if( !(bitmapBads.get(col + d2, row + d1) || bitmapBads.get(col + d2 + offsetx, row + d1 + offsety))) { - float dirwt = 0.44721359f / ( fabsf( rawData[row + d1][col + d2] - rawData[row + d1 + offsety][col + d2 + offsetx]) + eps); + for (int d1 = -2, offsety = 3; d1 <= 2; d1 += 4, offsety -= 6) { + for (int d2 = -1, offsetx = 3; d2 < 1; d2 += 2, offsetx -= 6) { + if (ri->XTRANSFC (row + d1, col + d2) == pixelColor) { + if ( ! 
(bitmapBads.get (col + d2, row + d1) || bitmapBads.get (col + d2 + offsetx, row + d1 + offsety))) { + float dirwt = 0.44721359f / ( fabsf ( rawData[row + d1][col + d2] - rawData[row + d1 + offsety][col + d2 + offsetx]) + eps); wtdsum += dirwt * (rawData[row + d1][col + d2] + rawData[row + d1 + offsety][col + d2 + offsetx]); norm += dirwt; } @@ -1273,35 +1273,35 @@ int RawImageSource::interpolateBadPixelsXtrans( PixelsMap &bitmapBads ) int dx, dy; // check horizontal - for(dx = -2, dy = 0; dx <= 2 && !distance2PixelFound; dx += 4) - if(ri->XTRANSFC(row, col + dx) == pixelColor) { + for (dx = -2, dy = 0; dx <= 2 && !distance2PixelFound; dx += 4) + if (ri->XTRANSFC (row, col + dx) == pixelColor) { distance2PixelFound = true; } - if(!distance2PixelFound) + if (!distance2PixelFound) // no distance 2 pixel on horizontal, check vertical - for(dx = 0, dy = -2; dy <= 2 && !distance2PixelFound; dy += 4) - if(ri->XTRANSFC(row + dy, col) == pixelColor) { + for (dx = 0, dy = -2; dy <= 2 && !distance2PixelFound; dy += 4) + if (ri->XTRANSFC (row + dy, col) == pixelColor) { distance2PixelFound = true; } // calculate the value of its virtual counterpart (marked with a V in above examples) float virtualPixel; - if(dy == 0) { + if (dy == 0) { virtualPixel = 0.5f * (rawData[row - 1][col - dx] + rawData[row + 1][col - dx]); } else { virtualPixel = 0.5f * (rawData[row - dy][col - 1] + rawData[row - dy][col + 1]); } // and weight as usual. Distance to P is 2 => weighting is 0.5f - float dirwt = 0.5f / ( fabsf( virtualPixel - rawData[row + dy][col + dx]) + eps); + float dirwt = 0.5f / ( fabsf ( virtualPixel - rawData[row + dy][col + dx]) + eps); wtdsum += dirwt * (virtualPixel + rawData[row + dy][col + dx]); norm += dirwt; } - if (LIKELY(norm > 0.f)) { // This means, we found at least one pair of valid pixels in the steps above, likelihood of this case is about 99.999% + if (LIKELY (norm > 0.f)) { // This means, we found at least one pair of valid pixels in the steps above, likelihood of this case is about 99.999% rawData[row][col] = wtdsum / (2.f * norm); //gradient weighted average, Factor of 2.f is an optimization to avoid multiplications in former steps counter++; } @@ -1317,7 +1317,7 @@ int RawImageSource::interpolateBadPixelsXtrans( PixelsMap &bitmapBads ) * (Taken from Emil Martinec idea) * (Optimized by Ingo Weyrich 2013 and 2015) */ -SSEFUNCTION int RawImageSource::findHotDeadPixels( PixelsMap &bpMap, float thresh, bool findHotPixels, bool findDeadPixels ) +SSEFUNCTION int RawImageSource::findHotDeadPixels ( PixelsMap &bpMap, float thresh, bool findHotPixels, bool findDeadPixels ) { float varthresh = (20.0 * (thresh / 100.0) + 1.0 ) / 24.f; @@ -1338,9 +1338,9 @@ SSEFUNCTION int RawImageSource::findHotDeadPixels( PixelsMap &bpMap, float thres for (int i = 2; i < H - 2; i++) { for (int j = 2; j < W - 2; j++) { - const float& temp = median(rawData[i - 2][j - 2], rawData[i - 2][j], rawData[i - 2][j + 2], - rawData[i][j - 2], rawData[i][j], rawData[i][j + 2], - rawData[i + 2][j - 2], rawData[i + 2][j], rawData[i + 2][j + 2]); + const float& temp = median (rawData[i - 2][j - 2], rawData[i - 2][j], rawData[i - 2][j + 2], + rawData[i][j - 2], rawData[i][j], rawData[i][j + 2], + rawData[i + 2][j - 2], rawData[i + 2][j], rawData[i + 2][j + 2]); cfablur[i * W + j] = rawData[i][j] - temp; } } @@ -1351,24 +1351,24 @@ SSEFUNCTION int RawImageSource::findHotDeadPixels( PixelsMap &bpMap, float thres #pragma omp single #endif { - for(int i = 0; i < 2; i++) { - for(int j = 0; j < W; j++) { + for (int i = 0; i < 2; i++) { 
+ for (int j = 0; j < W; j++) { cfablur[i * W + j] = 0.f; } } - for(int i = 2; i < H - 2; i++) { - for(int j = 0; j < 2; j++) { + for (int i = 2; i < H - 2; i++) { + for (int j = 0; j < 2; j++) { cfablur[i * W + j] = 0.f; } - for(int j = W - 2; j < W; j++) { + for (int j = W - 2; j < W; j++) { cfablur[i * W + j] = 0.f; } } - for(int i = H - 2; i < H; i++) { - for(int j = 0; j < W; j++) { + for (int i = H - 2; i < H; i++) { + for (int j = 0; j < W; j++) { cfablur[i * W + j] = 0.f; } } @@ -1387,34 +1387,34 @@ SSEFUNCTION int RawImageSource::findHotDeadPixels( PixelsMap &bpMap, float thres //evaluate pixel for heat/death float pixdev = cfablur[rrmWpcc]; - if(pixdev == 0.f) { + if (pixdev == 0.f) { continue; } - if((!findDeadPixels) && pixdev < 0) { + if ((!findDeadPixels) && pixdev < 0) { continue; } - if((!findHotPixels) && pixdev > 0) { + if ((!findHotPixels) && pixdev > 0) { continue; } - pixdev = fabsf(pixdev); + pixdev = fabsf (pixdev); float hfnbrave = -pixdev; #ifdef __SSE2__ // sum up 5*4 = 20 values using SSE // 10 fabs function calls and float 10 additions with SSE - vfloat sum = vabsf(LVFU(cfablur[(rr - 2) * W + cc - 2])) + vabsf(LVFU(cfablur[(rr - 1) * W + cc - 2])); - sum += vabsf(LVFU(cfablur[(rr) * W + cc - 2])); - sum += vabsf(LVFU(cfablur[(rr + 1) * W + cc - 2])); - sum += vabsf(LVFU(cfablur[(rr + 2) * W + cc - 2])); + vfloat sum = vabsf (LVFU (cfablur[ (rr - 2) * W + cc - 2])) + vabsf (LVFU (cfablur[ (rr - 1) * W + cc - 2])); + sum += vabsf (LVFU (cfablur[ (rr) * W + cc - 2])); + sum += vabsf (LVFU (cfablur[ (rr + 1) * W + cc - 2])); + sum += vabsf (LVFU (cfablur[ (rr + 2) * W + cc - 2])); // horizontally add the values and add the result to hfnbrave - hfnbrave += vhadd(sum); + hfnbrave += vhadd (sum); // add remaining 5 values of last column for (int mm = rr - 2; mm <= rr + 2; mm++) { - hfnbrave += fabsf(cfablur[mm * W + cc + 2]); + hfnbrave += fabsf (cfablur[mm * W + cc + 2]); } #else @@ -1422,7 +1422,7 @@ SSEFUNCTION int RawImageSource::findHotDeadPixels( PixelsMap &bpMap, float thres // 25 fabs function calls and 25 float additions without SSE for (int mm = rr - 2; mm <= rr + 2; mm++) { for (int nn = cc - 2; nn <= cc + 2; nn++) { - hfnbrave += fabsf(cfablur[mm * W + nn]); + hfnbrave += fabsf (cfablur[mm * W + nn]); } } @@ -1430,7 +1430,7 @@ SSEFUNCTION int RawImageSource::findHotDeadPixels( PixelsMap &bpMap, float thres if (pixdev > varthresh * hfnbrave) { // mark the pixel as "bad" - bpMap.set(cc, rr); + bpMap.set (cc, rr); counter++; } }//end of pixel evaluation @@ -1506,7 +1506,7 @@ int RawImageSource::load (const Glib::ustring &fname, bool batch) plistener->setProgress (0.0); } - ri = new RawImage(fname); + ri = new RawImage (fname); int errCode = ri->loadRaw (true, true, plistener, 0.8); if (errCode) { @@ -1526,18 +1526,18 @@ int RawImageSource::load (const Glib::ustring &fname, bool batch) for (int i = 0; i < 3; i++) for (int j = 0; j < 3; j++) { - imatrices.rgb_cam[i][j] = ri->get_rgb_cam(i, j); + imatrices.rgb_cam[i][j] = ri->get_rgb_cam (i, j); } // compute inverse of the color transformation matrix // first arg is matrix, second arg is inverse inverse33 (imatrices.rgb_cam, imatrices.cam_rgb); - d1x = ! ri->get_model().compare("D1X"); + d1x = ! 
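The findHotDeadPixels() hunks above implement a median-deviation test: the candidate's deviation from the median of its eight same-colour neighbours at distance 2 is compared against a threshold times the summed deviations over the surrounding 5x5 window, and the SSE branch only vectorises that neighbourhood sum. A scalar sketch of the same test, assuming a std::vector raster and a simple median9() helper in place of RawTherapee's median() and aligned buffers:

// Illustrative scalar version of the hot/dead pixel test.
#include <algorithm>
#include <array>
#include <cmath>
#include <vector>

static float median9(std::array<float, 9> v)
{
    std::nth_element(v.begin(), v.begin() + 4, v.end());
    return v[4];
}

int findHotDeadSketch(const std::vector<std::vector<float>> &raw,
                      std::vector<std::vector<bool>> &bad,
                      float thresh, bool findHot, bool findDead)
{
    const int H = raw.size(), W = raw[0].size();
    const float varthresh = (20.f * (thresh / 100.f) + 1.f) / 24.f;
    std::vector<std::vector<float>> dev(H, std::vector<float>(W, 0.f)); // borders stay 0

    for (int i = 2; i < H - 2; ++i)
        for (int j = 2; j < W - 2; ++j)
            dev[i][j] = raw[i][j] - median9({raw[i - 2][j - 2], raw[i - 2][j], raw[i - 2][j + 2],
                                             raw[i][j - 2],     raw[i][j],     raw[i][j + 2],
                                             raw[i + 2][j - 2], raw[i + 2][j], raw[i + 2][j + 2]});

    int counter = 0;

    for (int rr = 2; rr < H - 2; ++rr)
        for (int cc = 2; cc < W - 2; ++cc) {
            float pixdev = dev[rr][cc];

            if (pixdev == 0.f || (!findDead && pixdev < 0.f) || (!findHot && pixdev > 0.f)) {
                continue;
            }

            pixdev = std::fabs(pixdev);
            float hfnbrave = -pixdev;                // exclude the centre from the sum

            for (int mm = rr - 2; mm <= rr + 2; ++mm)
                for (int nn = cc - 2; nn <= cc + 2; ++nn) {
                    hfnbrave += std::fabs(dev[mm][nn]);
                }

            if (pixdev > varthresh * hfnbrave) {     // deviation clearly above local average
                bad[rr][cc] = true;
                ++counter;
            }
        }

    return counter;
}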
ri->get_model().compare ("D1X"); - if(ri->getSensorType() == ST_FUJI_XTRANS) { + if (ri->getSensorType() == ST_FUJI_XTRANS) { border = 7; - } else if(ri->getSensorType() == ST_FOVEON) { + } else if (ri->getSensorType() == ST_FOVEON) { border = 0; } @@ -1546,7 +1546,7 @@ int RawImageSource::load (const Glib::ustring &fname, bool batch) } // create profile - memset (imatrices.xyz_cam, 0, sizeof(imatrices.xyz_cam)); + memset (imatrices.xyz_cam, 0, sizeof (imatrices.xyz_cam)); for (int i = 0; i < 3; i++) for (int j = 0; j < 3; j++) @@ -1558,18 +1558,18 @@ int RawImageSource::load (const Glib::ustring &fname, bool batch) inverse33 (imatrices.xyz_cam, imatrices.cam_xyz); for (int c = 0; c < 4; c++) { - c_white[c] = ri->get_white(c); + c_white[c] = ri->get_white (c); } // First we get the "as shot" ("Camera") white balance and store it float pre_mul[4]; // FIXME: get_colorsCoeff not so much used nowadays, when we have calculate_scale_mul() function here - ri->get_colorsCoeff( pre_mul, scale_mul, c_black, false);//modify for black level - camInitialGain = max(scale_mul[0], scale_mul[1], scale_mul[2], scale_mul[3]) / min(scale_mul[0], scale_mul[1], scale_mul[2], scale_mul[3]); + ri->get_colorsCoeff ( pre_mul, scale_mul, c_black, false); //modify for black level + camInitialGain = max (scale_mul[0], scale_mul[1], scale_mul[2], scale_mul[3]) / min (scale_mul[0], scale_mul[1], scale_mul[2], scale_mul[3]); - double camwb_red = ri->get_pre_mul(0) / pre_mul[0]; - double camwb_green = ri->get_pre_mul(1) / pre_mul[1]; - double camwb_blue = ri->get_pre_mul(2) / pre_mul[2]; + double camwb_red = ri->get_pre_mul (0) / pre_mul[0]; + double camwb_green = ri->get_pre_mul (1) / pre_mul[1]; + double camwb_blue = ri->get_pre_mul (2) / pre_mul[2]; double cam_r = imatrices.rgb_cam[0][0] * camwb_red + imatrices.rgb_cam[0][1] * camwb_green + imatrices.rgb_cam[0][2] * camwb_blue; double cam_g = imatrices.rgb_cam[1][0] * camwb_red + imatrices.rgb_cam[1][1] * camwb_green + imatrices.rgb_cam[1][2] * camwb_blue; double cam_b = imatrices.rgb_cam[2][0] * camwb_red + imatrices.rgb_cam[2][1] * camwb_green + imatrices.rgb_cam[2][2] * camwb_blue; @@ -1580,11 +1580,11 @@ int RawImageSource::load (const Glib::ustring &fname, bool batch) { // ...then we re-get the constants but now with auto which gives us better demosaicing and CA auto-correct // performance for strange white balance settings (such as UniWB) - ri->get_colorsCoeff( ref_pre_mul, scale_mul, c_black, true); - refwb_red = ri->get_pre_mul(0) / ref_pre_mul[0]; - refwb_green = ri->get_pre_mul(1) / ref_pre_mul[1]; - refwb_blue = ri->get_pre_mul(2) / ref_pre_mul[2]; - initialGain = max(scale_mul[0], scale_mul[1], scale_mul[2], scale_mul[3]) / min(scale_mul[0], scale_mul[1], scale_mul[2], scale_mul[3]); + ri->get_colorsCoeff ( ref_pre_mul, scale_mul, c_black, true); + refwb_red = ri->get_pre_mul (0) / ref_pre_mul[0]; + refwb_green = ri->get_pre_mul (1) / ref_pre_mul[1]; + refwb_blue = ri->get_pre_mul (2) / ref_pre_mul[2]; + initialGain = max (scale_mul[0], scale_mul[1], scale_mul[2], scale_mul[3]) / min (scale_mul[0], scale_mul[1], scale_mul[2], scale_mul[3]); ref_r = imatrices.rgb_cam[0][0] * refwb_red + imatrices.rgb_cam[0][1] * refwb_green + imatrices.rgb_cam[0][2] * refwb_blue; ref_g = imatrices.rgb_cam[1][0] * refwb_red + imatrices.rgb_cam[1][1] * refwb_green + imatrices.rgb_cam[1][2] * refwb_blue; ref_b = imatrices.rgb_cam[2][0] * refwb_red + imatrices.rgb_cam[2][1] * refwb_green + imatrices.rgb_cam[2][2] * refwb_blue; @@ -1592,8 +1592,8 @@ int RawImageSource::load 
(const Glib::ustring &fname, bool batch) } if (settings->verbose) { - printf("Raw As Shot White balance: temp %f, tint %f\n", camera_wb.getTemp(), camera_wb.getGreen()); - printf("Raw Reference (auto) white balance: temp %f, tint %f, multipliers [%f %f %f | %f %f %f]\n", ReferenceWB.getTemp(), ReferenceWB.getGreen(), ref_r, ref_g, ref_b, refwb_red, refwb_blue, refwb_green); + printf ("Raw As Shot White balance: temp %f, tint %f\n", camera_wb.getTemp(), camera_wb.getGreen()); + printf ("Raw Reference (auto) white balance: temp %f, tint %f, multipliers [%f %f %f | %f %f %f]\n", ReferenceWB.getTemp(), ReferenceWB.getGreen(), ref_r, ref_g, ref_b, refwb_red, refwb_blue, refwb_green); } /*{ @@ -1635,11 +1635,9 @@ int RawImageSource::load (const Glib::ustring &fname, bool batch) rml.ciffBase = ri->get_ciffBase(); rml.ciffLength = ri->get_ciffLen(); idata = new ImageData (fname, &rml); - Glib::ustring nana = ri->get_filename(); - printf("nana=%s \n", nana.c_str()); - green(W, H); - red(W, H); - blue(W, H); + green (W, H); + red (W, H); + blue (W, H); //hpmap = allocArray(W, H); if (plistener) { @@ -1649,8 +1647,8 @@ int RawImageSource::load (const Glib::ustring &fname, bool batch) plistener = nullptr; // This must be reset, because only load() is called through progressConnector t2.set(); - if( settings->verbose ) { - printf("Load %s: %d usec\n", fname.c_str(), t2.etime(t1)); + if ( settings->verbose ) { + printf ("Load %s: %d usec\n", fname.c_str(), t2.etime (t1)); } return 0; // OK! @@ -1668,37 +1666,37 @@ void RawImageSource::preprocess (const RAWParams &raw, const LensProfParams &le RawImage *rid = nullptr; if (!raw.df_autoselect) { - if( !raw.dark_frame.empty()) { - rid = dfm.searchDarkFrame( raw.dark_frame ); + if ( !raw.dark_frame.empty()) { + rid = dfm.searchDarkFrame ( raw.dark_frame ); } } else { - rid = dfm.searchDarkFrame(idata->getMake(), idata->getModel(), idata->getISOSpeed(), idata->getShutterSpeed(), idata->getDateTimeAsTS()); + rid = dfm.searchDarkFrame (idata->getMake(), idata->getModel(), idata->getISOSpeed(), idata->getShutterSpeed(), idata->getDateTimeAsTS()); } - if( rid && settings->verbose) { - printf( "Subtracting Darkframe:%s\n", rid->get_filename().c_str()); + if ( rid && settings->verbose) { + printf ( "Subtracting Darkframe:%s\n", rid->get_filename().c_str()); } PixelsMap *bitmapBads = nullptr; int totBP = 0; // Hold count of bad pixels to correct - if(ri->zeroIsBad()) { // mark all pixels with value zero as bad, has to be called before FF and DF. dcraw sets this flag only for some cameras (mainly Panasonic and Leica) - bitmapBads = new PixelsMap(W, H); + if (ri->zeroIsBad()) { // mark all pixels with value zero as bad, has to be called before FF and DF. 
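The zeroIsBad() branch above marks every zero-valued sensel before dark-frame and flat-field handling and counts them with an OpenMP reduction so the total can be reported and handed to the interpolators. A compact sketch of that pattern, with a byte-per-pixel map standing in for PixelsMap (a packed bitmap would need extra care to stay race-free under the parallel writes):

// Illustrative sketch: mark zero-valued raw values in a map and count them.
#include <cstddef>
#include <cstdint>
#include <vector>

struct BadMap {
    int W, H;
    std::vector<uint8_t> bits;                       // one byte per pixel: race-free writes
    BadMap(int w, int h) : W(w), H(h), bits((std::size_t)w * h, 0) {}
    void set(int x, int y) { bits[(std::size_t)y * W + x] = 1; }
    bool get(int x, int y) const { return bits[(std::size_t)y * W + x] != 0; }
};

int markZeroAsBad(const std::vector<std::vector<float>> &raw, BadMap &map)
{
    const int H = raw.size(), W = raw[0].size();
    int totBP = 0;

#ifdef _OPENMP
    #pragma omp parallel for reduction(+:totBP) schedule(dynamic,16)
#endif
    for (int i = 0; i < H; ++i)
        for (int j = 0; j < W; ++j)
            if (raw[i][j] == 0.f) {
                map.set(j, i);                       // note the (col, row) argument order used above
                ++totBP;
            }

    return totBP;
}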
dcraw sets this flag only for some cameras (mainly Panasonic and Leica) + bitmapBads = new PixelsMap (W, H); #ifdef _OPENMP #pragma omp parallel for reduction(+:totBP) schedule(dynamic,16) #endif - for(int i = 0; i < H; i++) - for(int j = 0; j < W; j++) { - if(ri->data[i][j] == 0.f) { - bitmapBads->set(j, i); + for (int i = 0; i < H; i++) + for (int j = 0; j < W; j++) { + if (ri->data[i][j] == 0.f) { + bitmapBads->set (j, i); totBP++; } } - if( settings->verbose) { - printf( "%d pixels with value zero marked as bad pixels\n", totBP); + if ( settings->verbose) { + printf ( "%d pixels with value zero marked as bad pixels\n", totBP); } } @@ -1706,35 +1704,35 @@ void RawImageSource::preprocess (const RAWParams &raw, const LensProfParams &le RawImage *rif = nullptr; if (!raw.ff_AutoSelect) { - if( !raw.ff_file.empty()) { - rif = ffm.searchFlatField( raw.ff_file ); + if ( !raw.ff_file.empty()) { + rif = ffm.searchFlatField ( raw.ff_file ); } } else { - rif = ffm.searchFlatField( idata->getMake(), idata->getModel(), idata->getLens(), idata->getFocalLen(), idata->getFNumber(), idata->getDateTimeAsTS()); + rif = ffm.searchFlatField ( idata->getMake(), idata->getModel(), idata->getLens(), idata->getFocalLen(), idata->getFNumber(), idata->getDateTimeAsTS()); } bool hasFlatField = (rif != nullptr); - if( hasFlatField && settings->verbose) { - printf( "Flat Field Correction:%s\n", rif->get_filename().c_str()); + if ( hasFlatField && settings->verbose) { + printf ( "Flat Field Correction:%s\n", rif->get_filename().c_str()); } - copyOriginalPixels(raw, ri, rid, rif); + copyOriginalPixels (raw, ri, rid, rif); //FLATFIELD end // Always correct camera badpixels from .badpixels file - std::vector *bp = dfm.getBadPixels( ri->get_maker(), ri->get_model(), idata->getSerialNumber() ); + std::vector *bp = dfm.getBadPixels ( ri->get_maker(), ri->get_model(), idata->getSerialNumber() ); - if( bp ) { - if(!bitmapBads) { - bitmapBads = new PixelsMap(W, H); + if ( bp ) { + if (!bitmapBads) { + bitmapBads = new PixelsMap (W, H); } - totBP += bitmapBads->set( *bp ); + totBP += bitmapBads->set ( *bp ); - if( settings->verbose ) { + if ( settings->verbose ) { std::cout << "Correcting " << bp->size() << " pixels from .badpixels" << std::endl; } } @@ -1742,33 +1740,33 @@ void RawImageSource::preprocess (const RAWParams &raw, const LensProfParams &le // If darkframe selected, correct hotpixels found on darkframe bp = nullptr; - if( raw.df_autoselect ) { - bp = dfm.getHotPixels(idata->getMake(), idata->getModel(), idata->getISOSpeed(), idata->getShutterSpeed(), idata->getDateTimeAsTS()); - } else if( !raw.dark_frame.empty() ) { - bp = dfm.getHotPixels( raw.dark_frame ); + if ( raw.df_autoselect ) { + bp = dfm.getHotPixels (idata->getMake(), idata->getModel(), idata->getISOSpeed(), idata->getShutterSpeed(), idata->getDateTimeAsTS()); + } else if ( !raw.dark_frame.empty() ) { + bp = dfm.getHotPixels ( raw.dark_frame ); } - if(bp) { - if(!bitmapBads) { - bitmapBads = new PixelsMap(W, H); + if (bp) { + if (!bitmapBads) { + bitmapBads = new PixelsMap (W, H); } - totBP += bitmapBads->set( *bp ); + totBP += bitmapBads->set ( *bp ); - if( settings->verbose && !bp->empty()) { + if ( settings->verbose && !bp->empty()) { std::cout << "Correcting " << bp->size() << " hotpixels from darkframe" << std::endl; } } - scaleColors( 0, 0, W, H, raw); //+ + raw parameters for black level(raw.blackxx) + scaleColors ( 0, 0, W, H, raw); //+ + raw parameters for black level(raw.blackxx) // Correct vignetting of lens profile if (!hasFlatField && 
lensProf.useVign) { - LCPProfile *pLCPProf = lcpStore->getProfile(lensProf.lcpFile); + LCPProfile *pLCPProf = lcpStore->getProfile (lensProf.lcpFile); if (pLCPProf) { // don't check focal length to allow distortion correction for lenses without chip, also pass dummy focal length 1 in case of 0 - LCPMapper map(pLCPProf, max(idata->getFocalLen(), 1.0), idata->getFocalLen35mm(), idata->getFocusDist(), idata->getFNumber(), true, false, W, H, coarse, -1); + LCPMapper map (pLCPProf, max (idata->getFocalLen(), 1.0), idata->getFocalLen35mm(), idata->getFocusDist(), idata->getFNumber(), true, false, W, H, coarse, -1); #ifdef _OPENMP #pragma omp parallel for @@ -1777,7 +1775,7 @@ void RawImageSource::preprocess (const RAWParams &raw, const LensProfParams &le for (int y = 0; y < H; y++) { for (int x = 0; x < W; x++) { if (rawData[y][x] > 0) { - rawData[y][x] *= map.calcVignetteFac(x, y); + rawData[y][x] *= map.calcVignetteFac (x, y); } } } @@ -1792,20 +1790,20 @@ void RawImageSource::preprocess (const RAWParams &raw, const LensProfParams &le plistener->setProgress (0.0); } - if(!bitmapBads) { - bitmapBads = new PixelsMap(W, H); + if (!bitmapBads) { + bitmapBads = new PixelsMap (W, H); } - int nFound = findHotDeadPixels( *bitmapBads, raw.hotdeadpix_thresh, raw.hotPixelFilter, raw.deadPixelFilter ); + int nFound = findHotDeadPixels ( *bitmapBads, raw.hotdeadpix_thresh, raw.hotPixelFilter, raw.deadPixelFilter ); totBP += nFound; - if( settings->verbose && nFound > 0) { - printf( "Correcting %d hot/dead pixels found inside image\n", nFound ); + if ( settings->verbose && nFound > 0) { + printf ( "Correcting %d hot/dead pixels found inside image\n", nFound ); } } // check if it is an olympus E camera, if yes, compute G channel pre-compensation factors - if ( ri->getSensorType() == ST_BAYER && (raw.bayersensor.greenthresh || (((idata->getMake().size() >= 7 && idata->getMake().substr(0, 7) == "OLYMPUS" && idata->getModel()[0] == 'E') || (idata->getMake().size() >= 9 && idata->getMake().substr(0, 9) == "Panasonic")) && raw.bayersensor.method != RAWParams::BayerSensor::methodstring[ RAWParams::BayerSensor::vng4])) ) { + if ( ri->getSensorType() == ST_BAYER && (raw.bayersensor.greenthresh || (((idata->getMake().size() >= 7 && idata->getMake().substr (0, 7) == "OLYMPUS" && idata->getModel()[0] == 'E') || (idata->getMake().size() >= 9 && idata->getMake().substr (0, 9) == "Panasonic")) && raw.bayersensor.method != RAWParams::BayerSensor::methodstring[ RAWParams::BayerSensor::vng4])) ) { // global correction int ng1 = 0, ng2 = 0, i = 0; double avgg1 = 0., avgg2 = 0.; @@ -1816,7 +1814,7 @@ void RawImageSource::preprocess (const RAWParams &raw, const LensProfParams &le for (i = border; i < H - border; i++) for (int j = border; j < W - border; j++) - if (ri->ISGREEN(i, j)) { + if (ri->ISGREEN (i, j)) { if (i & 1) { avgg2 += rawData[i][j]; ng2++; @@ -1835,9 +1833,9 @@ void RawImageSource::preprocess (const RAWParams &raw, const LensProfParams &le for (int i = border; i < H - border; i++) for (int j = border; j < W - border; j++) - if (ri->ISGREEN(i, j)) { + if (ri->ISGREEN (i, j)) { float currData; - currData = (float)(rawData[i][j] * ((i & 1) ? corrg2 : corrg1)); + currData = (float) (rawData[i][j] * ((i & 1) ? 
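The Olympus/Panasonic branch just below averages the two green sub-channels (even and odd CFA rows) over the image interior and then rescales each towards their common mean; the exact corrg1/corrg2 formulas sit in context lines not shown in this hunk, so the factors in the sketch are a plausible reconstruction from the multiply that follows, not a verbatim copy. Sketch, with isGreen() standing in for ri->ISGREEN():

// Illustrative sketch of a global green-equilibration pass.
#include <vector>

void greenEquilibrateGlobal(std::vector<std::vector<float>> &raw,
                            bool (*isGreen)(int row, int col), int border)
{
    const int H = raw.size(), W = raw[0].size();
    double avgg1 = 0., avgg2 = 0.;
    long ng1 = 0, ng2 = 0;

    for (int i = border; i < H - border; ++i)
        for (int j = border; j < W - border; ++j)
            if (isGreen(i, j)) {
                if (i & 1) { avgg2 += raw[i][j]; ++ng2; }
                else       { avgg1 += raw[i][j]; ++ng1; }
            }

    const double mean1 = avgg1 / ng1, mean2 = avgg2 / ng2;
    const double corrg1 = 0.5 * (mean1 + mean2) / mean1;   // scale each sub-channel
    const double corrg2 = 0.5 * (mean1 + mean2) / mean2;   // towards the common mean

    for (int i = border; i < H - border; ++i)
        for (int j = border; j < W - border; ++j)
            if (isGreen(i, j)) {
                raw[i][j] = (float)(raw[i][j] * ((i & 1) ? corrg2 : corrg1));
            }
}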
corrg2 : corrg1)); rawData[i][j] = (currData); } } @@ -1848,17 +1846,17 @@ void RawImageSource::preprocess (const RAWParams &raw, const LensProfParams &le plistener->setProgress (0.0); } - green_equilibrate(0.01 * (raw.bayersensor.greenthresh)); + green_equilibrate (0.01 * (raw.bayersensor.greenthresh)); } - if( totBP ) { + if ( totBP ) { if ( ri->getSensorType() == ST_BAYER ) { - interpolateBadPixelsBayer( *bitmapBads ); + interpolateBadPixelsBayer ( *bitmapBads ); } else if ( ri->getSensorType() == ST_FUJI_XTRANS ) { - interpolateBadPixelsXtrans( *bitmapBads ); + interpolateBadPixelsXtrans ( *bitmapBads ); } else { - interpolateBadPixelsNColours( *bitmapBads, ri->get_colors() ); + interpolateBadPixelsNColours ( *bitmapBads, ri->get_colors() ); } } @@ -1868,23 +1866,23 @@ void RawImageSource::preprocess (const RAWParams &raw, const LensProfParams &le plistener->setProgress (0.0); } - cfa_linedn(0.00002 * (raw.bayersensor.linenoise)); + cfa_linedn (0.00002 * (raw.bayersensor.linenoise)); } - if ( (raw.ca_autocorrect || fabs(raw.cared) > 0.001 || fabs(raw.cablue) > 0.001) && ri->getSensorType() == ST_BAYER ) { // Auto CA correction disabled for X-Trans, for now... + if ( (raw.ca_autocorrect || fabs (raw.cared) > 0.001 || fabs (raw.cablue) > 0.001) && ri->getSensorType() == ST_BAYER ) { // Auto CA correction disabled for X-Trans, for now... if (plistener) { plistener->setProgressStr ("CA Auto Correction..."); plistener->setProgress (0.0); } - CA_correct_RT(raw.cared, raw.cablue, 10.0 - raw.caautostrength); + CA_correct_RT (raw.cared, raw.cablue, 10.0 - raw.caautostrength); } if ( raw.expos != 1 ) { - processRawWhitepoint(raw.expos, raw.preser); + processRawWhitepoint (raw.expos, raw.preser); } - if(prepareDenoise && dirpyrdenoiseExpComp == INFINITY) { + if (prepareDenoise && dirpyrdenoiseExpComp == INFINITY) { LUTu aehist; int aehistcompr; double clip = 0; @@ -1895,11 +1893,11 @@ void RawImageSource::preprocess (const RAWParams &raw, const LensProfParams &le t2.set(); - if( settings->verbose ) { - printf("Preprocessing: %d usec\n", t2.etime(t1)); + if ( settings->verbose ) { + printf ("Preprocessing: %d usec\n", t2.etime (t1)); } - if(bitmapBads) { + if (bitmapBads) { delete bitmapBads; } @@ -1907,7 +1905,7 @@ void RawImageSource::preprocess (const RAWParams &raw, const LensProfParams &le } //%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -void RawImageSource::demosaic(const RAWParams &raw) +void RawImageSource::demosaic (const RAWParams &raw) { MyTime t1, t2; t1.set(); @@ -1922,19 +1920,19 @@ void RawImageSource::demosaic(const RAWParams &raw) } else if (raw.bayersensor.method == RAWParams::BayerSensor::methodstring[RAWParams::BayerSensor::amaze] ) { amaze_demosaic_RT (0, 0, W, H); } else if (raw.bayersensor.method == RAWParams::BayerSensor::methodstring[RAWParams::BayerSensor::dcb] ) { - dcb_demosaic(raw.bayersensor.dcb_iterations, raw.bayersensor.dcb_enhance); + dcb_demosaic (raw.bayersensor.dcb_iterations, raw.bayersensor.dcb_enhance); } else if (raw.bayersensor.method == RAWParams::BayerSensor::methodstring[RAWParams::BayerSensor::eahd]) { eahd_demosaic (); } else if (raw.bayersensor.method == RAWParams::BayerSensor::methodstring[RAWParams::BayerSensor::igv]) { - igv_interpolate(W, H); + igv_interpolate (W, H); } else if (raw.bayersensor.method == RAWParams::BayerSensor::methodstring[RAWParams::BayerSensor::lmmse]) { - lmmse_interpolate_omp(W, H, raw.bayersensor.lmmse_iterations); + lmmse_interpolate_omp (W, H, raw.bayersensor.lmmse_iterations); } else if (raw.bayersensor.method == 
RAWParams::BayerSensor::methodstring[RAWParams::BayerSensor::fast] ) { fast_demosaic (0, 0, W, H); } else if (raw.bayersensor.method == RAWParams::BayerSensor::methodstring[RAWParams::BayerSensor::mono] ) { - nodemosaic(true); + nodemosaic (true); } else { - nodemosaic(false); + nodemosaic (false); } //if (raw.all_enhance) refinement_lassus(); @@ -1943,17 +1941,17 @@ void RawImageSource::demosaic(const RAWParams &raw) if (raw.xtranssensor.method == RAWParams::XTransSensor::methodstring[RAWParams::XTransSensor::fast] ) { fast_xtrans_interpolate(); } else if (raw.xtranssensor.method == RAWParams::XTransSensor::methodstring[RAWParams::XTransSensor::onePass]) { - xtrans_interpolate(1, false); + xtrans_interpolate (1, false); } else if (raw.xtranssensor.method == RAWParams::XTransSensor::methodstring[RAWParams::XTransSensor::threePass] ) { - xtrans_interpolate(3, true); - } else if(raw.xtranssensor.method == RAWParams::XTransSensor::methodstring[RAWParams::XTransSensor::mono] ) { - nodemosaic(true); + xtrans_interpolate (3, true); + } else if (raw.xtranssensor.method == RAWParams::XTransSensor::methodstring[RAWParams::XTransSensor::mono] ) { + nodemosaic (true); } else { - nodemosaic(false); + nodemosaic (false); } } else if (ri->get_colors() == 1) { // Monochrome - nodemosaic(true); + nodemosaic (true); } t2.set(); @@ -1962,18 +1960,18 @@ void RawImageSource::demosaic(const RAWParams &raw) rgbSourceModified = false; - if( settings->verbose ) { + if ( settings->verbose ) { if (getSensorType() == ST_BAYER) { - printf("Demosaicing Bayer data: %s - %d usec\n", raw.bayersensor.method.c_str(), t2.etime(t1)); + printf ("Demosaicing Bayer data: %s - %d usec\n", raw.bayersensor.method.c_str(), t2.etime (t1)); } else if (getSensorType() == ST_FUJI_XTRANS) { - printf("Demosaicing X-Trans data: %s - %d usec\n", raw.xtranssensor.method.c_str(), t2.etime(t1)); + printf ("Demosaicing X-Trans data: %s - %d usec\n", raw.xtranssensor.method.c_str(), t2.etime (t1)); } } } //void RawImageSource::retinexPrepareBuffers(ColorManagementParams cmp, RetinexParams retinexParams, multi_array2D &conversionBuffer, LUTu &lhist16RETI) -void RawImageSource::retinexPrepareBuffers(ColorManagementParams cmp, RetinexParams retinexParams, multi_array2D &conversionBuffer, LUTu &lhist16RETI) +void RawImageSource::retinexPrepareBuffers (ColorManagementParams cmp, RetinexParams retinexParams, multi_array2D &conversionBuffer, LUTu &lhist16RETI) { bool useHsl = (retinexParams.retinexcolorspace == "HSLLOG" || retinexParams.retinexcolorspace == "HSLLIN"); conversionBuffer[0] (W - 2 * border, H - 2 * border); @@ -1984,31 +1982,31 @@ void RawImageSource::retinexPrepareBuffers(ColorManagementParams cmp, RetinexPar LUTf *retinexgamtab;//gamma before and after Retinex to restore tones LUTf lutTonereti; - if(retinexParams.gammaretinex == "low") { - retinexgamtab = &(Color::gammatab_115_2); - } else if(retinexParams.gammaretinex == "mid") { - retinexgamtab = &(Color::gammatab_13_2); - } else if(retinexParams.gammaretinex == "hig") { - retinexgamtab = &(Color::gammatab_145_3); - } else if(retinexParams.gammaretinex == "fre") { + if (retinexParams.gammaretinex == "low") { + retinexgamtab = & (Color::gammatab_115_2); + } else if (retinexParams.gammaretinex == "mid") { + retinexgamtab = & (Color::gammatab_13_2); + } else if (retinexParams.gammaretinex == "hig") { + retinexgamtab = & (Color::gammatab_145_3); + } else if (retinexParams.gammaretinex == "fre") { GammaValues g_a; double pwr = 1.0 / retinexParams.gam; double gamm = retinexParams.gam; 
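For the "fre" gamma option the retinexPrepareBuffers() hunk samples a gamma/slope tone curve into a 65536-entry LUT: the swap of exponent and reciprocal when gamma < 1 appears just above, and the sampling loop with CLIP follows below. The sketch reproduces only that LUT-building pattern with a plain power law; the real curve comes from Color::calcGamma() and gammareti()/igammareti(), which add a linear slope segment not modelled here:

// Illustrative sketch: sample a tone curve at 65536 points, clipped to 16-bit range.
#include <algorithm>
#include <cmath>
#include <vector>

std::vector<float> buildToneLut(double gam)
{
    const double pwr = 1.0 / gam;                    // the code swaps pwr and gam when gam < 1
    std::vector<float> lut(65536);

    for (int i = 0; i < 65536; ++i) {
        const double val = i / 65535.0;
        const double x = std::pow(val, pwr);         // stand-in for gammareti()/igammareti()
        lut[i] = (float)std::min(std::max(x * 65535.0, 0.0), 65535.0); // CLIP
    }

    return lut;
}

Such a table is then applied per channel, roughly as the surrounding loops do, with the forward table before the Retinex stage and the inverse table afterwards.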
double ts = retinexParams.slope; double gamm2 = retinexParams.gam; - if(gamm2 < 1.) { - std::swap(pwr, gamm); + if (gamm2 < 1.) { + std::swap (pwr, gamm); } int mode = 0, imax = 0; - Color::calcGamma(pwr, ts, mode, imax, g_a); // call to calcGamma with selected gamma and slope + Color::calcGamma (pwr, ts, mode, imax, g_a); // call to calcGamma with selected gamma and slope // printf("g_a0=%f g_a1=%f g_a2=%f g_a3=%f g_a4=%f\n", g_a0,g_a1,g_a2,g_a3,g_a4); double start; double add; - if(gamm2 < 1.) { + if (gamm2 < 1.) { start = g_a[2]; add = g_a[4]; } else { @@ -2018,19 +2016,19 @@ void RawImageSource::retinexPrepareBuffers(ColorManagementParams cmp, RetinexPar double mul = 1. + g_a[4]; - lutTonereti(65536); + lutTonereti (65536); for (int i = 0; i < 65536; i++) { double val = (i) / 65535.; double x; - if(gamm2 < 1.) { + if (gamm2 < 1.) { x = Color::igammareti (val, gamm, start, ts, mul , add); } else { x = Color::gammareti (val, gamm, start, ts, mul , add); } - lutTonereti[i] = CLIP(x * 65535.);// CLIP avoid in some case extra values + lutTonereti[i] = CLIP (x * 65535.); // CLIP avoid in some case extra values } retinexgamtab = &lutTonereti; @@ -2075,7 +2073,7 @@ void RawImageSource::retinexPrepareBuffers(ColorManagementParams cmp, RetinexPar } } */ - if(retinexParams.gammaretinex != "none" && retinexParams.str != 0) {//gamma + if (retinexParams.gammaretinex != "none" && retinexParams.str != 0) { //gamma #ifdef _OPENMP #pragma omp parallel for @@ -2095,7 +2093,7 @@ void RawImageSource::retinexPrepareBuffers(ColorManagementParams cmp, RetinexPar } } - if(useHsl) { + if (useHsl) { #ifdef _OPENMP #pragma omp parallel #endif @@ -2103,14 +2101,14 @@ void RawImageSource::retinexPrepareBuffers(ColorManagementParams cmp, RetinexPar // one LUT per thread LUTu lhist16RETIThr; - if(lhist16RETI) + if (lhist16RETI) { - lhist16RETIThr(lhist16RETI.getSize()); + lhist16RETIThr (lhist16RETI.getSize()); lhist16RETIThr.clear(); } #ifdef __SSE2__ - vfloat c32768 = F2V(32768.f); + vfloat c32768 = F2V (32768.f); #endif #ifdef _OPENMP #pragma omp for @@ -2123,15 +2121,15 @@ void RawImageSource::retinexPrepareBuffers(ColorManagementParams cmp, RetinexPar for (; j < W - border - 3; j += 4) { vfloat H, S, L; - Color::rgb2hsl(LVFU(red[i][j]), LVFU(green[i][j]), LVFU(blue[i][j]), H, S, L); - STVFU(conversionBuffer[0][i - border][j - border], H); - STVFU(conversionBuffer[1][i - border][j - border], S); + Color::rgb2hsl (LVFU (red[i][j]), LVFU (green[i][j]), LVFU (blue[i][j]), H, S, L); + STVFU (conversionBuffer[0][i - border][j - border], H); + STVFU (conversionBuffer[1][i - border][j - border], S); L *= c32768; - STVFU(conversionBuffer[2][i - border][j - border], L); - STVFU(conversionBuffer[3][i - border][j - border], H); + STVFU (conversionBuffer[2][i - border][j - border], L); + STVFU (conversionBuffer[3][i - border][j - border], H); - if(lhist16RETI) { - for(int p = 0; p < 4; p++) { + if (lhist16RETI) { + for (int p = 0; p < 4; p++) { int pos = ( conversionBuffer[2][i - border][j - border + p]);//histogram in curve HSL lhist16RETIThr[pos]++; } @@ -2143,11 +2141,11 @@ void RawImageSource::retinexPrepareBuffers(ColorManagementParams cmp, RetinexPar for (; j < W - border; j++) { float L; //rgb=>lab - Color::rgb2hslfloat(red[i][j], green[i][j], blue[i][j], conversionBuffer[0][i - border][j - border], conversionBuffer[1][i - border][j - border], L); + Color::rgb2hslfloat (red[i][j], green[i][j], blue[i][j], conversionBuffer[0][i - border][j - border], conversionBuffer[1][i - border][j - border], L); L *= 32768.f; 
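Both conversion paths in this function accumulate the luminance histogram into a per-thread LUT (allocated just above) and merge the partial LUTs once under an omp critical section further below, which keeps contended updates out of the inner pixel loop. A minimal sketch of that pattern:

// Illustrative sketch of the per-thread histogram pattern: each thread fills a
// private LUT, and the private LUTs are merged once at the end.
#include <cstdint>
#include <vector>

std::vector<uint32_t> buildHistogram(const std::vector<float> &lum, int bins)
{
    std::vector<uint32_t> hist(bins, 0);

#ifdef _OPENMP
    #pragma omp parallel
#endif
    {
        std::vector<uint32_t> histThr(bins, 0);      // one LUT per thread

#ifdef _OPENMP
        #pragma omp for nowait
#endif
        for (long i = 0; i < (long)lum.size(); ++i) {
            const int pos = (int)lum[i];
            if (pos >= 0 && pos < bins) {
                ++histThr[pos];
            }
        }

#ifdef _OPENMP
        #pragma omp critical
#endif
        {
            for (int b = 0; b < bins; ++b) {
                hist[b] += histThr[b];               // add per-thread LUT to the global LUT
            }
        }
    }

    return hist;
}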
conversionBuffer[2][i - border][j - border] = L; - if(lhist16RETI) { + if (lhist16RETI) { int pos = L; lhist16RETIThr[pos]++; } @@ -2157,7 +2155,7 @@ void RawImageSource::retinexPrepareBuffers(ColorManagementParams cmp, RetinexPar #ifdef _OPENMP #pragma omp critical { - if(lhist16RETI) + if (lhist16RETI) { lhist16RETI += lhist16RETIThr; // Add per Thread LUT to global LUT } @@ -2168,9 +2166,9 @@ void RawImageSource::retinexPrepareBuffers(ColorManagementParams cmp, RetinexPar } else { TMatrix wprof = iccStore->workingSpaceMatrix (cmp.working); float wp[3][3] = { - {static_cast(wprof[0][0]), static_cast(wprof[0][1]), static_cast(wprof[0][2])}, - {static_cast(wprof[1][0]), static_cast(wprof[1][1]), static_cast(wprof[1][2])}, - {static_cast(wprof[2][0]), static_cast(wprof[2][1]), static_cast(wprof[2][2])} + {static_cast (wprof[0][0]), static_cast (wprof[0][1]), static_cast (wprof[0][2])}, + {static_cast (wprof[1][0]), static_cast (wprof[1][1]), static_cast (wprof[1][2])}, + {static_cast (wprof[2][0]), static_cast (wprof[2][1]), static_cast (wprof[2][2])} }; // Conversion rgb -> lab is hard to vectorize because it uses a lut (that's not the main problem) @@ -2182,8 +2180,8 @@ void RawImageSource::retinexPrepareBuffers(ColorManagementParams cmp, RetinexPar // one LUT per thread LUTu lhist16RETIThr; - if(lhist16RETI) { - lhist16RETIThr(lhist16RETI.getSize()); + if (lhist16RETI) { + lhist16RETIThr (lhist16RETI.getSize()); lhist16RETIThr.clear(); } @@ -2195,17 +2193,17 @@ void RawImageSource::retinexPrepareBuffers(ColorManagementParams cmp, RetinexPar for (int j = border; j < W - border; j++) { float X, Y, Z, L, aa, bb; //rgb=>lab - Color::rgbxyz(red[i][j], green[i][j], blue[i][j], X, Y, Z, wp); + Color::rgbxyz (red[i][j], green[i][j], blue[i][j], X, Y, Z, wp); //convert Lab - Color::XYZ2Lab(X, Y, Z, L, aa, bb); + Color::XYZ2Lab (X, Y, Z, L, aa, bb); conversionBuffer[0][i - border][j - border] = aa; conversionBuffer[1][i - border][j - border] = bb; conversionBuffer[2][i - border][j - border] = L; - conversionBuffer[3][i - border][j - border] = xatan2f(bb, aa); + conversionBuffer[3][i - border][j - border] = xatan2f (bb, aa); // if(R_>40000.f && G_ > 30000.f && B_ > 30000.f) conversionBuffer[3][i - border][j - border] = R_; // else conversionBuffer[3][i - border][j - border] = 0.f; - if(lhist16RETI) { + if (lhist16RETI) { int pos = L; lhist16RETIThr[pos]++;//histogram in Curve Lab } @@ -2214,7 +2212,7 @@ void RawImageSource::retinexPrepareBuffers(ColorManagementParams cmp, RetinexPar #ifdef _OPENMP #pragma omp critical { - if(lhist16RETI) { + if (lhist16RETI) { lhist16RETI += lhist16RETIThr; // Add per Thread LUT to global LUT } } @@ -2227,11 +2225,11 @@ void RawImageSource::retinexPrepareBuffers(ColorManagementParams cmp, RetinexPar } -void RawImageSource::retinexPrepareCurves(RetinexParams retinexParams, LUTf &cdcurve, LUTf &mapcurve, RetinextransmissionCurve &retinextransmissionCurve, RetinexgaintransmissionCurve &retinexgaintransmissionCurve, bool &retinexcontlutili, bool &mapcontlutili, bool &useHsl, LUTu & lhist16RETI, LUTu & histLRETI) +void RawImageSource::retinexPrepareCurves (RetinexParams retinexParams, LUTf &cdcurve, LUTf &mapcurve, RetinextransmissionCurve &retinextransmissionCurve, RetinexgaintransmissionCurve &retinexgaintransmissionCurve, bool &retinexcontlutili, bool &mapcontlutili, bool &useHsl, LUTu & lhist16RETI, LUTu & histLRETI) { useHsl = (retinexParams.retinexcolorspace == "HSLLOG" || retinexParams.retinexcolorspace == "HSLLIN"); - if(useHsl) { + if (useHsl) { 
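The non-HSL branch here converts each pixel to CIE L*a*b* through the working-space matrix and stores atan2(b*, a*) as the hue plane. A self-contained sketch with the standard formulas (L* on the usual 0..100 scale; the surrounding code works in its own scaled ranges, so treat this as the shape of the computation rather than a drop-in):

// Illustrative sketch: RGB -> XYZ via a 3x3 working-space matrix, then XYZ -> Lab.
#include <cmath>

static double fLab(double t)
{
    constexpr double eps = 216.0 / 24389.0, kappa = 24389.0 / 27.0;
    return t > eps ? std::cbrt(t) : (kappa * t + 16.0) / 116.0;
}

void rgb2lab(const double rgb[3], const double M[3][3], const double white[3],
             double &L, double &a, double &b)
{
    double xyz[3];

    for (int i = 0; i < 3; ++i) {
        xyz[i] = M[i][0] * rgb[0] + M[i][1] * rgb[1] + M[i][2] * rgb[2];
    }

    const double fx = fLab(xyz[0] / white[0]);
    const double fy = fLab(xyz[1] / white[1]);
    const double fz = fLab(xyz[2] / white[2]);

    L = 116.0 * fy - 16.0;
    a = 500.0 * (fx - fy);
    b = 200.0 * (fy - fz);
    // the hue plane stored above is then simply atan2(b, a)
}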
CurveFactory::curveDehaContL (retinexcontlutili, retinexParams.cdHcurve, cdcurve, 1, lhist16RETI, histLRETI); } else { CurveFactory::curveDehaContL (retinexcontlutili, retinexParams.cdcurve, cdcurve, 1, lhist16RETI, histLRETI); @@ -2239,10 +2237,10 @@ void RawImageSource::retinexPrepareCurves(RetinexParams retinexParams, LUTf &cdc CurveFactory::mapcurve (mapcontlutili, retinexParams.mapcurve, mapcurve, 1, lhist16RETI, histLRETI); - retinexParams.getCurves(retinextransmissionCurve, retinexgaintransmissionCurve); + retinexParams.getCurves (retinextransmissionCurve, retinexgaintransmissionCurve); } -void RawImageSource::retinex(ColorManagementParams cmp, RetinexParams deh, ToneCurveParams Tc, LUTf & cdcurve, LUTf & mapcurve, const RetinextransmissionCurve & dehatransmissionCurve, const RetinexgaintransmissionCurve & dehagaintransmissionCurve, multi_array2D &conversionBuffer, bool dehacontlutili, bool mapcontlutili, bool useHsl, float &minCD, float &maxCD, float &mini, float &maxi, float &Tmean, float &Tsigma, float &Tmin, float &Tmax, LUTu &histLRETI) +void RawImageSource::retinex (ColorManagementParams cmp, RetinexParams deh, ToneCurveParams Tc, LUTf & cdcurve, LUTf & mapcurve, const RetinextransmissionCurve & dehatransmissionCurve, const RetinexgaintransmissionCurve & dehagaintransmissionCurve, multi_array2D &conversionBuffer, bool dehacontlutili, bool mapcontlutili, bool useHsl, float &minCD, float &maxCD, float &mini, float &maxi, float &Tmean, float &Tsigma, float &Tmin, float &Tmax, LUTu &histLRETI) { MyTime t4, t5; t4.set(); @@ -2252,17 +2250,17 @@ void RawImageSource::retinex(ColorManagementParams cmp, RetinexParams deh, ToneC } LUTf lutToneireti; - lutToneireti(65536); + lutToneireti (65536); LUTf *retinexigamtab;//gamma before and after Retinex to restore tones - if(deh.gammaretinex == "low") { - retinexigamtab = &(Color::igammatab_115_2); - } else if(deh.gammaretinex == "mid") { - retinexigamtab = &(Color::igammatab_13_2); - } else if(deh.gammaretinex == "hig") { - retinexigamtab = &(Color::igammatab_145_3); - } else if(deh.gammaretinex == "fre") { + if (deh.gammaretinex == "low") { + retinexigamtab = & (Color::igammatab_115_2); + } else if (deh.gammaretinex == "mid") { + retinexigamtab = & (Color::igammatab_13_2); + } else if (deh.gammaretinex == "hig") { + retinexigamtab = & (Color::igammatab_145_3); + } else if (deh.gammaretinex == "fre") { GammaValues g_a; double pwr = 1.0 / deh.gam; double gamm = deh.gam; @@ -2270,17 +2268,17 @@ void RawImageSource::retinex(ColorManagementParams cmp, RetinexParams deh, ToneC double ts = deh.slope; int mode = 0, imax = 0; - if(gamm2 < 1.) { - std::swap(pwr, gamm); + if (gamm2 < 1.) { + std::swap (pwr, gamm); } - Color::calcGamma(pwr, ts, mode, imax, g_a); // call to calcGamma with selected gamma and slope + Color::calcGamma (pwr, ts, mode, imax, g_a); // call to calcGamma with selected gamma and slope double mul = 1. + g_a[4]; double add; double start; - if(gamm2 < 1.) { + if (gamm2 < 1.) { start = g_a[3]; add = g_a[3]; } else { @@ -2293,13 +2291,13 @@ void RawImageSource::retinex(ColorManagementParams cmp, RetinexParams deh, ToneC double val = (i) / 65535.; double x; - if(gamm2 < 1.) { + if (gamm2 < 1.) 
{ x = Color::gammareti (val, gamm, start, ts, mul , add); } else { x = Color::igammareti (val, gamm, start, ts, mul , add); } - lutToneireti[i] = CLIP(x * 65535.); + lutToneireti[i] = CLIP (x * 65535.); } retinexigamtab = &lutToneireti; @@ -2315,18 +2313,18 @@ void RawImageSource::retinex(ColorManagementParams cmp, RetinexParams deh, ToneC LUTf dLcurve; LUTu hist16RET; - if(dehacontlutili && histLRETI) { - hist16RET(32768); + if (dehacontlutili && histLRETI) { + hist16RET (32768); hist16RET.clear(); histLRETI.clear(); - dLcurve(32768); + dLcurve (32768); } FlatCurve* chcurve = nullptr;//curve c=f(H) bool chutili = false; if (deh.enabled && deh.retinexMethod == "highli") { - chcurve = new FlatCurve(deh.lhcurve); + chcurve = new FlatCurve (deh.lhcurve); if (!chcurve || chcurve->isIdentity()) { if (chcurve) { @@ -2347,8 +2345,8 @@ void RawImageSource::retinex(ColorManagementParams cmp, RetinexParams deh, ToneC // one LUT per thread LUTu hist16RETThr; - if(hist16RET) { - hist16RETThr(hist16RET.getSize()); + if (hist16RET) { + hist16RETThr (hist16RET.getSize()); hist16RETThr.clear(); } @@ -2357,11 +2355,11 @@ void RawImageSource::retinex(ColorManagementParams cmp, RetinexParams deh, ToneC #endif for (int i = 0; i < H - 2 * border; i++ ) - if(dehacontlutili) + if (dehacontlutili) for (int j = 0; j < W - 2 * border; j++) { LBuffer[i][j] = cdcurve[2.f * temp[i][j]] / 2.f; - if(histLRETI) { + if (histLRETI) { int pos = LBuffer[i][j]; hist16RETThr[pos]++; //histogram in Curve } @@ -2375,13 +2373,13 @@ void RawImageSource::retinex(ColorManagementParams cmp, RetinexParams deh, ToneC #pragma omp critical #endif { - if(hist16RET) { + if (hist16RET) { hist16RET += hist16RETThr; // Add per Thread LUT to global LUT } } } - if(hist16RET) {//update histogram + if (hist16RET) { //update histogram // TODO : When rgbcurvesspeedup branch is merged into master, replace this by the following 1-liner // hist16RET.compressTo(histLRETI); // also remove declaration and init of dLcurve some lines above then and finally remove this comment :) @@ -2392,15 +2390,15 @@ void RawImageSource::retinex(ColorManagementParams cmp, RetinexParams deh, ToneC for (int i = 0; i < 32768; i++) { float hval = dLcurve[i]; - int hi = (int)(255.0f * hval); + int hi = (int) (255.0f * hval); histLRETI[hi] += hist16RET[i]; } } - MSR(LBuffer, conversionBuffer[2], conversionBuffer[3], mapcurve, mapcontlutili, WNew, HNew, deh, dehatransmissionCurve, dehagaintransmissionCurve, minCD, maxCD, mini, maxi, Tmean, Tsigma, Tmin, Tmax); + MSR (LBuffer, conversionBuffer[2], conversionBuffer[3], mapcurve, mapcontlutili, WNew, HNew, deh, dehatransmissionCurve, dehagaintransmissionCurve, minCD, maxCD, mini, maxi, Tmean, Tsigma, Tmin, Tmax); - if(useHsl) { - if(chutili) { + if (useHsl) { + if (chutili) { #ifdef _OPENMP #pragma omp parallel for #endif @@ -2410,7 +2408,7 @@ void RawImageSource::retinex(ColorManagementParams cmp, RetinexParams deh, ToneC for (; j < W - border; j++) { - float valp = (chcurve->getVal(conversionBuffer[3][i - border][j - border]) - 0.5f); + float valp = (chcurve->getVal (conversionBuffer[3][i - border][j - border]) - 0.5f); conversionBuffer[1][i - border][j - border] *= (1.f + 2.f * valp); } @@ -2424,21 +2422,21 @@ void RawImageSource::retinex(ColorManagementParams cmp, RetinexParams deh, ToneC for (int i = border; i < H - border; i++ ) { int j = border; #ifdef __SSE2__ - vfloat c32768 = F2V(32768.f); + vfloat c32768 = F2V (32768.f); for (; j < W - border - 3; j += 4) { vfloat R, G, B; - Color::hsl2rgb(LVFU(conversionBuffer[0][i - 
border][j - border]), LVFU(conversionBuffer[1][i - border][j - border]), LVFU(LBuffer[i - border][j - border]) / c32768, R, G, B); + Color::hsl2rgb (LVFU (conversionBuffer[0][i - border][j - border]), LVFU (conversionBuffer[1][i - border][j - border]), LVFU (LBuffer[i - border][j - border]) / c32768, R, G, B); - STVFU(red[i][j], R); - STVFU(green[i][j], G); - STVFU(blue[i][j], B); + STVFU (red[i][j], R); + STVFU (green[i][j], G); + STVFU (blue[i][j], B); } #endif for (; j < W - border; j++) { - Color::hsl2rgbfloat(conversionBuffer[0][i - border][j - border], conversionBuffer[1][i - border][j - border], LBuffer[i - border][j - border] / 32768.f, red[i][j], green[i][j], blue[i][j]); + Color::hsl2rgbfloat (conversionBuffer[0][i - border][j - border], conversionBuffer[1][i - border][j - border], LBuffer[i - border][j - border] / 32768.f, red[i][j], green[i][j], blue[i][j]); } } @@ -2462,8 +2460,8 @@ void RawImageSource::retinex(ColorManagementParams cmp, RetinexParams deh, ToneC float sqrtBuffer[W] ALIGNED16; float sincosxBuffer[W] ALIGNED16; float sincosyBuffer[W] ALIGNED16; - const vfloat c327d68v = F2V(327.68); - const vfloat onev = F2V(1.f); + const vfloat c327d68v = F2V (327.68); + const vfloat onev = F2V (1.f); #endif // __SSE2__ #ifdef _OPENMP #pragma omp for @@ -2477,29 +2475,29 @@ void RawImageSource::retinex(ColorManagementParams cmp, RetinexParams deh, ToneC for (; j < W - border - 3; j += 4) { - vfloat av = LVFU(conversionBuffer[0][i - border][j - border]); - vfloat bv = LVFU(conversionBuffer[1][i - border][j - border]); - vfloat chprovv = vsqrtf(SQRV(av) + SQRV(bv)); - STVF(sqrtBuffer[j - border], chprovv / c327d68v); - vfloat HHv = xatan2f(bv, av); - STVF(atan2Buffer[j - border], HHv); + vfloat av = LVFU (conversionBuffer[0][i - border][j - border]); + vfloat bv = LVFU (conversionBuffer[1][i - border][j - border]); + vfloat chprovv = vsqrtf (SQRV (av) + SQRV (bv)); + STVF (sqrtBuffer[j - border], chprovv / c327d68v); + vfloat HHv = xatan2f (bv, av); + STVF (atan2Buffer[j - border], HHv); av /= chprovv; bv /= chprovv; - vmask selMask = vmaskf_eq(chprovv, ZEROV); - STVF(sincosyBuffer[j - border], vself(selMask, onev, av)); - STVF(sincosxBuffer[j - border], vselfnotzero(selMask, bv)); + vmask selMask = vmaskf_eq (chprovv, ZEROV); + STVF (sincosyBuffer[j - border], vself (selMask, onev, av)); + STVF (sincosxBuffer[j - border], vselfnotzero (selMask, bv)); } for (; j < W - border; j++) { float aa = conversionBuffer[0][i - border][j - border]; float bb = conversionBuffer[1][i - border][j - border]; - float Chprov1 = sqrt(SQR(aa) + SQR(bb)) / 327.68f; + float Chprov1 = sqrt (SQR (aa) + SQR (bb)) / 327.68f; sqrtBuffer[j - border] = Chprov1; - float HH = xatan2f(bb, aa); + float HH = xatan2f (bb, aa); atan2Buffer[j - border] = HH; - if(Chprov1 == 0.0f) { + if (Chprov1 == 0.0f) { sincosyBuffer[j - border] = 1.f; sincosxBuffer[j - border] = 0.0f; } else { @@ -2522,11 +2520,11 @@ void RawImageSource::retinex(ColorManagementParams cmp, RetinexParams deh, ToneC #else float aa = conversionBuffer[0][i - border][j - border]; float bb = conversionBuffer[1][i - border][j - border]; - float Chprov1 = sqrt(SQR(aa) + SQR(bb)) / 327.68f; - float HH = xatan2f(bb, aa); + float Chprov1 = sqrt (SQR (aa) + SQR (bb)) / 327.68f; + float HH = xatan2f (bb, aa); float2 sincosval;// = xsincosf(HH); - if(Chprov1 == 0.0f) { + if (Chprov1 == 0.0f) { sincosval.y = 1.f; sincosval.x = 0.0f; } else { @@ -2536,8 +2534,8 @@ void RawImageSource::retinex(ColorManagementParams cmp, RetinexParams deh, ToneC #endif - 
if(chutili) { // c=f(H) - float valp = float((chcurve->getVal(Color::huelab_to_huehsv2(HH)) - 0.5f)); + if (chutili) { // c=f(H) + float valp = float ((chcurve->getVal (Color::huelab_to_huehsv2 (HH)) - 0.5f)); Chprov1 *= (1.f + 2.f * valp); } @@ -2546,10 +2544,10 @@ void RawImageSource::retinex(ColorManagementParams cmp, RetinexParams deh, ToneC bool neg = false; bool more_rgb = false; //gamut control : Lab values are in gamut - Color::gamutLchonly(HH, sincosval, Lprov1, Chprov1, R, G, B, wip, highlight, 0.15f, 0.96f, neg, more_rgb); + Color::gamutLchonly (HH, sincosval, Lprov1, Chprov1, R, G, B, wip, highlight, 0.15f, 0.96f, neg, more_rgb); #else //gamut control : Lab values are in gamut - Color::gamutLchonly(HH, sincosval, Lprov1, Chprov1, R, G, B, wip, highlight, 0.15f, 0.96f); + Color::gamutLchonly (HH, sincosval, Lprov1, Chprov1, R, G, B, wip, highlight, 0.15f, 0.96f); #endif @@ -2564,9 +2562,9 @@ void RawImageSource::retinex(ColorManagementParams cmp, RetinexParams deh, ToneC #ifdef __SSE2__ vfloat wipv[3][3]; - for(int i = 0; i < 3; i++) - for(int j = 0; j < 3; j++) { - wipv[i][j] = F2V(wiprof[i][j]); + for (int i = 0; i < 3; i++) + for (int j = 0; j < 3; j++) { + wipv[i][j] = F2V (wiprof[i][j]); } #endif // __SSE2__ @@ -2581,12 +2579,12 @@ void RawImageSource::retinex(ColorManagementParams cmp, RetinexParams deh, ToneC for (; j < W - border - 3; j += 4) { vfloat x_, y_, z_; vfloat R, G, B; - Color::Lab2XYZ(LVFU(LBuffer[i - border][j - border]), LVFU(conversionBuffer[0][i - border][j - border]), LVFU(conversionBuffer[1][i - border][j - border]), x_, y_, z_) ; - Color::xyz2rgb(x_, y_, z_, R, G, B, wipv); + Color::Lab2XYZ (LVFU (LBuffer[i - border][j - border]), LVFU (conversionBuffer[0][i - border][j - border]), LVFU (conversionBuffer[1][i - border][j - border]), x_, y_, z_) ; + Color::xyz2rgb (x_, y_, z_, R, G, B, wipv); - STVFU(red[i][j], R); - STVFU(green[i][j], G); - STVFU(blue[i][j], B); + STVFU (red[i][j], R); + STVFU (green[i][j], G); + STVFU (blue[i][j], B); } @@ -2595,8 +2593,8 @@ void RawImageSource::retinex(ColorManagementParams cmp, RetinexParams deh, ToneC for (; j < W - border; j++) { float x_, y_, z_; float R, G, B; - Color::Lab2XYZ(LBuffer[i - border][j - border], conversionBuffer[0][i - border][j - border], conversionBuffer[1][i - border][j - border], x_, y_, z_) ; - Color::xyz2rgb(x_, y_, z_, R, G, B, wip); + Color::Lab2XYZ (LBuffer[i - border][j - border], conversionBuffer[0][i - border][j - border], conversionBuffer[1][i - border][j - border], x_, y_, z_) ; + Color::xyz2rgb (x_, y_, z_, R, G, B, wip); red[i][j] = R; green[i][j] = G; blue[i][j] = B; @@ -2608,7 +2606,7 @@ void RawImageSource::retinex(ColorManagementParams cmp, RetinexParams deh, ToneC delete chcurve; } - if(deh.gammaretinex != "none" && deh.str != 0) { //inverse gamma + if (deh.gammaretinex != "none" && deh.str != 0) { //inverse gamma #ifdef _OPENMP #pragma omp parallel for #endif @@ -2630,43 +2628,43 @@ void RawImageSource::retinex(ColorManagementParams cmp, RetinexParams deh, ToneC t5.set(); - if( settings->verbose ) { - printf("Retinex=%d usec\n", t5.etime(t4)); + if ( settings->verbose ) { + printf ("Retinex=%d usec\n", t5.etime (t4)); } } void RawImageSource::flushRawData() { - if(cache) { + if (cache) { delete [] cache; cache = nullptr; } if (rawData) { - rawData(0, 0); + rawData (0, 0); } } void RawImageSource::flushRGB() { if (green) { - green(0, 0); + green (0, 0); } if (red) { - red(0, 0); + red (0, 0); } if (blue) { - blue(0, 0); + blue (0, 0); } } -void 
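In the "highli" method the chroma of each pixel is modulated by a hue curve: a curve value of 0.5 is neutral, so the multiplier 1 + 2*(f(H) - 0.5) runs from 0 (fully desaturate that hue) to 2 (double its chroma), after which the gamut check above pulls the result back into range if needed. A tiny sketch, with hueCurve() as a hypothetical stand-in for FlatCurve::getVal() and the hue remapping simplified:

// Illustrative sketch of the hue-dependent chroma modulation.
#include <cmath>

double hueCurve(double h)                            // h expected in [0, 1]; placeholder shape
{
    return 0.5 + 0.25 * std::sin(6.28318530718 * h);
}

void modulateChromaByHue(float &chroma, float hue /* radians, from atan2(b, a) */)
{
    const double h01 = (hue + 3.14159265359) / 6.28318530718; // map [-pi, pi] to [0, 1]
    const float valp = (float)(hueCurve(h01) - 0.5);
    chroma *= (1.f + 2.f * valp);                    // 0.5 -> x1, 0 -> x0, 1 -> x2
}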
RawImageSource::HLRecovery_Global(ToneCurveParams hrp) +void RawImageSource::HLRecovery_Global (ToneCurveParams hrp) { if (hrp.hrenabled && hrp.method == "Color") { - if(!rgbSourceModified) { + if (!rgbSourceModified) { if (settings->verbose) { printf ("Applying Highlight Recovery: Color propagation...\n"); } @@ -2679,7 +2677,7 @@ void RawImageSource::HLRecovery_Global(ToneCurveParams hrp) } -void RawImageSource::processFlatField(const RAWParams &raw, RawImage *riFlatFile, unsigned short black[4]) +void RawImageSource::processFlatField (const RAWParams &raw, RawImage *riFlatFile, unsigned short black[4]) { // BENCHFUN float *cfablur = (float (*)) malloc (H * W * sizeof * cfablur); @@ -2688,17 +2686,17 @@ void RawImageSource::processFlatField(const RAWParams &raw, RawImage *riFlatFile //function call to cfabloxblur if (raw.ff_BlurType == RAWParams::ff_BlurTypestring[RAWParams::v_ff]) { - cfaboxblur(riFlatFile, cfablur, 2 * BS, 0); + cfaboxblur (riFlatFile, cfablur, 2 * BS, 0); } else if (raw.ff_BlurType == RAWParams::ff_BlurTypestring[RAWParams::h_ff]) { - cfaboxblur(riFlatFile, cfablur, 0, 2 * BS); + cfaboxblur (riFlatFile, cfablur, 0, 2 * BS); } else if (raw.ff_BlurType == RAWParams::ff_BlurTypestring[RAWParams::vh_ff]) { //slightly more complicated blur if trying to correct both vertical and horizontal anomalies - cfaboxblur(riFlatFile, cfablur, BS, BS); //first do area blur to correct vignette + cfaboxblur (riFlatFile, cfablur, BS, BS); //first do area blur to correct vignette } else { //(raw.ff_BlurType == RAWParams::ff_BlurTypestring[RAWParams::area_ff]) - cfaboxblur(riFlatFile, cfablur, BS, BS); + cfaboxblur (riFlatFile, cfablur, BS, BS); } - if(ri->getSensorType() == ST_BAYER) { + if (ri->getSensorType() == ST_BAYER) { float refcolor[2][2]; //find centre average values by channel @@ -2706,21 +2704,21 @@ void RawImageSource::processFlatField(const RAWParams &raw, RawImage *riFlatFile for (int n = 0; n < 2; n++) { int row = 2 * (H >> 2) + m; int col = 2 * (W >> 2) + n; - int c = FC(row, col); - int c4 = ( c == 1 && !(row & 1) ) ? 3 : c; - refcolor[m][n] = max(0.0f, cfablur[row * W + col] - black[c4]); + int c = FC (row, col); + int c4 = ( c == 1 && ! (row & 1) ) ? 3 : c; + refcolor[m][n] = max (0.0f, cfablur[row * W + col] - black[c4]); } float limitFactor = 1.f; - if(raw.ff_AutoClipControl) { + if (raw.ff_AutoClipControl) { int clipControlGui = 0; for (int m = 0; m < 2; m++) for (int n = 0; n < 2; n++) { float maxval = 0.f; - int c = FC(m, n); - int c4 = ( c == 1 && !(m & 1) ) ? 3 : c; + int c = FC (m, n); + int c4 = ( c == 1 && ! (m & 1) ) ? 
3 : c; #ifdef _OPENMP #pragma omp parallel #endif @@ -2732,9 +2730,9 @@ void RawImageSource::processFlatField(const RAWParams &raw, RawImage *riFlatFile for (int row = 0; row < H - m; row += 2) { for (int col = 0; col < W - n; col += 2) { - float tempval = (rawData[row + m][col + n] - black[c4]) * ( refcolor[m][n] / max(1e-5f, cfablur[(row + m) * W + col + n] - black[c4]) ); + float tempval = (rawData[row + m][col + n] - black[c4]) * ( refcolor[m][n] / max (1e-5f, cfablur[ (row + m) * W + col + n] - black[c4]) ); - if(tempval > maxvalthr) { + if (tempval > maxvalthr) { maxvalthr = tempval; } } @@ -2745,7 +2743,7 @@ void RawImageSource::processFlatField(const RAWParams &raw, RawImage *riFlatFile #endif { - if(maxvalthr > maxval) { + if (maxvalthr > maxval) { maxval = maxvalthr; } @@ -2754,14 +2752,14 @@ void RawImageSource::processFlatField(const RAWParams &raw, RawImage *riFlatFile // now we have the max value for the channel // if it clips, calculate factor to avoid clipping - if(maxval + black[c4] >= ri->get_white(c4)) { - limitFactor = min(limitFactor, ri->get_white(c4) / (maxval + black[c4])); + if (maxval + black[c4] >= ri->get_white (c4)) { + limitFactor = min (limitFactor, ri->get_white (c4) / (maxval + black[c4])); } } clipControlGui = (1.f - limitFactor) * 100.f; // this value can be used to set the clip control slider in gui } else { - limitFactor = max((float)(100 - raw.ff_clipControl) / 100.f, 0.01f); + limitFactor = max ((float) (100 - raw.ff_clipControl) / 100.f, 0.01f); } for (int m = 0; m < 2; m++) @@ -2770,7 +2768,7 @@ void RawImageSource::processFlatField(const RAWParams &raw, RawImage *riFlatFile } - unsigned int c[2][2] = {{FC(0, 0), FC(0, 1)}, {FC(1, 0), FC(1, 1)}}; + unsigned int c[2][2] = {{FC (0, 0), FC (0, 1)}, {FC (1, 0), FC (1, 1)}}; unsigned int c4[2][2]; c4[0][0] = ( c[0][0] == 1) ? 3 : c[0][0]; c4[0][1] = ( c[0][1] == 1) ? 
3 : c[0][1]; @@ -2778,14 +2776,14 @@ void RawImageSource::processFlatField(const RAWParams &raw, RawImage *riFlatFile c4[1][1] = c[1][1]; #ifdef __SSE2__ - vfloat refcolorv[2] = {_mm_set_ps(refcolor[0][1], refcolor[0][0], refcolor[0][1], refcolor[0][0]), - _mm_set_ps(refcolor[1][1], refcolor[1][0], refcolor[1][1], refcolor[1][0]) + vfloat refcolorv[2] = {_mm_set_ps (refcolor[0][1], refcolor[0][0], refcolor[0][1], refcolor[0][0]), + _mm_set_ps (refcolor[1][1], refcolor[1][0], refcolor[1][1], refcolor[1][0]) }; - vfloat blackv[2] = {_mm_set_ps(black[c4[0][1]], black[c4[0][0]], black[c4[0][1]], black[c4[0][0]]), - _mm_set_ps(black[c4[1][1]], black[c4[1][0]], black[c4[1][1]], black[c4[1][0]]) + vfloat blackv[2] = {_mm_set_ps (black[c4[0][1]], black[c4[0][0]], black[c4[0][1]], black[c4[0][0]]), + _mm_set_ps (black[c4[1][1]], black[c4[1][0]], black[c4[1][1]], black[c4[1][0]]) }; - vfloat epsv = F2V(1e-5f); + vfloat epsv = F2V (1e-5f); #endif #ifdef _OPENMP #pragma omp parallel for schedule(dynamic,16) @@ -2798,20 +2796,20 @@ void RawImageSource::processFlatField(const RAWParams &raw, RawImage *riFlatFile vfloat rowRefcolorv = refcolorv[row & 1]; for (; col < W - 3; col += 4) { - vfloat vignettecorrv = rowRefcolorv / vmaxf(epsv, LVFU(cfablur[(row) * W + col]) - rowBlackv); - vfloat valv = LVFU(rawData[row][col]); + vfloat vignettecorrv = rowRefcolorv / vmaxf (epsv, LVFU (cfablur[ (row) * W + col]) - rowBlackv); + vfloat valv = LVFU (rawData[row][col]); valv -= rowBlackv; - STVFU(rawData[row][col], valv * vignettecorrv + rowBlackv); + STVFU (rawData[row][col], valv * vignettecorrv + rowBlackv); } #endif for (; col < W; col ++) { - float vignettecorr = refcolor[row & 1][col & 1] / max(1e-5f, cfablur[(row) * W + col] - black[c4[row & 1][col & 1]]); + float vignettecorr = refcolor[row & 1][col & 1] / max (1e-5f, cfablur[ (row) * W + col] - black[c4[row & 1][col & 1]]); rawData[row][col] = (rawData[row][col] - black[c4[row & 1][col & 1]]) * vignettecorr + black[c4[row & 1][col & 1]]; } } - } else if(ri->getSensorType() == ST_FUJI_XTRANS) { + } else if (ri->getSensorType() == ST_FUJI_XTRANS) { float refcolor[3] = {0.f}; int cCount[3] = {0}; @@ -2820,18 +2818,18 @@ void RawImageSource::processFlatField(const RAWParams &raw, RawImage *riFlatFile for (int n = -3; n < 3; n++) { int row = 2 * (H >> 2) + m; int col = 2 * (W >> 2) + n; - int c = riFlatFile->XTRANSFC(row, col); - refcolor[c] += max(0.0f, cfablur[row * W + col] - black[c]); + int c = riFlatFile->XTRANSFC (row, col); + refcolor[c] += max (0.0f, cfablur[row * W + col] - black[c]); cCount[c] ++; } - for(int c = 0; c < 3; c++) { + for (int c = 0; c < 3; c++) { refcolor[c] = refcolor[c] / cCount[c]; } float limitFactor = 1.f; - if(raw.ff_AutoClipControl) { + if (raw.ff_AutoClipControl) { // determine maximum calculated value to avoid clipping // int clipControlGui = 0; float maxval = 0.f; @@ -2847,9 +2845,9 @@ void RawImageSource::processFlatField(const RAWParams &raw, RawImage *riFlatFile for (int row = 0; row < H; row++) { for (int col = 0; col < W; col++) { - float tempval = (rawData[row][col] - black[0]) * ( refcolor[ri->XTRANSFC(row, col)] / max(1e-5f, cfablur[(row) * W + col] - black[0]) ); + float tempval = (rawData[row][col] - black[0]) * ( refcolor[ri->XTRANSFC (row, col)] / max (1e-5f, cfablur[ (row) * W + col] - black[0]) ); - if(tempval > maxvalthr) { + if (tempval > maxvalthr) { maxvalthr = tempval; } } @@ -2859,23 +2857,23 @@ void RawImageSource::processFlatField(const RAWParams &raw, RawImage *riFlatFile #pragma omp critical #endif { 
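The Bayer flat-field loops above apply a per-pixel gain anchored to a reference level sampled near the image centre: each value is black-subtracted, multiplied by refcolor / max(eps, blurredFF - black), and the black level is added back; the SSE path only vectorises this arithmetic. A scalar sketch for a 2x2 CFA layout, assuming the blurred flat field is already available as a row-major buffer:

// Illustrative scalar sketch of the flat-field gain. refcolor[2][2] holds the
// centre reference per CFA position, black[2][2] the per-position black level.
#include <algorithm>
#include <cstddef>
#include <vector>

void applyFlatField(std::vector<std::vector<float>> &raw,
                    const std::vector<float> &ffBlur,        // blurred flat field, row-major
                    const float refcolor[2][2], const float black[2][2])
{
    const int H = raw.size(), W = raw[0].size();

    for (int row = 0; row < H; ++row)
        for (int col = 0; col < W; ++col) {
            const float blk = black[row & 1][col & 1];
            const float gain = refcolor[row & 1][col & 1]
                               / std::max(1e-5f, ffBlur[(std::size_t)row * W + col] - blk);
            raw[row][col] = (raw[row][col] - blk) * gain + blk;
        }
}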
- if(maxvalthr > maxval) { + if (maxvalthr > maxval) { maxval = maxvalthr; } } } // there's only one white level for xtrans - if(maxval + black[0] > ri->get_white(0)) { - limitFactor = ri->get_white(0) / (maxval + black[0]); + if (maxval + black[0] > ri->get_white (0)) { + limitFactor = ri->get_white (0) / (maxval + black[0]); // clipControlGui = (1.f - limitFactor) * 100.f; // this value can be used to set the clip control slider in gui } } else { - limitFactor = max((float)(100 - raw.ff_clipControl) / 100.f, 0.01f); + limitFactor = max ((float) (100 - raw.ff_clipControl) / 100.f, 0.01f); } - for(int c = 0; c < 3; c++) { + for (int c = 0; c < 3; c++) { refcolor[c] *= limitFactor; } @@ -2885,8 +2883,8 @@ void RawImageSource::processFlatField(const RAWParams &raw, RawImage *riFlatFile for (int row = 0; row < H; row++) { for (int col = 0; col < W; col++) { - int c = ri->XTRANSFC(row, col); - float vignettecorr = ( refcolor[c] / max(1e-5f, cfablur[(row) * W + col] - black[c]) ); + int c = ri->XTRANSFC (row, col); + float vignettecorr = ( refcolor[c] / max (1e-5f, cfablur[ (row) * W + col] - black[c]) ); rawData[row][col] = (rawData[row][col] - black[c]) * vignettecorr + black[c]; } } @@ -2896,11 +2894,11 @@ void RawImageSource::processFlatField(const RAWParams &raw, RawImage *riFlatFile float *cfablur1 = (float (*)) malloc (H * W * sizeof * cfablur1); float *cfablur2 = (float (*)) malloc (H * W * sizeof * cfablur2); //slightly more complicated blur if trying to correct both vertical and horizontal anomalies - cfaboxblur(riFlatFile, cfablur1, 0, 2 * BS); //now do horizontal blur - cfaboxblur(riFlatFile, cfablur2, 2 * BS, 0); //now do vertical blur + cfaboxblur (riFlatFile, cfablur1, 0, 2 * BS); //now do horizontal blur + cfaboxblur (riFlatFile, cfablur2, 2 * BS, 0); //now do vertical blur - if(ri->getSensorType() == ST_BAYER) { - unsigned int c[2][2] = {{FC(0, 0), FC(0, 1)}, {FC(1, 0), FC(1, 1)}}; + if (ri->getSensorType() == ST_BAYER) { + unsigned int c[2][2] = {{FC (0, 0), FC (0, 1)}, {FC (1, 0), FC (1, 1)}}; unsigned int c4[2][2]; c4[0][0] = ( c[0][0] == 1) ? 3 : c[0][0]; c4[0][1] = ( c[0][1] == 1) ? 
3 : c[0][1]; @@ -2908,11 +2906,11 @@ void RawImageSource::processFlatField(const RAWParams &raw, RawImage *riFlatFile c4[1][1] = c[1][1]; #ifdef __SSE2__ - vfloat blackv[2] = {_mm_set_ps(black[c4[0][1]], black[c4[0][0]], black[c4[0][1]], black[c4[0][0]]), - _mm_set_ps(black[c4[1][1]], black[c4[1][0]], black[c4[1][1]], black[c4[1][0]]) + vfloat blackv[2] = {_mm_set_ps (black[c4[0][1]], black[c4[0][0]], black[c4[0][1]], black[c4[0][0]]), + _mm_set_ps (black[c4[1][1]], black[c4[1][0]], black[c4[1][1]], black[c4[1][0]]) }; - vfloat epsv = F2V(1e-5f); + vfloat epsv = F2V (1e-5f); #endif #ifdef _OPENMP #pragma omp parallel for schedule(dynamic,16) @@ -2924,31 +2922,31 @@ void RawImageSource::processFlatField(const RAWParams &raw, RawImage *riFlatFile vfloat rowBlackv = blackv[row & 1]; for (; col < W - 3; col += 4) { - vfloat linecorrv = SQRV(vmaxf(epsv, LVFU(cfablur[row * W + col]) - rowBlackv)) / - (vmaxf(epsv, LVFU(cfablur1[row * W + col]) - rowBlackv) * vmaxf(epsv, LVFU(cfablur2[row * W + col]) - rowBlackv)); - vfloat valv = LVFU(rawData[row][col]); + vfloat linecorrv = SQRV (vmaxf (epsv, LVFU (cfablur[row * W + col]) - rowBlackv)) / + (vmaxf (epsv, LVFU (cfablur1[row * W + col]) - rowBlackv) * vmaxf (epsv, LVFU (cfablur2[row * W + col]) - rowBlackv)); + vfloat valv = LVFU (rawData[row][col]); valv -= rowBlackv; - STVFU(rawData[row][col], valv * linecorrv + rowBlackv); + STVFU (rawData[row][col], valv * linecorrv + rowBlackv); } #endif for (; col < W; col ++) { - float linecorr = SQR(max(1e-5f, cfablur[row * W + col] - black[c4[row & 1][col & 1]])) / - (max(1e-5f, cfablur1[row * W + col] - black[c4[row & 1][col & 1]]) * max(1e-5f, cfablur2[row * W + col] - black[c4[row & 1][col & 1]])) ; + float linecorr = SQR (max (1e-5f, cfablur[row * W + col] - black[c4[row & 1][col & 1]])) / + (max (1e-5f, cfablur1[row * W + col] - black[c4[row & 1][col & 1]]) * max (1e-5f, cfablur2[row * W + col] - black[c4[row & 1][col & 1]])) ; rawData[row][col] = (rawData[row][col] - black[c4[row & 1][col & 1]]) * linecorr + black[c4[row & 1][col & 1]]; } } - } else if(ri->getSensorType() == ST_FUJI_XTRANS) { + } else if (ri->getSensorType() == ST_FUJI_XTRANS) { #ifdef _OPENMP #pragma omp parallel for #endif for (int row = 0; row < H; row++) { for (int col = 0; col < W; col++) { - int c = ri->XTRANSFC(row, col); - float hlinecorr = (max(1e-5f, cfablur[(row) * W + col] - black[c]) / max(1e-5f, cfablur1[(row) * W + col] - black[c]) ); - float vlinecorr = (max(1e-5f, cfablur[(row) * W + col] - black[c]) / max(1e-5f, cfablur2[(row) * W + col] - black[c]) ); + int c = ri->XTRANSFC (row, col); + float hlinecorr = (max (1e-5f, cfablur[ (row) * W + col] - black[c]) / max (1e-5f, cfablur1[ (row) * W + col] - black[c]) ); + float vlinecorr = (max (1e-5f, cfablur[ (row) * W + col] - black[c]) / max (1e-5f, cfablur2[ (row) * W + col] - black[c]) ); rawData[row][col] = ((rawData[row][col] - black[c]) * hlinecorr * vlinecorr + black[c]); } } @@ -2967,25 +2965,25 @@ void RawImageSource::processFlatField(const RAWParams &raw, RawImage *riFlatFile /* Copy original pixel data and * subtract dark frame (if present) from current image and apply flat field correction (if present) */ -void RawImageSource::copyOriginalPixels(const RAWParams &raw, RawImage *src, RawImage *riDark, RawImage *riFlatFile ) +void RawImageSource::copyOriginalPixels (const RAWParams &raw, RawImage *src, RawImage *riDark, RawImage *riFlatFile ) { // TODO: Change type of black[] to float to avoid conversions unsigned short black[4] = { - (unsigned 
short)ri->get_cblack(0), (unsigned short)ri->get_cblack(1), - (unsigned short)ri->get_cblack(2), (unsigned short)ri->get_cblack(3) + (unsigned short)ri->get_cblack (0), (unsigned short)ri->get_cblack (1), + (unsigned short)ri->get_cblack (2), (unsigned short)ri->get_cblack (3) }; if (ri->getSensorType() == ST_BAYER || ri->getSensorType() == ST_FUJI_XTRANS) { if (!rawData) { - rawData(W, H); + rawData (W, H); } if (riDark && W == riDark->get_width() && H == riDark->get_height()) { // This works also for xtrans-sensors, because black[0] to black[4] are equal for these for (int row = 0; row < H; row++) { for (int col = 0; col < W; col++) { - int c = FC(row, col); - int c4 = ( c == 1 && !(row & 1) ) ? 3 : c; - rawData[row][col] = max(src->data[row][col] + black[c4] - riDark->data[row][col], 0.0f); + int c = FC (row, col); + int c4 = ( c == 1 && ! (row & 1) ) ? 3 : c; + rawData[row][col] = max (src->data[row][col] + black[c4] - riDark->data[row][col], 0.0f); } } } else { @@ -3002,18 +3000,18 @@ void RawImageSource::copyOriginalPixels(const RAWParams &raw, RawImage *src, Raw if (riFlatFile && W == riFlatFile->get_width() && H == riFlatFile->get_height()) { - processFlatField(raw, riFlatFile, black); + processFlatField (raw, riFlatFile, black); } // flatfield } else if (ri->get_colors() == 1) { // Monochrome if (!rawData) { - rawData(W, H); + rawData (W, H); } if (riDark && W == riDark->get_width() && H == riDark->get_height()) { for (int row = 0; row < H; row++) { for (int col = 0; col < W; col++) { - rawData[row][col] = max(src->data[row][col] + black[0] - riDark->data[row][col], 0.0f); + rawData[row][col] = max (src->data[row][col] + black[0] - riDark->data[row][col], 0.0f); } } } else { @@ -3027,17 +3025,17 @@ void RawImageSource::copyOriginalPixels(const RAWParams &raw, RawImage *src, Raw // No bayer pattern // TODO: Is there a flat field correction possible? if (!rawData) { - rawData(3 * W, H); + rawData (3 * W, H); } if (riDark && W == riDark->get_width() && H == riDark->get_height()) { for (int row = 0; row < H; row++) { for (int col = 0; col < W; col++) { - int c = FC(row, col); - int c4 = ( c == 1 && !(row & 1) ) ? 3 : c; - rawData[row][3 * col + 0] = max(src->data[row][3 * col + 0] + black[c4] - riDark->data[row][3 * col + 0], 0.0f); - rawData[row][3 * col + 1] = max(src->data[row][3 * col + 1] + black[c4] - riDark->data[row][3 * col + 1], 0.0f); - rawData[row][3 * col + 2] = max(src->data[row][3 * col + 2] + black[c4] - riDark->data[row][3 * col + 2], 0.0f); + int c = FC (row, col); + int c4 = ( c == 1 && ! (row & 1) ) ? 
3 : c; + rawData[row][3 * col + 0] = max (src->data[row][3 * col + 0] + black[c4] - riDark->data[row][3 * col + 0], 0.0f); + rawData[row][3 * col + 1] = max (src->data[row][3 * col + 1] + black[c4] - riDark->data[row][3 * col + 1], 0.0f); + rawData[row][3 * col + 2] = max (src->data[row][3 * col + 2] + black[c4] - riDark->data[row][3 * col + 2], 0.0f); } } } else { @@ -3052,11 +3050,11 @@ void RawImageSource::copyOriginalPixels(const RAWParams &raw, RawImage *src, Raw } } -SSEFUNCTION void RawImageSource::cfaboxblur(RawImage *riFlatFile, float* cfablur, const int boxH, const int boxW) +SSEFUNCTION void RawImageSource::cfaboxblur (RawImage *riFlatFile, float* cfablur, const int boxH, const int boxW) { - if(boxW == 0 && boxH == 0) { // nothing to blur - memcpy(cfablur, riFlatFile->data[0], W * H * sizeof(float)); + if (boxW == 0 && boxH == 0) { // nothing to blur + memcpy (cfablur, riFlatFile->data[0], W * H * sizeof (float)); return; } @@ -3065,19 +3063,19 @@ SSEFUNCTION void RawImageSource::cfaboxblur(RawImage *riFlatFile, float* cfablur float *srcVertical = nullptr; - if(boxH > 0 && boxW > 0) { + if (boxH > 0 && boxW > 0) { // we need a temporary buffer if we have to blur both directions tmpBuffer = (float (*)) calloc (H * W, sizeof * tmpBuffer); } - if(boxH == 0) { + if (boxH == 0) { // if boxH == 0 we can skip the vertical blur and process the horizontal blur from riFlatFile to cfablur without using a temporary buffer cfatmp = cfablur; } else { cfatmp = tmpBuffer; } - if(boxW == 0) { + if (boxW == 0) { // if boxW == 0 we can skip the horizontal blur and process the vertical blur from riFlatFile to cfablur without using a temporary buffer srcVertical = riFlatFile->data[0]; } else { @@ -3089,7 +3087,7 @@ SSEFUNCTION void RawImageSource::cfaboxblur(RawImage *riFlatFile, float* cfablur #endif { - if(boxW > 0) { + if (boxW > 0) { //box blur cfa image; box size = BS //horizontal blur #ifdef _OPENMP @@ -3128,11 +3126,11 @@ SSEFUNCTION void RawImageSource::cfaboxblur(RawImage *riFlatFile, float* cfablur } } - if(boxH > 0) { + if (boxH > 0) { //vertical blur #ifdef __SSE2__ - vfloat leninitv = F2V(boxH / 2 + 1); - vfloat onev = F2V( 1.0f ); + vfloat leninitv = F2V (boxH / 2 + 1); + vfloat onev = F2V ( 1.0f ); vfloat temp1v, temp2v, temp3v, temp4v, lenv, lenp1v, lenm1v; int row; #ifdef _OPENMP @@ -3141,52 +3139,52 @@ SSEFUNCTION void RawImageSource::cfaboxblur(RawImage *riFlatFile, float* cfablur for (int col = 0; col < W - 7; col += 8) { lenv = leninitv; - temp1v = LVFU(srcVertical[0 * W + col]) / lenv; - temp2v = LVFU(srcVertical[1 * W + col]) / lenv; - temp3v = LVFU(srcVertical[0 * W + col + 4]) / lenv; - temp4v = LVFU(srcVertical[1 * W + col + 4]) / lenv; + temp1v = LVFU (srcVertical[0 * W + col]) / lenv; + temp2v = LVFU (srcVertical[1 * W + col]) / lenv; + temp3v = LVFU (srcVertical[0 * W + col + 4]) / lenv; + temp4v = LVFU (srcVertical[1 * W + col + 4]) / lenv; for (int i = 2; i < boxH + 2; i += 2) { - temp1v += LVFU(srcVertical[i * W + col]) / lenv; - temp2v += LVFU(srcVertical[(i + 1) * W + col]) / lenv; - temp3v += LVFU(srcVertical[i * W + col + 4]) / lenv; - temp4v += LVFU(srcVertical[(i + 1) * W + col + 4]) / lenv; + temp1v += LVFU (srcVertical[i * W + col]) / lenv; + temp2v += LVFU (srcVertical[ (i + 1) * W + col]) / lenv; + temp3v += LVFU (srcVertical[i * W + col + 4]) / lenv; + temp4v += LVFU (srcVertical[ (i + 1) * W + col + 4]) / lenv; } - STVFU(cfablur[0 * W + col], temp1v); - STVFU(cfablur[1 * W + col], temp2v); - STVFU(cfablur[0 * W + col + 4], temp3v); - STVFU(cfablur[1 * W + 
col + 4], temp4v); + STVFU (cfablur[0 * W + col], temp1v); + STVFU (cfablur[1 * W + col], temp2v); + STVFU (cfablur[0 * W + col + 4], temp3v); + STVFU (cfablur[1 * W + col + 4], temp4v); for (row = 2; row < boxH + 2; row += 2) { lenp1v = lenv + onev; - temp1v = (temp1v * lenv + LVFU(srcVertical[(row + boxH) * W + col])) / lenp1v; - temp2v = (temp2v * lenv + LVFU(srcVertical[(row + boxH + 1) * W + col])) / lenp1v; - temp3v = (temp3v * lenv + LVFU(srcVertical[(row + boxH) * W + col + 4])) / lenp1v; - temp4v = (temp4v * lenv + LVFU(srcVertical[(row + boxH + 1) * W + col + 4])) / lenp1v; - STVFU(cfablur[row * W + col], temp1v); - STVFU(cfablur[(row + 1)*W + col], temp2v); - STVFU(cfablur[row * W + col + 4], temp3v); - STVFU(cfablur[(row + 1)*W + col + 4], temp4v); + temp1v = (temp1v * lenv + LVFU (srcVertical[ (row + boxH) * W + col])) / lenp1v; + temp2v = (temp2v * lenv + LVFU (srcVertical[ (row + boxH + 1) * W + col])) / lenp1v; + temp3v = (temp3v * lenv + LVFU (srcVertical[ (row + boxH) * W + col + 4])) / lenp1v; + temp4v = (temp4v * lenv + LVFU (srcVertical[ (row + boxH + 1) * W + col + 4])) / lenp1v; + STVFU (cfablur[row * W + col], temp1v); + STVFU (cfablur[ (row + 1)*W + col], temp2v); + STVFU (cfablur[row * W + col + 4], temp3v); + STVFU (cfablur[ (row + 1)*W + col + 4], temp4v); lenv = lenp1v; } for (; row < H - boxH - 1; row += 2) { - temp1v = temp1v + (LVFU(srcVertical[(row + boxH) * W + col]) - LVFU(srcVertical[(row - boxH - 2) * W + col])) / lenv; - temp2v = temp2v + (LVFU(srcVertical[(row + 1 + boxH) * W + col]) - LVFU(srcVertical[(row + 1 - boxH - 2) * W + col])) / lenv; - temp3v = temp3v + (LVFU(srcVertical[(row + boxH) * W + col + 4]) - LVFU(srcVertical[(row - boxH - 2) * W + col + 4])) / lenv; - temp4v = temp4v + (LVFU(srcVertical[(row + 1 + boxH) * W + col + 4]) - LVFU(srcVertical[(row + 1 - boxH - 2) * W + col + 4])) / lenv; - STVFU(cfablur[row * W + col], temp1v); - STVFU(cfablur[(row + 1)*W + col], temp2v); - STVFU(cfablur[row * W + col + 4], temp3v); - STVFU(cfablur[(row + 1)*W + col + 4], temp4v); + temp1v = temp1v + (LVFU (srcVertical[ (row + boxH) * W + col]) - LVFU (srcVertical[ (row - boxH - 2) * W + col])) / lenv; + temp2v = temp2v + (LVFU (srcVertical[ (row + 1 + boxH) * W + col]) - LVFU (srcVertical[ (row + 1 - boxH - 2) * W + col])) / lenv; + temp3v = temp3v + (LVFU (srcVertical[ (row + boxH) * W + col + 4]) - LVFU (srcVertical[ (row - boxH - 2) * W + col + 4])) / lenv; + temp4v = temp4v + (LVFU (srcVertical[ (row + 1 + boxH) * W + col + 4]) - LVFU (srcVertical[ (row + 1 - boxH - 2) * W + col + 4])) / lenv; + STVFU (cfablur[row * W + col], temp1v); + STVFU (cfablur[ (row + 1)*W + col], temp2v); + STVFU (cfablur[row * W + col + 4], temp3v); + STVFU (cfablur[ (row + 1)*W + col + 4], temp4v); } - for(; row < H - boxH; row++) { - temp1v = temp1v + (LVFU(srcVertical[(row + boxH) * W + col]) - LVFU(srcVertical[(row - boxH - 2) * W + col])) / lenv; - temp3v = temp3v + (LVFU(srcVertical[(row + boxH) * W + col + 4]) - LVFU(srcVertical[(row - boxH - 2) * W + col + 4])) / lenv; - STVFU(cfablur[row * W + col], temp1v); - STVFU(cfablur[row * W + col + 4], temp3v); + for (; row < H - boxH; row++) { + temp1v = temp1v + (LVFU (srcVertical[ (row + boxH) * W + col]) - LVFU (srcVertical[ (row - boxH - 2) * W + col])) / lenv; + temp3v = temp3v + (LVFU (srcVertical[ (row + boxH) * W + col + 4]) - LVFU (srcVertical[ (row - boxH - 2) * W + col + 4])) / lenv; + STVFU (cfablur[row * W + col], temp1v); + STVFU (cfablur[row * W + col + 4], temp3v); vfloat swapv = temp1v; temp1v = temp2v; 
temp2v = swapv; @@ -3197,23 +3195,23 @@ SSEFUNCTION void RawImageSource::cfaboxblur(RawImage *riFlatFile, float* cfablur for (; row < H - 1; row += 2) { lenm1v = lenv - onev; - temp1v = (temp1v * lenv - LVFU(srcVertical[(row - boxH - 2) * W + col])) / lenm1v; - temp2v = (temp2v * lenv - LVFU(srcVertical[(row - boxH - 1) * W + col])) / lenm1v; - temp3v = (temp3v * lenv - LVFU(srcVertical[(row - boxH - 2) * W + col + 4])) / lenm1v; - temp4v = (temp4v * lenv - LVFU(srcVertical[(row - boxH - 1) * W + col + 4])) / lenm1v; - STVFU(cfablur[row * W + col], temp1v); - STVFU(cfablur[(row + 1)*W + col], temp2v); - STVFU(cfablur[row * W + col + 4], temp3v); - STVFU(cfablur[(row + 1)*W + col + 4], temp4v); + temp1v = (temp1v * lenv - LVFU (srcVertical[ (row - boxH - 2) * W + col])) / lenm1v; + temp2v = (temp2v * lenv - LVFU (srcVertical[ (row - boxH - 1) * W + col])) / lenm1v; + temp3v = (temp3v * lenv - LVFU (srcVertical[ (row - boxH - 2) * W + col + 4])) / lenm1v; + temp4v = (temp4v * lenv - LVFU (srcVertical[ (row - boxH - 1) * W + col + 4])) / lenm1v; + STVFU (cfablur[row * W + col], temp1v); + STVFU (cfablur[ (row + 1)*W + col], temp2v); + STVFU (cfablur[row * W + col + 4], temp3v); + STVFU (cfablur[ (row + 1)*W + col + 4], temp4v); lenv = lenm1v; } - for(; row < H; row++) { + for (; row < H; row++) { lenm1v = lenv - onev; - temp1v = (temp1v * lenv - LVFU(srcVertical[(row - boxH - 2) * W + col])) / lenm1v; - temp3v = (temp3v * lenv - LVFU(srcVertical[(row - boxH - 2) * W + col + 4])) / lenm1v; - STVFU(cfablur[(row)*W + col], temp1v); - STVFU(cfablur[(row)*W + col + 4], temp3v); + temp1v = (temp1v * lenv - LVFU (srcVertical[ (row - boxH - 2) * W + col])) / lenm1v; + temp3v = (temp3v * lenv - LVFU (srcVertical[ (row - boxH - 2) * W + col + 4])) / lenm1v; + STVFU (cfablur[ (row)*W + col], temp1v); + STVFU (cfablur[ (row)*W + col + 4], temp3v); } } @@ -3227,24 +3225,24 @@ SSEFUNCTION void RawImageSource::cfaboxblur(RawImage *riFlatFile, float* cfablur for (int i = 2; i < boxH + 2; i += 2) { cfablur[0 * W + col] += srcVertical[i * W + col] / len; - cfablur[1 * W + col] += srcVertical[(i + 1) * W + col] / len; + cfablur[1 * W + col] += srcVertical[ (i + 1) * W + col] / len; } for (int row = 2; row < boxH + 2; row += 2) { - cfablur[row * W + col] = (cfablur[(row - 2) * W + col] * len + srcVertical[(row + boxH) * W + col]) / (len + 1); - cfablur[(row + 1)*W + col] = (cfablur[(row - 1) * W + col] * len + srcVertical[(row + boxH + 1) * W + col]) / (len + 1); + cfablur[row * W + col] = (cfablur[ (row - 2) * W + col] * len + srcVertical[ (row + boxH) * W + col]) / (len + 1); + cfablur[ (row + 1)*W + col] = (cfablur[ (row - 1) * W + col] * len + srcVertical[ (row + boxH + 1) * W + col]) / (len + 1); len ++; } for (int row = boxH + 2; row < H - boxH; row++) { - cfablur[row * W + col] = cfablur[(row - 2) * W + col] + (srcVertical[(row + boxH) * W + col] - srcVertical[(row - boxH - 2) * W + col]) / len; + cfablur[row * W + col] = cfablur[ (row - 2) * W + col] + (srcVertical[ (row + boxH) * W + col] - srcVertical[ (row - boxH - 2) * W + col]) / len; } for (int row = H - boxH; row < H; row += 2) { - cfablur[row * W + col] = (cfablur[(row - 2) * W + col] * len - srcVertical[(row - boxH - 2) * W + col]) / (len - 1); + cfablur[row * W + col] = (cfablur[ (row - 2) * W + col] * len - srcVertical[ (row - boxH - 2) * W + col]) / (len - 1); if (row + 1 < H) { - cfablur[(row + 1)*W + col] = (cfablur[(row - 1) * W + col] * len - srcVertical[(row - boxH - 1) * W + col]) / (len - 1); + cfablur[ (row + 1)*W + col] = (cfablur[ 
(row - 1) * W + col] * len - srcVertical[ (row - boxH - 1) * W + col]) / (len - 1); } len --; @@ -3263,24 +3261,24 @@ SSEFUNCTION void RawImageSource::cfaboxblur(RawImage *riFlatFile, float* cfablur for (int i = 2; i < boxH + 2; i += 2) { cfablur[0 * W + col] += srcVertical[i * W + col] / len; - cfablur[1 * W + col] += srcVertical[(i + 1) * W + col] / len; + cfablur[1 * W + col] += srcVertical[ (i + 1) * W + col] / len; } for (int row = 2; row < boxH + 2; row += 2) { - cfablur[row * W + col] = (cfablur[(row - 2) * W + col] * len + srcVertical[(row + boxH) * W + col]) / (len + 1); - cfablur[(row + 1)*W + col] = (cfablur[(row - 1) * W + col] * len + srcVertical[(row + boxH + 1) * W + col]) / (len + 1); + cfablur[row * W + col] = (cfablur[ (row - 2) * W + col] * len + srcVertical[ (row + boxH) * W + col]) / (len + 1); + cfablur[ (row + 1)*W + col] = (cfablur[ (row - 1) * W + col] * len + srcVertical[ (row + boxH + 1) * W + col]) / (len + 1); len ++; } for (int row = boxH + 2; row < H - boxH; row++) { - cfablur[row * W + col] = cfablur[(row - 2) * W + col] + (srcVertical[(row + boxH) * W + col] - srcVertical[(row - boxH - 2) * W + col]) / len; + cfablur[row * W + col] = cfablur[ (row - 2) * W + col] + (srcVertical[ (row + boxH) * W + col] - srcVertical[ (row - boxH - 2) * W + col]) / len; } for (int row = H - boxH; row < H; row += 2) { - cfablur[row * W + col] = (cfablur[(row - 2) * W + col] * len - srcVertical[(row - boxH - 2) * W + col]) / (len - 1); + cfablur[row * W + col] = (cfablur[ (row - 2) * W + col] * len - srcVertical[ (row - boxH - 2) * W + col]) / (len - 1); if (row + 1 < H) { - cfablur[(row + 1)*W + col] = (cfablur[(row - 1) * W + col] * len - srcVertical[(row - boxH - 1) * W + col]) / (len - 1); + cfablur[ (row + 1)*W + col] = (cfablur[ (row - 1) * W + col] * len - srcVertical[ (row - boxH - 1) * W + col]) / (len - 1); } len --; @@ -3291,14 +3289,14 @@ SSEFUNCTION void RawImageSource::cfaboxblur(RawImage *riFlatFile, float* cfablur } } - if(tmpBuffer) { + if (tmpBuffer) { free (tmpBuffer); } } // Scale original pixels into the range 0 65535 using black offsets and multipliers -void RawImageSource::scaleColors(int winx, int winy, int winw, int winh, const RAWParams &raw) +void RawImageSource::scaleColors (int winx, int winy, int winw, int winh, const RAWParams &raw) { chmax[0] = chmax[1] = chmax[2] = chmax[3] = 0; //channel maxima float black_lev[4] = {0.f};//black level @@ -3324,14 +3322,14 @@ void RawImageSource::scaleColors(int winx, int winy, int winw, int winh, const R isMono = RAWParams::XTransSensor::methodstring[RAWParams::XTransSensor::mono] == raw.xtranssensor.method; } - for(int i = 0; i < 4 ; i++) { - cblacksom[i] = max( c_black[i] + black_lev[i], 0.0f ); // adjust black level + for (int i = 0; i < 4 ; i++) { + cblacksom[i] = max ( c_black[i] + black_lev[i], 0.0f ); // adjust black level } - initialGain = calculate_scale_mul(scale_mul, ref_pre_mul, c_white, cblacksom, isMono, ri->get_colors()); // recalculate scale colors with adjusted levels + initialGain = calculate_scale_mul (scale_mul, ref_pre_mul, c_white, cblacksom, isMono, ri->get_colors()); // recalculate scale colors with adjusted levels //fprintf(stderr, "recalc: %f [%f %f %f %f]\n", initialGain, scale_mul[0], scale_mul[1], scale_mul[2], scale_mul[3]); - for(int i = 0; i < 4 ; i++) { + for (int i = 0; i < 4 ; i++) { clmax[i] = (c_white[i] - cblacksom[i]) * scale_mul[i]; // raw clip level } @@ -3339,7 +3337,7 @@ void RawImageSource::scaleColors(int winx, int winy, int winw, int winh, const R // scale image 
colors - if( ri->getSensorType() == ST_BAYER) { + if ( ri->getSensorType() == ST_BAYER) { #ifdef _OPENMP #pragma omp parallel #endif @@ -3354,12 +3352,12 @@ void RawImageSource::scaleColors(int winx, int winy, int winw, int winh, const R { for (int col = winx; col < winx + winw; col++) { float val = rawData[row][col]; - int c = FC(row, col); // three colors, 0=R, 1=G, 2=B - int c4 = ( c == 1 && !(row & 1) ) ? 3 : c; // four colors, 0=R, 1=G1, 2=B, 3=G2 + int c = FC (row, col); // three colors, 0=R, 1=G, 2=B + int c4 = ( c == 1 && ! (row & 1) ) ? 3 : c; // four colors, 0=R, 1=G1, 2=B, 3=G2 val -= cblacksom[c4]; val *= scale_mul[c4]; rawData[row][col] = (val); - tmpchmax[c] = max(tmpchmax[c], val); + tmpchmax[c] = max (tmpchmax[c], val); } } @@ -3367,9 +3365,9 @@ void RawImageSource::scaleColors(int winx, int winy, int winw, int winh, const R #pragma omp critical #endif { - chmax[0] = max(tmpchmax[0], chmax[0]); - chmax[1] = max(tmpchmax[1], chmax[1]); - chmax[2] = max(tmpchmax[2], chmax[2]); + chmax[0] = max (tmpchmax[0], chmax[0]); + chmax[1] = max (tmpchmax[1], chmax[1]); + chmax[2] = max (tmpchmax[2], chmax[2]); } } } else if ( ri->get_colors() == 1 ) { @@ -3389,7 +3387,7 @@ void RawImageSource::scaleColors(int winx, int winy, int winw, int winh, const R val -= cblacksom[0]; val *= scale_mul[0]; rawData[row][col] = (val); - tmpchmax = max(tmpchmax, val); + tmpchmax = max (tmpchmax, val); } } @@ -3397,10 +3395,10 @@ void RawImageSource::scaleColors(int winx, int winy, int winw, int winh, const R #pragma omp critical #endif { - chmax[0] = chmax[1] = chmax[2] = chmax[3] = max(tmpchmax, chmax[0]); + chmax[0] = chmax[1] = chmax[2] = chmax[3] = max (tmpchmax, chmax[0]); } } - } else if(ri->getSensorType() == ST_FUJI_XTRANS) { + } else if (ri->getSensorType() == ST_FUJI_XTRANS) { #ifdef _OPENMP #pragma omp parallel #endif @@ -3415,12 +3413,12 @@ void RawImageSource::scaleColors(int winx, int winy, int winw, int winh, const R { for (int col = winx; col < winx + winw; col++) { float val = rawData[row][col]; - int c = ri->XTRANSFC(row, col); + int c = ri->XTRANSFC (row, col); val -= cblacksom[c]; val *= scale_mul[c]; rawData[row][col] = (val); - tmpchmax[c] = max(tmpchmax[c], val); + tmpchmax[c] = max (tmpchmax[c], val); } } @@ -3428,9 +3426,9 @@ void RawImageSource::scaleColors(int winx, int winy, int winw, int winh, const R #pragma omp critical #endif { - chmax[0] = max(tmpchmax[0], chmax[0]); - chmax[1] = max(tmpchmax[1], chmax[1]); - chmax[2] = max(tmpchmax[2], chmax[2]); + chmax[0] = max (tmpchmax[0], chmax[0]); + chmax[1] = max (tmpchmax[1], chmax[1]); + chmax[2] = max (tmpchmax[2], chmax[2]); } } } else { @@ -3452,7 +3450,7 @@ void RawImageSource::scaleColors(int winx, int winy, int winw, int winh, const R val -= cblacksom[c]; val *= scale_mul[c]; rawData[row][3 * col + c] = (val); - tmpchmax[c] = max(tmpchmax[c], val); + tmpchmax[c] = max (tmpchmax[c], val); } } } @@ -3461,9 +3459,9 @@ void RawImageSource::scaleColors(int winx, int winy, int winw, int winh, const R #pragma omp critical #endif { - chmax[0] = max(tmpchmax[0], chmax[0]); - chmax[1] = max(tmpchmax[1], chmax[1]); - chmax[2] = max(tmpchmax[2], chmax[2]); + chmax[0] = max (tmpchmax[0], chmax[0]); + chmax[1] = max (tmpchmax[1], chmax[1]); + chmax[2] = max (tmpchmax[2], chmax[2]); } } chmax[3] = chmax[1]; @@ -3534,8 +3532,8 @@ void RawImageSource::processFalseColorCorrectionThread (Imagefloat* im, array2D int px = (row_from - 1) % 3, cx = row_from % 3, nx = 0; - convert_row_to_YIQ (im->r(row_from - 1), im->g(row_from - 1), 
im->b(row_from - 1), rbconv_Y[px], rbconv_I[px], rbconv_Q[px], W); - convert_row_to_YIQ (im->r(row_from), im->g(row_from), im->b(row_from), rbconv_Y[cx], rbconv_I[cx], rbconv_Q[cx], W); + convert_row_to_YIQ (im->r (row_from - 1), im->g (row_from - 1), im->b (row_from - 1), rbconv_Y[px], rbconv_I[px], rbconv_Q[px], W); + convert_row_to_YIQ (im->r (row_from), im->g (row_from), im->b (row_from), rbconv_Y[cx], rbconv_I[cx], rbconv_Q[cx], W); for (int j = 0; j < W; j++) { rbout_I[px][j] = rbconv_I[px][j]; @@ -3548,11 +3546,11 @@ void RawImageSource::processFalseColorCorrectionThread (Imagefloat* im, array2D cx = i % 3; nx = (i + 1) % 3; - convert_row_to_YIQ (im->r(i + 1), im->g(i + 1), im->b(i + 1), rbconv_Y[nx], rbconv_I[nx], rbconv_Q[nx], W); + convert_row_to_YIQ (im->r (i + 1), im->g (i + 1), im->b (i + 1), rbconv_Y[nx], rbconv_I[nx], rbconv_Q[nx], W); #ifdef __SSE2__ - pre1[0] = _mm_setr_ps(rbconv_I[px][0], rbconv_Q[px][0], 0, 0) , pre1[1] = _mm_setr_ps(rbconv_I[cx][0], rbconv_Q[cx][0], 0, 0), pre1[2] = _mm_setr_ps(rbconv_I[nx][0], rbconv_Q[nx][0], 0, 0); - pre2[0] = _mm_setr_ps(rbconv_I[px][1], rbconv_Q[px][1], 0, 0) , pre2[1] = _mm_setr_ps(rbconv_I[cx][1], rbconv_Q[cx][1], 0, 0), pre2[2] = _mm_setr_ps(rbconv_I[nx][1], rbconv_Q[nx][1], 0, 0); + pre1[0] = _mm_setr_ps (rbconv_I[px][0], rbconv_Q[px][0], 0, 0) , pre1[1] = _mm_setr_ps (rbconv_I[cx][0], rbconv_Q[cx][0], 0, 0), pre1[2] = _mm_setr_ps (rbconv_I[nx][0], rbconv_Q[nx][0], 0, 0); + pre2[0] = _mm_setr_ps (rbconv_I[px][1], rbconv_Q[px][1], 0, 0) , pre2[1] = _mm_setr_ps (rbconv_I[cx][1], rbconv_Q[cx][1], 0, 0), pre2[2] = _mm_setr_ps (rbconv_I[nx][1], rbconv_Q[nx][1], 0, 0); // fill first element in rbout_I and rbout_Q rbout_I[cx][0] = rbconv_I[cx][0]; @@ -3560,17 +3558,17 @@ void RawImageSource::processFalseColorCorrectionThread (Imagefloat* im, array2D // median I channel for (int j = 1; j < W - 2; j += 2) { - post1[0] = _mm_setr_ps(rbconv_I[px][j + 1], rbconv_Q[px][j + 1], 0, 0), post1[1] = _mm_setr_ps(rbconv_I[cx][j + 1], rbconv_Q[cx][j + 1], 0, 0), post1[2] = _mm_setr_ps(rbconv_I[nx][j + 1], rbconv_Q[nx][j + 1], 0, 0); - const auto middle = middle4of6(pre2[0], pre2[1], pre2[2], post1[0], post1[1], post1[2]); - vfloat medianval = median(pre1[0], pre1[1], pre1[2], middle[0], middle[1], middle[2], middle[3]); + post1[0] = _mm_setr_ps (rbconv_I[px][j + 1], rbconv_Q[px][j + 1], 0, 0), post1[1] = _mm_setr_ps (rbconv_I[cx][j + 1], rbconv_Q[cx][j + 1], 0, 0), post1[2] = _mm_setr_ps (rbconv_I[nx][j + 1], rbconv_Q[nx][j + 1], 0, 0); + const auto middle = middle4of6 (pre2[0], pre2[1], pre2[2], post1[0], post1[1], post1[2]); + vfloat medianval = median (pre1[0], pre1[1], pre1[2], middle[0], middle[1], middle[2], middle[3]); rbout_I[cx][j] = medianval[0]; rbout_Q[cx][j] = medianval[1]; - post2[0] = _mm_setr_ps(rbconv_I[px][j + 2], rbconv_Q[px][j + 2], 0, 0), post2[1] = _mm_setr_ps(rbconv_I[cx][j + 2], rbconv_Q[cx][j + 2], 0, 0), post2[2] = _mm_setr_ps(rbconv_I[nx][j + 2], rbconv_Q[nx][j + 2], 0, 0); - medianval = median(post2[0], post2[1], post2[2], middle[0], middle[1], middle[2], middle[3]); + post2[0] = _mm_setr_ps (rbconv_I[px][j + 2], rbconv_Q[px][j + 2], 0, 0), post2[1] = _mm_setr_ps (rbconv_I[cx][j + 2], rbconv_Q[cx][j + 2], 0, 0), post2[2] = _mm_setr_ps (rbconv_I[nx][j + 2], rbconv_Q[nx][j + 2], 0, 0); + medianval = median (post2[0], post2[1], post2[2], middle[0], middle[1], middle[2], middle[3]); rbout_I[cx][j + 1] = medianval[0]; rbout_Q[cx][j + 1] = medianval[1]; - std::swap(pre1, post1); - std::swap(pre2, post2); + std::swap (pre1, 
post1); + std::swap (pre2, post2); } // fill last elements in rbout_I and rbout_Q @@ -3589,12 +3587,12 @@ void RawImageSource::processFalseColorCorrectionThread (Imagefloat* im, array2D // median I channel for (int j = 1; j < W - 2; j += 2) { post1[0] = rbconv_I[px][j + 1], post1[1] = rbconv_I[cx][j + 1], post1[2] = rbconv_I[nx][j + 1]; - const auto middle = middle4of6(pre2[0], pre2[1], pre2[2], post1[0], post1[1], post1[2]); - rbout_I[cx][j] = median(pre1[0], pre1[1], pre1[2], middle[0], middle[1], middle[2], middle[3]); + const auto middle = middle4of6 (pre2[0], pre2[1], pre2[2], post1[0], post1[1], post1[2]); + rbout_I[cx][j] = median (pre1[0], pre1[1], pre1[2], middle[0], middle[1], middle[2], middle[3]); post2[0] = rbconv_I[px][j + 2], post2[1] = rbconv_I[cx][j + 2], post2[2] = rbconv_I[nx][j + 2]; - rbout_I[cx][j + 1] = median(post2[0], post2[1], post2[2], middle[0], middle[1], middle[2], middle[3]); - std::swap(pre1, post1); - std::swap(pre2, post2); + rbout_I[cx][j + 1] = median (post2[0], post2[1], post2[2], middle[0], middle[1], middle[2], middle[3]); + std::swap (pre1, post1); + std::swap (pre2, post2); } // fill last elements in rbout_I @@ -3610,12 +3608,12 @@ void RawImageSource::processFalseColorCorrectionThread (Imagefloat* im, array2D // median Q channel for (int j = 1; j < W - 2; j += 2) { post1[0] = rbconv_Q[px][j + 1], post1[1] = rbconv_Q[cx][j + 1], post1[2] = rbconv_Q[nx][j + 1]; - const auto middle = middle4of6(pre2[0], pre2[1], pre2[2], post1[0], post1[1], post1[2]); - rbout_Q[cx][j] = median(pre1[0], pre1[1], pre1[2], middle[0], middle[1], middle[2], middle[3]); + const auto middle = middle4of6 (pre2[0], pre2[1], pre2[2], post1[0], post1[1], post1[2]); + rbout_Q[cx][j] = median (pre1[0], pre1[1], pre1[2], middle[0], middle[1], middle[2], middle[3]); post2[0] = rbconv_Q[px][j + 2], post2[1] = rbconv_Q[cx][j + 2], post2[2] = rbconv_Q[nx][j + 2]; - rbout_Q[cx][j + 1] = median(post2[0], post2[1], post2[2], middle[0], middle[1], middle[2], middle[3]); - std::swap(pre1, post1); - std::swap(pre2, post2); + rbout_Q[cx][j + 1] = median (post2[0], post2[1], post2[2], middle[0], middle[1], middle[2], middle[3]); + std::swap (pre1, post1); + std::swap (pre2, post2); } // fill last elements in rbout_Q @@ -3625,7 +3623,7 @@ void RawImageSource::processFalseColorCorrectionThread (Imagefloat* im, array2D // blur i-1th row if (i > row_from) { - convert_to_RGB (im->r(i - 1, 0), im->g(i - 1, 0), im->b(i - 1, 0), rbconv_Y[px][0], rbout_I[px][0], rbout_Q[px][0]); + convert_to_RGB (im->r (i - 1, 0), im->g (i - 1, 0), im->b (i - 1, 0), rbconv_Y[px][0], rbout_I[px][0], rbout_Q[px][0]); #ifdef _OPENMP #pragma omp simd @@ -3634,15 +3632,15 @@ void RawImageSource::processFalseColorCorrectionThread (Imagefloat* im, array2D for (int j = 1; j < W - 1; j++) { float I = (rbout_I[px][j - 1] + rbout_I[px][j] + rbout_I[px][j + 1] + rbout_I[cx][j - 1] + rbout_I[cx][j] + rbout_I[cx][j + 1] + rbout_I[nx][j - 1] + rbout_I[nx][j] + rbout_I[nx][j + 1]) * onebynine; float Q = (rbout_Q[px][j - 1] + rbout_Q[px][j] + rbout_Q[px][j + 1] + rbout_Q[cx][j - 1] + rbout_Q[cx][j] + rbout_Q[cx][j + 1] + rbout_Q[nx][j - 1] + rbout_Q[nx][j] + rbout_Q[nx][j + 1]) * onebynine; - convert_to_RGB (im->r(i - 1, j), im->g(i - 1, j), im->b(i - 1, j), rbconv_Y[px][j], I, Q); + convert_to_RGB (im->r (i - 1, j), im->g (i - 1, j), im->b (i - 1, j), rbconv_Y[px][j], I, Q); } - convert_to_RGB (im->r(i - 1, W - 1), im->g(i - 1, W - 1), im->b(i - 1, W - 1), rbconv_Y[px][W - 1], rbout_I[px][W - 1], rbout_Q[px][W - 1]); + convert_to_RGB 
(im->r (i - 1, W - 1), im->g (i - 1, W - 1), im->b (i - 1, W - 1), rbconv_Y[px][W - 1], rbout_I[px][W - 1], rbout_Q[px][W - 1]); } } // blur last 3 row and finalize H-1th row - convert_to_RGB (im->r(row_to - 1, 0), im->g(row_to - 1, 0), im->b(row_to - 1, 0), rbconv_Y[cx][0], rbout_I[cx][0], rbout_Q[cx][0]); + convert_to_RGB (im->r (row_to - 1, 0), im->g (row_to - 1, 0), im->b (row_to - 1, 0), rbconv_Y[cx][0], rbout_I[cx][0], rbout_Q[cx][0]); #ifdef _OPENMP #pragma omp simd #endif @@ -3650,10 +3648,10 @@ void RawImageSource::processFalseColorCorrectionThread (Imagefloat* im, array2D for (int j = 1; j < W - 1; j++) { float I = (rbout_I[px][j - 1] + rbout_I[px][j] + rbout_I[px][j + 1] + rbout_I[cx][j - 1] + rbout_I[cx][j] + rbout_I[cx][j + 1] + rbconv_I[nx][j - 1] + rbconv_I[nx][j] + rbconv_I[nx][j + 1]) * onebynine; float Q = (rbout_Q[px][j - 1] + rbout_Q[px][j] + rbout_Q[px][j + 1] + rbout_Q[cx][j - 1] + rbout_Q[cx][j] + rbout_Q[cx][j + 1] + rbconv_Q[nx][j - 1] + rbconv_Q[nx][j] + rbconv_Q[nx][j + 1]) * onebynine; - convert_to_RGB (im->r(row_to - 1, j), im->g(row_to - 1, j), im->b(row_to - 1, j), rbconv_Y[cx][j], I, Q); + convert_to_RGB (im->r (row_to - 1, j), im->g (row_to - 1, j), im->b (row_to - 1, j), rbconv_Y[cx][j], I, Q); } - convert_to_RGB (im->r(row_to - 1, W - 1), im->g(row_to - 1, W - 1), im->b(row_to - 1, W - 1), rbconv_Y[cx][W - 1], rbout_I[cx][W - 1], rbout_Q[cx][W - 1]); + convert_to_RGB (im->r (row_to - 1, W - 1), im->g (row_to - 1, W - 1), im->b (row_to - 1, W - 1), rbconv_Y[cx][W - 1], rbout_I[cx][W - 1], rbout_Q[cx][W - 1]); } //%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -3697,7 +3695,7 @@ void RawImageSource::processFalseColorCorrection (Imagefloat* im, const int ste // Some camera input profiles need gamma preprocessing // gamma is applied before the CMS, correct line fac=lineFac*rawPixel+LineSum after the CMS -void RawImageSource::getProfilePreprocParams(cmsHPROFILE in, float& gammaFac, float& lineFac, float& lineSum) +void RawImageSource::getProfilePreprocParams (cmsHPROFILE in, float& gammaFac, float& lineFac, float& lineSum) { gammaFac = 0; lineFac = 1; @@ -3706,10 +3704,10 @@ void RawImageSource::getProfilePreprocParams(cmsHPROFILE in, float& gammaFac, fl char copyright[256]; copyright[0] = 0; - if (cmsGetProfileInfoASCII(in, cmsInfoCopyright, cmsNoLanguage, cmsNoCountry, copyright, 256) > 0) { - if (strstr(copyright, "Phase One") != nullptr) { + if (cmsGetProfileInfoASCII (in, cmsInfoCopyright, cmsNoLanguage, cmsNoCountry, copyright, 256) > 0) { + if (strstr (copyright, "Phase One") != nullptr) { gammaFac = 0.55556; // 1.8 - } else if (strstr(copyright, "Nikon Corporation") != nullptr) { + } else if (strstr (copyright, "Nikon Corporation") != nullptr) { gammaFac = 0.5; lineFac = -0.4; lineSum = 1.35; // determined in reverse by measuring NX an RT developed colorchecker PNGs @@ -3720,7 +3718,7 @@ void RawImageSource::getProfilePreprocParams(cmsHPROFILE in, float& gammaFac, fl //%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% static void -lab2ProphotoRgbD50(float L, float A, float B, float& r, float& g, float& b) +lab2ProphotoRgbD50 (float L, float A, float B, float& r, float& g, float& b) { float X; float Y; @@ -3760,9 +3758,9 @@ lab2ProphotoRgbD50(float L, float A, float B, float& r, float& g, float& b) r = prophoto_xyz[0][0] * X + prophoto_xyz[0][1] * Y + prophoto_xyz[0][2] * Z; g = prophoto_xyz[1][0] * X + prophoto_xyz[1][1] * Y + prophoto_xyz[1][2] * Z; b = prophoto_xyz[2][0] * X + prophoto_xyz[2][1] * Y + prophoto_xyz[2][2] * Z; - r = CLIP01(r); - g = CLIP01(g); - b = 
CLIP01(b); + r = CLIP01 (r); + g = CLIP01 (g); + b = CLIP01 (b); } // Converts raw image including ICC input profile to working space - floating point version @@ -3774,7 +3772,7 @@ void RawImageSource::colorSpaceConversion_ (Imagefloat* im, ColorManagementParam cmsHPROFILE in; DCPProfile *dcpProf; - if (!findInputProfile(cmp.input, embedded, camName, &dcpProf, in)) { + if (!findInputProfile (cmp.input, embedded, camName, &dcpProf, in)) { return; } @@ -3791,7 +3789,7 @@ void RawImageSource::colorSpaceConversion_ (Imagefloat* im, ColorManagementParam {camMatrix[2][0], camMatrix[2][1], camMatrix[2][2]} } }; - dcpProf->apply(im, cmp.dcpIlluminant, cmp.working, wb, pre_mul_row, cam_matrix, cmp.applyHueSatMap); + dcpProf->apply (im, cmp.dcpIlluminant, cmp.working, wb, pre_mul_row, cam_matrix, cmp.applyHueSatMap); return; } @@ -3816,13 +3814,13 @@ void RawImageSource::colorSpaceConversion_ (Imagefloat* im, ColorManagementParam for (int i = 0; i < im->height; i++) for (int j = 0; j < im->width; j++) { - float newr = mat[0][0] * im->r(i, j) + mat[0][1] * im->g(i, j) + mat[0][2] * im->b(i, j); - float newg = mat[1][0] * im->r(i, j) + mat[1][1] * im->g(i, j) + mat[1][2] * im->b(i, j); - float newb = mat[2][0] * im->r(i, j) + mat[2][1] * im->g(i, j) + mat[2][2] * im->b(i, j); + float newr = mat[0][0] * im->r (i, j) + mat[0][1] * im->g (i, j) + mat[0][2] * im->b (i, j); + float newg = mat[1][0] * im->r (i, j) + mat[1][1] * im->g (i, j) + mat[1][2] * im->b (i, j); + float newb = mat[2][0] * im->r (i, j) + mat[2][1] * im->g (i, j) + mat[2][2] * im->b (i, j); - im->r(i, j) = newr; - im->g(i, j) = newg; - im->b(i, j) = newb; + im->r (i, j) = newr; + im->g (i, j) = newg; + im->b (i, j) = newb; } } else { @@ -3866,33 +3864,33 @@ void RawImageSource::colorSpaceConversion_ (Imagefloat* im, ColorManagementParam char copyright[256] = ""; char description[256] = ""; - cmsGetProfileInfoASCII(in, cmsInfoCopyright, cmsNoLanguage, cmsNoCountry, copyright, 256); - cmsGetProfileInfoASCII(in, cmsInfoDescription, cmsNoLanguage, cmsNoCountry, description, 256); + cmsGetProfileInfoASCII (in, cmsInfoCopyright, cmsNoLanguage, cmsNoCountry, copyright, 256); + cmsGetProfileInfoASCII (in, cmsInfoDescription, cmsNoLanguage, cmsNoCountry, description, 256); camera_icc_type = CAMERA_ICC_TYPE_GENERIC; // Note: order the identification with the most detailed matching first since the more general ones may also match the more detailed - if ((strstr(copyright, "Leaf") != nullptr || - strstr(copyright, "Phase One A/S") != nullptr || - strstr(copyright, "Kodak") != nullptr || - strstr(copyright, "Creo") != nullptr) && - (strstr(description, "LF2 ") == description || - strstr(description, "LF3 ") == description || - strstr(description, "LeafLF2") == description || - strstr(description, "LeafLF3") == description || - strstr(description, "LeafLF4") == description || - strstr(description, "MamiyaLF2") == description || - strstr(description, "MamiyaLF3") == description)) { + if ((strstr (copyright, "Leaf") != nullptr || + strstr (copyright, "Phase One A/S") != nullptr || + strstr (copyright, "Kodak") != nullptr || + strstr (copyright, "Creo") != nullptr) && + (strstr (description, "LF2 ") == description || + strstr (description, "LF3 ") == description || + strstr (description, "LeafLF2") == description || + strstr (description, "LeafLF3") == description || + strstr (description, "LeafLF4") == description || + strstr (description, "MamiyaLF2") == description || + strstr (description, "MamiyaLF3") == description)) { camera_icc_type = 
CAMERA_ICC_TYPE_LEAF; - } else if (strstr(copyright, "Phase One A/S") != nullptr) { + } else if (strstr (copyright, "Phase One A/S") != nullptr) { camera_icc_type = CAMERA_ICC_TYPE_PHASE_ONE; - } else if (strstr(copyright, "Nikon Corporation") != nullptr) { + } else if (strstr (copyright, "Nikon Corporation") != nullptr) { camera_icc_type = CAMERA_ICC_TYPE_NIKON; } } // Initialize transform cmsHTRANSFORM hTransform; - cmsHPROFILE prophoto = iccStore->workingSpace("ProPhoto"); // We always use Prophoto to apply the ICC profile to minimize problems with clipping in LUT conversion. + cmsHPROFILE prophoto = iccStore->workingSpace ("ProPhoto"); // We always use Prophoto to apply the ICC profile to minimize problems with clipping in LUT conversion. bool transform_via_pcs_lab = false; bool separate_pcs_lab_highlights = false; lcmsMutex->lock (); @@ -3946,28 +3944,28 @@ void RawImageSource::colorSpaceConversion_ (Imagefloat* im, ColorManagementParam #pragma omp parallel #endif { - AlignedBuffer buffer(im->width * 3); - AlignedBuffer hl_buffer(im->width * 3); - AlignedBuffer hl_scale(im->width); + AlignedBuffer buffer (im->width * 3); + AlignedBuffer hl_buffer (im->width * 3); + AlignedBuffer hl_scale (im->width); #ifdef _OPENMP #pragma omp for schedule(static) #endif for ( int h = 0; h < im->height; ++h ) { - float *p = buffer.data, *pR = im->r(h), *pG = im->g(h), *pB = im->b(h); + float *p = buffer.data, *pR = im->r (h), *pG = im->g (h), *pB = im->b (h); // Apply pre-processing for ( int w = 0; w < im->width; ++w ) { - float r = *(pR++); - float g = *(pG++); - float b = *(pB++); + float r = * (pR++); + float g = * (pG++); + float b = * (pB++); // convert to 0-1 range as LCMS expects that r /= 65535.0f; g /= 65535.0f; b /= 65535.0f; - float maxc = max(r, g, b); + float maxc = max (r, g, b); if (maxc <= 1.0) { hl_scale.data[w] = 1.0; @@ -3987,12 +3985,12 @@ void RawImageSource::colorSpaceConversion_ (Imagefloat* im, ColorManagementParam // We will revert this curve after we've made the color transform. However when we revert the curve, we'll notice that // highlight rendering suffers due to that the LUT transform don't expand well, therefore we do a less compressed // conversion too and mix them, this gives us the highest quality and most flexible result. 
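
Editorial note on the pre/post curve trick described in the comment above: the inverse Phase One curve is applied before the LUT-based ICC transform and the forward curve is applied again afterwards, while a plain gamma-1/1.8 copy of each pixel is kept so the highlights can be mixed back in after the transform. The standalone sketch below only illustrates that mixing step; the 0.25 threshold and the double square root mirror the hunk further below, but the function name and signature are mine and are not part of the patch.

#include <cmath>

// Illustrative only: 'lut' is the value that went through the inverse/forward
// curve pair and the ICC transform, 'hl' is the less compressed gamma-1/1.8
// rendering, 'maxc' is the pixel's maximum linear channel value in [0,1].
static float mixHighlights (float lut, float hl, float maxc)
{
    const float mix = 0.25f;               // mixing starts about 2 stops below clipping

    if (maxc <= mix) {
        return lut;                        // well below the highlights: keep the LUT result
    }

    float fac = (maxc - mix) / (1.f - mix);
    fac = std::sqrt (std::sqrt (fac));     // gamma 0.25 so the mix ramps in quickly
    return (1.f - fac) * lut + fac * hl;
}
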
- hl_buffer.data[3 * w + 0] = pow_F(r, 1.0 / 1.8); - hl_buffer.data[3 * w + 1] = pow_F(g, 1.0 / 1.8); - hl_buffer.data[3 * w + 2] = pow_F(b, 1.0 / 1.8); - r = phaseOneIccCurveInv->getVal(r); - g = phaseOneIccCurveInv->getVal(g); - b = phaseOneIccCurveInv->getVal(b); + hl_buffer.data[3 * w + 0] = pow_F (r, 1.0 / 1.8); + hl_buffer.data[3 * w + 1] = pow_F (g, 1.0 / 1.8); + hl_buffer.data[3 * w + 2] = pow_F (b, 1.0 / 1.8); + r = phaseOneIccCurveInv->getVal (r); + g = phaseOneIccCurveInv->getVal (g); + b = phaseOneIccCurveInv->getVal (b); break; case CAMERA_ICC_TYPE_LEAF: { @@ -4000,20 +3998,20 @@ void RawImageSource::colorSpaceConversion_ (Imagefloat* im, ColorManagementParam float newr = leaf_prophoto_mat[0][0] * r + leaf_prophoto_mat[0][1] * g + leaf_prophoto_mat[0][2] * b; float newg = leaf_prophoto_mat[1][0] * r + leaf_prophoto_mat[1][1] * g + leaf_prophoto_mat[1][2] * b; float newb = leaf_prophoto_mat[2][0] * r + leaf_prophoto_mat[2][1] * g + leaf_prophoto_mat[2][2] * b; - hl_buffer.data[3 * w + 0] = pow_F(newr, 1.0 / 1.8); - hl_buffer.data[3 * w + 1] = pow_F(newg, 1.0 / 1.8); - hl_buffer.data[3 * w + 2] = pow_F(newb, 1.0 / 1.8); - r = phaseOneIccCurveInv->getVal(newr); - g = phaseOneIccCurveInv->getVal(newg); - b = phaseOneIccCurveInv->getVal(newb); + hl_buffer.data[3 * w + 0] = pow_F (newr, 1.0 / 1.8); + hl_buffer.data[3 * w + 1] = pow_F (newg, 1.0 / 1.8); + hl_buffer.data[3 * w + 2] = pow_F (newb, 1.0 / 1.8); + r = phaseOneIccCurveInv->getVal (newr); + g = phaseOneIccCurveInv->getVal (newg); + b = phaseOneIccCurveInv->getVal (newb); break; } case CAMERA_ICC_TYPE_NIKON: // gamma 0.5 - r = sqrtf(r); - g = sqrtf(g); - b = sqrtf(b); + r = sqrtf (r); + g = sqrtf (g); + b = sqrtf (b); break; case CAMERA_ICC_TYPE_GENERIC: @@ -4022,9 +4020,9 @@ void RawImageSource::colorSpaceConversion_ (Imagefloat* im, ColorManagementParam break; } - *(p++) = r; - *(p++) = g; - *(p++) = b; + * (p++) = r; + * (p++) = g; + * (p++) = b; } // Run icc transform @@ -4036,28 +4034,28 @@ void RawImageSource::colorSpaceConversion_ (Imagefloat* im, ColorManagementParam // Apply post-processing p = buffer.data; - pR = im->r(h); - pG = im->g(h); - pB = im->b(h); + pR = im->r (h); + pG = im->g (h); + pB = im->b (h); for ( int w = 0; w < im->width; ++w ) { float r, g, b, hr, hg, hb; if (transform_via_pcs_lab) { - float L = *(p++); - float A = *(p++); - float B = *(p++); + float L = * (p++); + float A = * (p++); + float B = * (p++); // profile connection space CIELAB should have D50 illuminant - lab2ProphotoRgbD50(L, A, B, r, g, b); + lab2ProphotoRgbD50 (L, A, B, r, g, b); if (separate_pcs_lab_highlights) { - lab2ProphotoRgbD50(hl_buffer.data[3 * w + 0], hl_buffer.data[3 * w + 1], hl_buffer.data[3 * w + 2], hr, hg, hb); + lab2ProphotoRgbD50 (hl_buffer.data[3 * w + 0], hl_buffer.data[3 * w + 1], hl_buffer.data[3 * w + 2], hr, hg, hb); } } else { - r = *(p++); - g = *(p++); - b = *(p++); + r = * (p++); + g = * (p++); + b = * (p++); } // restore pre-processing and/or add post-processing for the various ICC types @@ -4068,15 +4066,15 @@ void RawImageSource::colorSpaceConversion_ (Imagefloat* im, ColorManagementParam case CAMERA_ICC_TYPE_PHASE_ONE: case CAMERA_ICC_TYPE_LEAF: { // note the 1/1.8 gamma, it's the gamma that the profile has applied, which we must revert before we can revert the curve - r = phaseOneIccCurve->getVal(pow_F(r, 1.0 / 1.8)); - g = phaseOneIccCurve->getVal(pow_F(g, 1.0 / 1.8)); - b = phaseOneIccCurve->getVal(pow_F(b, 1.0 / 1.8)); + r = phaseOneIccCurve->getVal (pow_F (r, 1.0 / 1.8)); + g = 
phaseOneIccCurve->getVal (pow_F (g, 1.0 / 1.8)); + b = phaseOneIccCurve->getVal (pow_F (b, 1.0 / 1.8)); const float mix = 0.25; // may seem a low number, but remember this is linear space, mixing starts 2 stops from clipping - const float maxc = max(r, g, b); + const float maxc = max (r, g, b); if (maxc > mix) { float fac = (maxc - mix) / (1.0 - mix); - fac = sqrtf(sqrtf(fac)); // gamma 0.25 to mix in highlight render relatively quick + fac = sqrtf (sqrtf (fac)); // gamma 0.25 to mix in highlight render relatively quick r = (1.0 - fac) * r + fac * hr; g = (1.0 - fac) * g + fac * hg; b = (1.0 - fac) * b + fac * hb; @@ -4121,13 +4119,13 @@ void RawImageSource::colorSpaceConversion_ (Imagefloat* im, ColorManagementParam g *= 65535.0; b *= 65535.0; - *(pR++) = r; - *(pG++) = g; - *(pB++) = b; + * (pR++) = r; + * (pG++) = g; + * (pB++) = b; } } } // End of parallelization - cmsDeleteTransform(hTransform); + cmsDeleteTransform (hTransform); } //t3.set (); @@ -4136,7 +4134,7 @@ void RawImageSource::colorSpaceConversion_ (Imagefloat* im, ColorManagementParam // Determine RAW input and output profiles. Returns TRUE on success -bool RawImageSource::findInputProfile(Glib::ustring inProfile, cmsHPROFILE embedded, std::string camName, DCPProfile **dcpProf, cmsHPROFILE& in) +bool RawImageSource::findInputProfile (Glib::ustring inProfile, cmsHPROFILE embedded, std::string camName, DCPProfile **dcpProf, cmsHPROFILE& in) { in = nullptr; // cam will be taken on NULL *dcpProf = nullptr; @@ -4149,20 +4147,20 @@ bool RawImageSource::findInputProfile(Glib::ustring inProfile, cmsHPROFILE embed in = embedded; } else if (inProfile == "(cameraICC)") { // DCPs have higher quality, so use them first - *dcpProf = DCPStore::getInstance()->getStdProfile(camName); + *dcpProf = DCPStore::getInstance()->getStdProfile (camName); if (*dcpProf == nullptr) { - in = iccStore->getStdProfile(camName); + in = iccStore->getStdProfile (camName); } } else if (inProfile != "(camera)" && inProfile != "") { Glib::ustring normalName = inProfile; if (!inProfile.compare (0, 5, "file:")) { - normalName = inProfile.substr(5); + normalName = inProfile.substr (5); } - if (DCPStore::getInstance()->isValidDCPFileName(normalName)) { - *dcpProf = DCPStore::getInstance()->getProfile(normalName); + if (DCPStore::getInstance()->isValidDCPFileName (normalName)) { + *dcpProf = DCPStore::getInstance()->getProfile (normalName); } if (*dcpProf == nullptr) { @@ -4178,7 +4176,7 @@ bool RawImageSource::findInputProfile(Glib::ustring inProfile, cmsHPROFILE embed //%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% // derived from Dcraw "blend_highlights()" // very effective to reduce (or remove) the magenta, but with levels of grey ! 
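
For readers unfamiliar with dcraw's blend_highlights(), the core of the method touched by the next hunk is a chroma ratio: the pixel is converted to a lightness/chroma representation twice, once from the raw values and once from the clipped values, and the chroma of the raw-derived version is scaled by the ratio of the two chroma magnitudes, which suppresses the magenta cast of blown highlights. A minimal sketch of just that step follows, with lab[0] holding the raw-derived triplet and lab[1] the clip-derived one; the helper name and the small denominator guard are mine, not from the patch.

#include <algorithm>
#include <cmath>

// lab[i][0] is lightness, lab[i][1..2] are the two chroma components.
static void shrinkChroma (float lab[2][3])
{
    float sum[2] = {0.f, 0.f};

    for (int i = 0; i < 2; i++) {
        for (int c = 1; c < 3; c++) {
            sum[i] += lab[i][c] * lab[i][c];          // squared chroma magnitude
        }
    }

    const float chratio = std::sqrt (sum[1] / std::max (sum[0], 1e-9f));

    for (int c = 1; c < 3; c++) {
        lab[0][c] *= chratio;                         // scale chroma, lightness stays
    }
}
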
-void RawImageSource::HLRecovery_blend(float* rin, float* gin, float* bin, int width, float maxval, float* hlmax) +void RawImageSource::HLRecovery_blend (float* rin, float* gin, float* bin, int width, float maxval, float* hlmax) { const int ColorCount = 3; @@ -4194,7 +4192,7 @@ void RawImageSource::HLRecovery_blend(float* rin, float* gin, float* bin, int wi #define FOREACHCOLOR for (int c=0; c < ColorCount; c++) - float minpt = min(hlmax[0], hlmax[1], hlmax[2]); //min of the raw clip points + float minpt = min (hlmax[0], hlmax[1], hlmax[2]); //min of the raw clip points //float maxpt=max(hlmax[0],hlmax[1],hlmax[2]);//max of the raw clip points //float medpt=hlmax[0]+hlmax[1]+hlmax[2]-minpt-maxpt;//median of the raw clip points float maxave = (hlmax[0] + hlmax[1] + hlmax[2]) / 3; //ave of the raw clip points @@ -4204,7 +4202,7 @@ void RawImageSource::HLRecovery_blend(float* rin, float* gin, float* bin, int wi const float satthresh = 0.5; float clip[3]; - FOREACHCOLOR clip[c] = min(maxave, hlmax[c]); + FOREACHCOLOR clip[c] = min (maxave, hlmax[c]); // Determine the maximum level (clip) of all channels const float clippt = clipthresh * maxval; @@ -4235,9 +4233,9 @@ void RawImageSource::HLRecovery_blend(float* rin, float* gin, float* bin, int wi // Initialize cam with raw input [0] and potentially clipped input [1] FOREACHCOLOR { - lratio += min(rgb[c], clip[c]); + lratio += min (rgb[c], clip[c]); cam[0][c] = rgb[c]; - cam[1][c] = min(cam[0][c], maxval); + cam[1][c] = min (cam[0][c], maxval); } // Calculate the lightness correction ratio (chratio) @@ -4254,11 +4252,11 @@ void RawImageSource::HLRecovery_blend(float* rin, float* gin, float* bin, int wi sum[i] = 0; for (int c = 1; c < ColorCount; c++) { - sum[i] += SQR(lab[i][c]); + sum[i] += SQR (lab[i][c]); } } - chratio = (sqrt(sum[1] / sum[0])); + chratio = (sqrt (sum[1] / sum[0])); // Apply ratio to lightness in LCH space for (int c = 1; c < ColorCount; c++) { @@ -4278,18 +4276,18 @@ void RawImageSource::HLRecovery_blend(float* rin, float* gin, float* bin, int wi // Copy converted pixel back if (rin[col] > fixpt) { - float rfrac = SQR((min(clip[0], rin[col]) - fixpt) / (clip[0] - fixpt)); - rin[col] = min(maxave, rfrac * rgb[0] + (1 - rfrac) * rin[col]); + float rfrac = SQR ((min (clip[0], rin[col]) - fixpt) / (clip[0] - fixpt)); + rin[col] = min (maxave, rfrac * rgb[0] + (1 - rfrac) * rin[col]); } if (gin[col] > fixpt) { - float gfrac = SQR((min(clip[1], gin[col]) - fixpt) / (clip[1] - fixpt)); - gin[col] = min(maxave, gfrac * rgb[1] + (1 - gfrac) * gin[col]); + float gfrac = SQR ((min (clip[1], gin[col]) - fixpt) / (clip[1] - fixpt)); + gin[col] = min (maxave, gfrac * rgb[1] + (1 - gfrac) * gin[col]); } if (bin[col] > fixpt) { - float bfrac = SQR((min(clip[2], bin[col]) - fixpt) / (clip[2] - fixpt)); - bin[col] = min(maxave, bfrac * rgb[2] + (1 - bfrac) * bin[col]); + float bfrac = SQR ((min (clip[2], bin[col]) - fixpt) / (clip[2] - fixpt)); + bin[col] = min (maxave, bfrac * rgb[2] + (1 - bfrac) * bin[col]); } lratio /= (rin[col] + gin[col] + bin[col]); @@ -4301,7 +4299,7 @@ void RawImageSource::HLRecovery_blend(float* rin, float* gin, float* bin, int wi bin[col] = L + H / 3.0; if ((L = (rin[col] + gin[col] + bin[col]) / 3) > desatpt) { - float Lfrac = max(0.0f, (maxave - L) / (maxave - desatpt)); + float Lfrac = max (0.0f, (maxave - L) / (maxave - desatpt)); C = Lfrac * 1.732050808 * (rin[col] - gin[col]); H = Lfrac * (2 * bin[col] - rin[col] - gin[col]); rin[col] = L - H / 6.0 + C / 3.464101615; @@ -4318,9 +4316,9 @@ void 
RawImageSource::HLRecovery_Luminance (float* rin, float* gin, float* bin, f float r = rin[i], g = gin[i], b = bin[i]; if (r > maxval || g > maxval || b > maxval) { - float ro = min(r, maxval); - float go = min(g, maxval); - float bo = min(b, maxval); + float ro = min (r, maxval); + float go = min (g, maxval); + float bo = min (b, maxval); double L = r + g + b; double C = 1.732050808 * (r - g); double H = 2 * b - r - g; @@ -4368,24 +4366,24 @@ void RawImageSource::HLRecovery_CIELab (float* rin, float* gin, float* bin, floa float r = rin[i], g = gin[i], b = bin[i]; if (r > maxval || g > maxval || b > maxval) { - float ro = min(r, maxval); - float go = min(g, maxval); - float bo = min(b, maxval); + float ro = min (r, maxval); + float go = min (g, maxval); + float bo = min (b, maxval); float yy = xyz_cam[1][0] * r + xyz_cam[1][1] * g + xyz_cam[1][2] * b; - float fy = (yy < 65535.0 ? Color::cachef[yy] / 327.68 : std::cbrt(yy / MAXVALD)); + float fy = (yy < 65535.0 ? Color::cachef[yy] / 327.68 : std::cbrt (yy / MAXVALD)); // compute LCH decompostion of the clipped pixel (only color information, thus C and H will be used) float x = xyz_cam[0][0] * ro + xyz_cam[0][1] * go + xyz_cam[0][2] * bo; float y = xyz_cam[1][0] * ro + xyz_cam[1][1] * go + xyz_cam[1][2] * bo; float z = xyz_cam[2][0] * ro + xyz_cam[2][1] * go + xyz_cam[2][2] * bo; - x = (x < 65535.0 ? Color::cachef[x] / 327.68 : std::cbrt(x / MAXVALD)); - y = (y < 65535.0 ? Color::cachef[y] / 327.68 : std::cbrt(y / MAXVALD)); - z = (z < 65535.0 ? Color::cachef[z] / 327.68 : std::cbrt(z / MAXVALD)); + x = (x < 65535.0 ? Color::cachef[x] / 327.68 : std::cbrt (x / MAXVALD)); + y = (y < 65535.0 ? Color::cachef[y] / 327.68 : std::cbrt (y / MAXVALD)); + z = (z < 65535.0 ? Color::cachef[z] / 327.68 : std::cbrt (z / MAXVALD)); // convert back to rgb double fz = fy - y + z; double fx = fy + x - y; - double zr = Color::f2xyz(fz); - double xr = Color::f2xyz(fx); + double zr = Color::f2xyz (fz); + double xr = Color::f2xyz (fx); x = xr * 65535.0 ; y = yy; @@ -4413,9 +4411,8 @@ void RawImageSource::hlRecovery (const std::string &method, float* red, float* g HLRecovery_Luminance (red, green, blue, red, green, blue, width, 65535.0); } else if (method == "CIELab blending") { HLRecovery_CIELab (red, green, blue, red, green, blue, width, 65535.0, imatrices.xyz_cam, imatrices.cam_xyz); - } - else if (method == "Blend") { // derived from Dcraw - HLRecovery_blend(red, green, blue, width, 65535.0, hlmax); + } else if (method == "Blend") { // derived from Dcraw + HLRecovery_blend (red, green, blue, width, 65535.0, hlmax); } } @@ -4427,15 +4424,15 @@ void RawImageSource::getAutoExpHistogram (LUTu & histogram, int& histcompr) // BENCHFUN histcompr = 3; - histogram(65536 >> histcompr); + histogram (65536 >> histcompr); histogram.clear(); - const float refwb[3] = {static_cast(refwb_red / (1 << histcompr)), static_cast(refwb_green / (1 << histcompr)), static_cast(refwb_blue / (1 << histcompr))}; + const float refwb[3] = {static_cast (refwb_red / (1 << histcompr)), static_cast (refwb_green / (1 << histcompr)), static_cast (refwb_blue / (1 << histcompr))}; #ifdef _OPENMP #pragma omp parallel #endif { - LUTu tmphistogram(histogram.getSize()); + LUTu tmphistogram (histogram.getSize()); tmphistogram.clear(); #ifdef _OPENMP #pragma omp for schedule(dynamic,16) nowait @@ -4447,49 +4444,49 @@ void RawImageSource::getAutoExpHistogram (LUTu & histogram, int& histcompr) if (ri->getSensorType() == ST_BAYER) { // precalculate factors to avoid expensive per pixel calculations - float 
refwb0 = refwb[ri->FC(i, start)]; - float refwb1 = refwb[ri->FC(i, start + 1)]; + float refwb0 = refwb[ri->FC (i, start)]; + float refwb1 = refwb[ri->FC (i, start + 1)]; int j; for (j = start; j < end - 1; j += 2) { - tmphistogram[(int)(refwb0 * rawData[i][j])] += 4; - tmphistogram[(int)(refwb1 * rawData[i][j + 1])] += 4; + tmphistogram[ (int) (refwb0 * rawData[i][j])] += 4; + tmphistogram[ (int) (refwb1 * rawData[i][j + 1])] += 4; } - if(j < end) { - tmphistogram[(int)(refwb0 * rawData[i][j])] += 4; + if (j < end) { + tmphistogram[ (int) (refwb0 * rawData[i][j])] += 4; } } else if (ri->getSensorType() == ST_FUJI_XTRANS) { // precalculate factors to avoid expensive per pixel calculations - float refwb0 = refwb[ri->XTRANSFC(i, start)]; - float refwb1 = refwb[ri->XTRANSFC(i, start + 1)]; - float refwb2 = refwb[ri->XTRANSFC(i, start + 2)]; - float refwb3 = refwb[ri->XTRANSFC(i, start + 3)]; - float refwb4 = refwb[ri->XTRANSFC(i, start + 4)]; - float refwb5 = refwb[ri->XTRANSFC(i, start + 5)]; + float refwb0 = refwb[ri->XTRANSFC (i, start)]; + float refwb1 = refwb[ri->XTRANSFC (i, start + 1)]; + float refwb2 = refwb[ri->XTRANSFC (i, start + 2)]; + float refwb3 = refwb[ri->XTRANSFC (i, start + 3)]; + float refwb4 = refwb[ri->XTRANSFC (i, start + 4)]; + float refwb5 = refwb[ri->XTRANSFC (i, start + 5)]; int j; for (j = start; j < end - 5; j += 6) { - tmphistogram[(int)(refwb0 * rawData[i][j])] += 4; - tmphistogram[(int)(refwb1 * rawData[i][j + 1])] += 4; - tmphistogram[(int)(refwb2 * rawData[i][j + 2])] += 4; - tmphistogram[(int)(refwb3 * rawData[i][j + 3])] += 4; - tmphistogram[(int)(refwb4 * rawData[i][j + 4])] += 4; - tmphistogram[(int)(refwb5 * rawData[i][j + 5])] += 4; + tmphistogram[ (int) (refwb0 * rawData[i][j])] += 4; + tmphistogram[ (int) (refwb1 * rawData[i][j + 1])] += 4; + tmphistogram[ (int) (refwb2 * rawData[i][j + 2])] += 4; + tmphistogram[ (int) (refwb3 * rawData[i][j + 3])] += 4; + tmphistogram[ (int) (refwb4 * rawData[i][j + 4])] += 4; + tmphistogram[ (int) (refwb5 * rawData[i][j + 5])] += 4; } for (; j < end; j++) { - tmphistogram[(int)(refwb[ri->XTRANSFC(i, j)] * rawData[i][j])] += 4; + tmphistogram[ (int) (refwb[ri->XTRANSFC (i, j)] * rawData[i][j])] += 4; } } else if (ri->get_colors() == 1) { for (int j = start; j < end; j++) { - tmphistogram[(int)(refwb_red * rawData[i][j])]++; + tmphistogram[ (int) (refwb_red * rawData[i][j])]++; } } else { for (int j = start; j < end; j++) { - tmphistogram[CLIP((int)(refwb_red * rawData[i][3 * j + 0]))]++; - tmphistogram[CLIP((int)(refwb_green * rawData[i][3 * j + 1]))] += 2; - tmphistogram[CLIP((int)(refwb_blue * rawData[i][3 * j + 2]))]++; + tmphistogram[CLIP ((int) (refwb_red * rawData[i][3 * j + 0]))]++; + tmphistogram[CLIP ((int) (refwb_green * rawData[i][3 * j + 1]))] += 2; + tmphistogram[CLIP ((int) (refwb_blue * rawData[i][3 * j + 2]))]++; } } } @@ -4510,53 +4507,53 @@ void RawImageSource::getRAWHistogram (LUTu & histRedRaw, LUTu & histGreenRaw, LU histRedRaw.clear(); histGreenRaw.clear(); histBlueRaw.clear(); - const float mult[4] = { 65535.0f / ri->get_white(0), - 65535.0f / ri->get_white(1), - 65535.0f / ri->get_white(2), - 65535.0f / ri->get_white(3) + const float mult[4] = { 65535.0f / ri->get_white (0), + 65535.0f / ri->get_white (1), + 65535.0f / ri->get_white (2), + 65535.0f / ri->get_white (3) }; - const bool fourColours = ri->getSensorType() == ST_BAYER && ((mult[1] != mult[3] || cblacksom[1] != cblacksom[3]) || FC(0, 0) == 3 || FC(0, 1) == 3 || FC(1, 0) == 3 || FC(1, 1) == 3); + const bool fourColours = 
ri->getSensorType() == ST_BAYER && ((mult[1] != mult[3] || cblacksom[1] != cblacksom[3]) || FC (0, 0) == 3 || FC (0, 1) == 3 || FC (1, 0) == 3 || FC (1, 1) == 3);
 
     constexpr int histoSize = 65536;
     LUTu hist[4];
-    hist[0](histoSize);
+    hist[0] (histoSize);
     hist[0].clear();
 
     if (ri->get_colors() > 1) {
-        hist[1](histoSize);
+        hist[1] (histoSize);
         hist[1].clear();
-        hist[2](histoSize);
+        hist[2] (histoSize);
         hist[2].clear();
     }
 
     if (fourColours) {
-        hist[3](histoSize);
+        hist[3] (histoSize);
         hist[3].clear();
     }
 
 #ifdef _OPENMP
     int numThreads;
     // reduce the number of threads under certain conditions to avoid overhead of too many critical regions
-    numThreads = sqrt((((H - 2 * border) * (W - 2 * border)) / 262144.f));
-    numThreads = std::min(std::max(numThreads, 1), omp_get_max_threads());
+    numThreads = sqrt ((((H - 2 * border) * (W - 2 * border)) / 262144.f));
+    numThreads = std::min (std::max (numThreads, 1), omp_get_max_threads());
     #pragma omp parallel num_threads(numThreads)
 #endif
     {
         // we need one LUT per color and thread, which corresponds to 1 MB per thread
         LUTu tmphist[4];
-        tmphist[0](histoSize);
+        tmphist[0] (histoSize);
         tmphist[0].clear();
 
         if (ri->get_colors() > 1) {
-            tmphist[1](histoSize);
+            tmphist[1] (histoSize);
             tmphist[1].clear();
-            tmphist[2](histoSize);
+            tmphist[2] (histoSize);
             tmphist[2].clear();
 
             if (fourColours) {
-                tmphist[3](histoSize);
+                tmphist[3] (histoSize);
                 tmphist[3].clear();
             }
         }
 
@@ -4571,32 +4568,32 @@ void RawImageSource::getRAWHistogram (LUTu & histRedRaw, LUTu & histGreenRaw, LU
 
             if (ri->getSensorType() == ST_BAYER) {
                 int j;
-                int c1 = FC(i, start);
-                c1 = ( fourColours && c1 == 1 && !(i & 1) ) ? 3 : c1;
-                int c2 = FC(i, start + 1);
-                c2 = ( fourColours && c2 == 1 && !(i & 1) ) ? 3 : c2;
+                int c1 = FC (i, start);
+                c1 = ( fourColours && c1 == 1 && ! (i & 1) ) ? 3 : c1;
+                int c2 = FC (i, start + 1);
+                c2 = ( fourColours && c2 == 1 && ! (i & 1) ) ? 3 : c2;
 
                 for (j = start; j < end - 1; j += 2) {
-                    tmphist[c1][(int)ri->data[i][j]]++;
-                    tmphist[c2][(int)ri->data[i][j + 1]]++;
+                    tmphist[c1][ (int)ri->data[i][j]]++;
+                    tmphist[c2][ (int)ri->data[i][j + 1]]++;
                 }
 
-                if(j < end) { // last pixel of row if width is odd
-                    tmphist[c1][(int)ri->data[i][j]]++;
+                if (j < end) { // last pixel of row if width is odd
+                    tmphist[c1][ (int)ri->data[i][j]]++;
                 }
             } else if (ri->get_colors() == 1) {
                 for (int j = start; j < end; j++) {
-                    tmphist[0][(int)ri->data[i][j]]++;
+                    tmphist[0][ (int)ri->data[i][j]]++;
                 }
-            } else if(ri->getSensorType() == ST_FUJI_XTRANS) {
+            } else if (ri->getSensorType() == ST_FUJI_XTRANS) {
                 for (int j = start; j < end - 1; j += 2) {
-                    int c = ri->XTRANSFC(i, j);
-                    tmphist[c][(int)ri->data[i][j]]++;
+                    int c = ri->XTRANSFC (i, j);
+                    tmphist[c][ (int)ri->data[i][j]]++;
                 }
             } else {
                 for (int j = start; j < end; j++) {
                     for (int c = 0; c < 3; c++) {
-                        tmphist[c][(int)ri->data[i][3 * j + c]]++;
+                        tmphist[c][ (int)ri->data[i][3 * j + c]]++;
                     }
                 }
             }
@@ -4619,21 +4616,21 @@ void RawImageSource::getRAWHistogram (LUTu & histRedRaw, LUTu & histGreenRaw, LU
         } // end of critical region
     } // end of parallel region
 
-    for(int i = 0; i < 65536; i++) {
+    for (int i = 0; i < 65536; i++) {
         int idx;
-        idx = CLIP((int)Color::gamma(mult[0] * (i - (cblacksom[0]/*+black_lev[0]*/))));
+        idx = CLIP ((int)Color::gamma (mult[0] * (i - (cblacksom[0]/*+black_lev[0]*/))));
         histRedRaw[idx >> 8] += hist[0][i];
 
         if (ri->get_colors() > 1) {
-            idx = CLIP((int)Color::gamma(mult[1] * (i - (cblacksom[1]/*+black_lev[1]*/))));
+            idx = CLIP ((int)Color::gamma (mult[1] * (i - (cblacksom[1]/*+black_lev[1]*/))));
             histGreenRaw[idx >> 8] += hist[1][i];
 
             if (fourColours) {
-                idx = CLIP((int)Color::gamma(mult[3] * (i - (cblacksom[3]/*+black_lev[3]*/))));
+                idx = CLIP ((int)Color::gamma (mult[3] * (i - (cblacksom[3]/*+black_lev[3]*/))));
                 histGreenRaw[idx >> 8] += hist[3][i];
             }
 
-            idx = CLIP((int)Color::gamma(mult[2] * (i - (cblacksom[2]/*+black_lev[2]*/))));
+            idx = CLIP ((int)Color::gamma (mult[2] * (i - (cblacksom[2]/*+black_lev[2]*/))));
             histBlueRaw[idx >> 8] += hist[2][i];
         }
     }
 
@@ -4642,11 +4639,11 @@ void RawImageSource::getRAWHistogram (LUTu & histRedRaw, LUTu & histGreenRaw, LU
         for (int i = 0; i < 256; i++) {
             histGreenRaw[i] >>= 1;
         }
-    else if(ri->getSensorType() == ST_FUJI_XTRANS) // since Xtrans has 2.5 as many greens, correct for it
+    else if (ri->getSensorType() == ST_FUJI_XTRANS) // since Xtrans has 2.5 as many greens, correct for it
         for (int i = 0; i < 256; i++) {
             histGreenRaw[i] = (histGreenRaw[i] * 2) / 5;
         }
-    else if(ri->get_colors() == 1) { // monochrome sensor => set all histograms equal
+    else if (ri->get_colors() == 1) { // monochrome sensor => set all histograms equal
         histGreenRaw += histRedRaw;
         histBlueRaw += histRedRaw;
     }
@@ -4659,8 +4656,8 @@ void RawImageSource::getRowStartEnd (int x, int &start, int &end)
 {
     if (fuji) {
         int fw = ri->get_FujiWidth();
-        start = ABS(fw - x) + border;
-        end = min(H + W - fw - x, fw + x) - border;
+        start = ABS (fw - x) + border;
+        end = min (H + W - fw - x, fw + x) - border;
     } else {
         start = border;
         end = W - border;
@@ -4700,14 +4697,14 @@ void RawImageSource::getAutoWBMultipliers (double &rm, double &gm, double &bm)
     if (fuji) {
         for (int i = 32; i < H - 32; i++) {
             int fw = ri->get_FujiWidth();
-            int start = ABS(fw - i) + 32;
-            int end = min(H + W - fw - i, fw + i) - 32;
+            int start = ABS (fw - i) + 32;
+            int end = min (H + W - fw - i, fw + i) - 32;
 
             for (int j = start; j < end; j++) {
                 if (ri->getSensorType() != ST_BAYER) {
-                    double dr = CLIP(initialGain * (rawData[i][3 * j] ));
-                    double dg = CLIP(initialGain * (rawData[i][3 * j + 1]));
-                    double db = CLIP(initialGain * (rawData[i][3 * j + 2]));
+                    double dr = CLIP (initialGain * (rawData[i][3 * j] ));
+                    double dg = CLIP (initialGain * (rawData[i][3 * j + 1]));
+                    double db = CLIP (initialGain * (rawData[i][3 * j + 2]));
 
                     if (dr > clipHigh || dg > clipHigh || db > clipHigh) {
                         continue;
@@ -4718,8 +4715,8 @@ void RawImageSource::getAutoWBMultipliers (double &rm, double &gm, double &bm)
                     avg_b += db;
                     rn = gn = ++bn;
                 } else {
-                    int c = FC( i, j);
-                    double d = CLIP(initialGain * (rawData[i][j]));
+                    int c = FC ( i, j);
+                    double d = CLIP (initialGain * (rawData[i][j]));
 
                     if (d > clipHigh) {
                         continue;
@@ -4741,7 +4738,7 @@ void RawImageSource::getAutoWBMultipliers (double &rm, double &gm, double &bm)
             }
         }
     } else {
         if (ri->getSensorType() != ST_BAYER) {
-            if(ri->getSensorType() == ST_FUJI_XTRANS) {
+            if (ri->getSensorType() == ST_FUJI_XTRANS) {
                 const double compval = clipHigh / initialGain;
 #ifdef _OPENMP
                 #pragma omp parallel
@@ -4762,7 +4759,7 @@ void RawImageSource::getAutoWBMultipliers (double &rm, double &gm, double &bm)
                         continue;
                     }
 
-                    int c = ri->XTRANSFC(i, j);
+                    int c = ri->XTRANSFC (i, j);
                     avg_c[c] += d;
                     cn[c]++;
                 }
@@ -4788,9 +4785,9 @@ void RawImageSource::getAutoWBMultipliers (double &rm, double &gm, double &bm)
                 for (int j = 32; j < W - 32; j++) {
                     // each loop read 1 rgb triplet value
 
-                    double dr = CLIP(initialGain * (rawData[i][3 * j] ));
-                    double dg = CLIP(initialGain * (rawData[i][3 * j + 1]));
-                    double db = CLIP(initialGain * (rawData[i][3 * j + 2]));
+                    double dr = CLIP (initialGain * (rawData[i][3 * j] ));
+                    double dg = CLIP (initialGain * (rawData[i][3 * j + 1]));
+                    double db = CLIP (initialGain * (rawData[i][3 * j + 2]));
 
                     if (dr > clipHigh || dg > clipHigh || db > clipHigh) {
                         continue;
@@ -4809,8 +4806,8 @@ void RawImageSource::getAutoWBMultipliers (double &rm, double &gm, double &bm)
         //determine GRBG coset; (ey,ex) is the offset of the R subarray
         int ey, ex;
 
-        if (ri->ISGREEN(0, 0)) { //first pixel is G
-            if (ri->ISRED(0, 1)) {
+        if (ri->ISGREEN (0, 0)) { //first pixel is G
+            if (ri->ISRED (0, 1)) {
                 ey = 0;
                 ex = 1;
             } else {
@@ -4818,7 +4815,7 @@ void RawImageSource::getAutoWBMultipliers (double &rm, double &gm, double &bm)
                 ex = 0;
             }
         } else {//first pixel is R or B
-            if (ri->ISRED(0, 0)) {
+            if (ri->ISRED (0, 0)) {
                 ey = 0;
                 ex = 0;
             } else {
@@ -4869,7 +4866,7 @@ void RawImageSource::getAutoWBMultipliers (double &rm, double &gm, double &bm)
         }
     }
 
-    if( settings->verbose ) {
+    if ( settings->verbose ) {
         printf ("AVG: %g %g %g\n", avg_r / rn, avg_g / gn, avg_b / bn);
     }
 
@@ -4896,7 +4893,7 @@ ColorTemp RawImageSource::getSpotWB (std::vector<Coord2D> &red, std::vector<Coor
     if (ri->getSensorType() != ST_BAYER) {
-        if(ri->getSensorType() == ST_FUJI_XTRANS) {
+        if (ri->getSensorType() == ST_FUJI_XTRANS) {
             int d[9][2] = {{0, 0}, { -1, -1}, { -1, 0}, { -1, 1}, {0, -1}, {0, 1}, {1, -1}, {1, 0}, {1, 1}};
 
             for (size_t i = 0; i < red.size(); i++) {
@@ -4909,12 +4906,12 @@ ColorTemp RawImageSource::getSpotWB (std::vector<Coord2D> &red, std::vector<Coor
-                    if(xv >= 0 && yv >= 0 && xv < W && yv < H) {
-                        if (ri->ISXTRANSRED(yv, xv)) { //RED
+                    if (xv >= 0 && yv >= 0 && xv < W && yv < H) {
+                        if (ri->ISXTRANSRED (yv, xv)) { //RED
                             rloc += (rawData[yv][xv]);
                             rnbrs++;
                             continue;
-                        } else if (ri->ISXTRANSBLUE(yv, xv)) { //BLUE
+                        } else if (ri->ISXTRANSBLUE (yv, xv)) { //BLUE
                             bloc += (rawData[yv][xv]);
                             bnbrs++;
                             continue;
@@ -4953,10 +4950,10 @@ ColorTemp RawImageSource::getSpotWB (std::vector<Coord2D> &red, std::vector<Coor
                 if (xmin >= 0 && ymin >= 0 && xmax < W && ymax < H) {
                     reds += (rawData[yr][3 * xr] );
@@ -4980,9 +4977,9 @@ ColorTemp RawImageSource::getSpotWB (std::vector<Coord2D> &red, std::vector<Coor
-                    if(xv >= 0 && yv >= 0 && xv < W && yv < H) {
+                    if (xv >= 0 && yv >= 0 && xv < W && yv < H) {
                         if (c == 0) { //RED
                             rloc += (rawData[yv][xv]);
                             rnbrs++;
@@ -5016,9 +5013,9 @@ ColorTemp RawImageSource::getSpotWB (std::vector<Coord2D> &red, std::vector<Coor
-                    if(xv >= 0 && yv >= 0 && xv < W && yv < H) {
+                    if (xv >= 0 && yv >= 0 && xv < W && yv < H) {
                         if (c == 0) { //RED
                             rloc += (rawData[yv][xv]);
                             rnbrs++;
@@ -5052,9 +5049,9 @@ ColorTemp RawImageSource::getSpotWB (std::vector<Coord2D> &red, std::vector<Coor
-                    if(xv >= 0 && yv >= 0 && xv < W && yv < H) {
+                    if (xv >= 0 && yv >= 0 && xv < W && yv < H) {
                         if (c == 0) { //RED
                             rloc += (rawData[yv][xv]);
                             rnbrs++;
@@ -5173,13 +5170,13 @@ void RawImageSource::inverse33 (const double (*rgb_cam)[3], double (*cam_rgb)[3]
                   rgb_cam[0][2] * rgb_cam[1][0] * rgb_cam[2][1] + rgb_cam[0][0] * rgb_cam[1][2] * rgb_cam[2][1] +
                   rgb_cam[0][1] * rgb_cam[1][0] * rgb_cam[2][2] - rgb_cam[0][0] * rgb_cam[1][1] * rgb_cam[2][2] );
     cam_rgb[0][0] = (rgb_cam[1][2] * rgb_cam[2][1] - rgb_cam[1][1] * rgb_cam[2][2]) / nom;
-    cam_rgb[0][1] = -(rgb_cam[0][2] * rgb_cam[2][1] - rgb_cam[0][1] * rgb_cam[2][2]) / nom;
+    cam_rgb[0][1] = - (rgb_cam[0][2] * rgb_cam[2][1] - rgb_cam[0][1] * rgb_cam[2][2]) / nom;
     cam_rgb[0][2] = (rgb_cam[0][2] * rgb_cam[1][1] - rgb_cam[0][1] * rgb_cam[1][2]) / nom;
-    cam_rgb[1][0] = -(rgb_cam[1][2] * rgb_cam[2][0] - rgb_cam[1][0] * rgb_cam[2][2]) / nom;
+    cam_rgb[1][0] = - (rgb_cam[1][2] * rgb_cam[2][0] - rgb_cam[1][0] * rgb_cam[2][2]) / nom;
     cam_rgb[1][1] = (rgb_cam[0][2] * rgb_cam[2][0] - rgb_cam[0][0] * rgb_cam[2][2]) / nom;
-    cam_rgb[1][2] = -(rgb_cam[0][2] * rgb_cam[1][0] - rgb_cam[0][0] * rgb_cam[1][2]) / nom;
+    cam_rgb[1][2] = - (rgb_cam[0][2] * rgb_cam[1][0] - rgb_cam[0][0] * rgb_cam[1][2]) / nom;
     cam_rgb[2][0] = (rgb_cam[1][1] * rgb_cam[2][0] - rgb_cam[1][0] * rgb_cam[2][1]) / nom;
-    cam_rgb[2][1] = -(rgb_cam[0][1] * rgb_cam[2][0] - rgb_cam[0][0] * rgb_cam[2][1]) / nom;
+    cam_rgb[2][1] = - (rgb_cam[0][1] * rgb_cam[2][0] - rgb_cam[0][0] * rgb_cam[2][1]) / nom;
     cam_rgb[2][2] = (rgb_cam[0][1] * rgb_cam[1][0] - rgb_cam[0][0] * rgb_cam[1][1]) / nom;
 }
@@ -5213,19 +5210,19 @@ void RawImageSource::init ()
         0.9918364233, 0.9052109560, 1.0000000000, 1.0000000000
     };
 
     std::vector<double> cForwardPoints;
-    cForwardPoints.push_back(double(DCT_Spline)); // The first value is the curve type
+    cForwardPoints.push_back (double (DCT_Spline)); // The first value is the curve type
     std::vector<double> cInversePoints;
-    cInversePoints.push_back(double(DCT_Spline)); // The first value is the curve type
+    cInversePoints.push_back (double (DCT_Spline)); // The first value is the curve type
 
-    for (int i = 0; i < sizeof(phase_one_forward) / sizeof(phase_one_forward[0]); i += 2) {
-        cForwardPoints.push_back(phase_one_forward[i + 0]);
-        cForwardPoints.push_back(phase_one_forward[i + 1]);
-        cInversePoints.push_back(phase_one_forward[i + 1]);
-        cInversePoints.push_back(phase_one_forward[i + 0]);
+    for (int i = 0; i < sizeof (phase_one_forward) / sizeof (phase_one_forward[0]); i += 2) {
+        cForwardPoints.push_back (phase_one_forward[i + 0]);
+        cForwardPoints.push_back (phase_one_forward[i + 1]);
+        cInversePoints.push_back (phase_one_forward[i + 1]);
+        cInversePoints.push_back (phase_one_forward[i + 0]);
     }
 
-    phaseOneIccCurve = new DiagonalCurve(cForwardPoints, CURVES_MIN_POLY_POINTS);
-    phaseOneIccCurveInv = new DiagonalCurve(cInversePoints, CURVES_MIN_POLY_POINTS);
+    phaseOneIccCurve = new DiagonalCurve (cForwardPoints, CURVES_MIN_POLY_POINTS);
+    phaseOneIccCurveInv = new DiagonalCurve (cInversePoints, CURVES_MIN_POLY_POINTS);
 }
 }