Issue 2007 (aperture scaling for raw files): committed work in progress. Works, but may be further tuned after more discussion.

This commit is contained in:
torger
2013-10-21 19:04:57 +02:00
parent b341a56f42
commit de17d4722a
4 changed files with 373 additions and 55 deletions

View File

@@ -19,12 +19,45 @@ namespace rtengine {
CameraConst::CameraConst()
{
memset(dcraw_matrix, 0, sizeof(dcraw_matrix));
white_max = 0;
}
CameraConst::~CameraConst()
{
}
bool
CameraConst::parseApertureScaling(CameraConst *cc, void *ji_)
{
cJSON *ji = (cJSON *)ji_;
if (ji->type != cJSON_Array) {
fprintf(stderr, "\"ranges\":\"aperture_scaling\" must be an array\n");
return false;
}
for (ji = ji->child; ji != NULL; ji = ji->next) {
cJSON *js = cJSON_GetObjectItem(ji, "aperture");
if (!js) {
fprintf(stderr, "missing \"ranges\":\"aperture_scaling\":\"aperture\" object item.\n");
return false;
} else if (js->type != cJSON_Number) {
fprintf(stderr, "\"ranges\":\"aperture_scaling\":\"aperture\" must be a number.\n");
return false;
}
float aperture = (float)js->valuedouble;
js = cJSON_GetObjectItem(ji, "scale_factor");
if (!js) {
fprintf(stderr, "missing \"ranges\":\"aperture_scaling\":\"scale_factor\" object item.\n");
return false;
} else if (js->type != cJSON_Number) {
fprintf(stderr, "\"ranges\":\"aperture_scaling\":\"scale_factor\" must be a number.\n");
return false;
}
float scale_factor = (float)js->valuedouble;
cc->mApertureScaling.insert(std::pair<float,float>(aperture, scale_factor));
}
return true;
}
bool
CameraConst::parseLevels(CameraConst *cc, int bw, void *ji_)
{
@@ -36,10 +69,29 @@ CameraConst::parseLevels(CameraConst *cc, int bw, void *ji_)
cc->mLevels[bw].insert(std::pair<int,struct camera_const_levels>(0, lvl));
return true;
} else if (ji->type != cJSON_Array) {
fprintf(stderr, "\"ranges\":\"black\" must be a number or an array\n");
fprintf(stderr, "\"ranges\":\"%s\" must be a number or an array\n", bw ? "white" : "black");
return false;
}
if (ji->child->type == cJSON_Number) {
struct camera_const_levels lvl;
int i;
cJSON *js;
for (js = ji->child, i = 0; js != NULL && i < 4; js = js->next, i++) {
lvl.levels[i] = js->valueint;
}
if (i == 3) {
lvl.levels[3] = lvl.levels[1]; // G2 = G1
} else if (i == 1) {
lvl.levels[3] = lvl.levels[2] = lvl.levels[1] = lvl.levels[0];
} else if (i != 4 || js != NULL) {
fprintf(stderr, "\"ranges\":\"%s\" array must have 1, 3 or 4 numbers.\n", bw ? "white" : "black");
return false;
}
cc->mLevels[bw].insert(std::pair<int,struct camera_const_levels>(0, lvl));
return true;
}
for (ji = ji->child; ji != NULL; ji = ji->next) {
int iso = 0;
cJSON *js = cJSON_GetObjectItem(ji, "iso");
@@ -129,9 +181,24 @@ CameraConst::parseEntry(void *cJSON_)
goto parse_error;
}
}
ji = cJSON_GetObjectItem(jranges, "white_max");
if (ji) {
if (ji->type != cJSON_Number) {
fprintf(stderr, "\"ranges\":\"white_max\" must be a number\n");
goto parse_error;
}
cc->white_max = (int)ji->valueint;
}
ji = cJSON_GetObjectItem(jranges, "aperture_scaling");
if (ji) {
if (!parseApertureScaling(cc, ji)) {
goto parse_error;
}
}
}
for (int bw = 0; bw < 2; bw++) {
if (!cc->get_Levels(bw, 0)) {
struct camera_const_levels lvl;
if (!cc->get_Levels(lvl, bw, 0, 0)) {
std::map<int, struct camera_const_levels>::iterator it;
it = cc->mLevels[bw].begin();
if (it != cc->mLevels[bw].end()) {
@@ -184,13 +251,19 @@ CameraConst::update_Levels(const CameraConst *other) {
mLevels[1].clear();
mLevels[1] = other->mLevels[1];
}
if (other->mApertureScaling.size()) {
mApertureScaling.clear();
mApertureScaling = other->mApertureScaling;
}
if (other->white_max)
white_max = other->white_max;
// for (std::map<int, struct camera_const_levels>::iterator i=other->mLevels[0].begin(); i!=other->mLevels[0].end(); i++) {
// }
}
const struct camera_const_levels *
CameraConst::get_Levels(int bw, int iso)
bool
CameraConst::get_Levels(struct camera_const_levels & lvl, int bw, int iso, float fnumber)
{
std::map<int, struct camera_const_levels>::iterator it;
it = mLevels[bw].find(iso);
@@ -207,26 +280,88 @@ CameraConst::get_Levels(int bw, int iso)
}
it = best_it;
if (it == mLevels[bw].end()) {
return 0;
return false;
}
}
return &it->second;
lvl = it->second;
if (fnumber > 0 && mApertureScaling.size() > 0) {
std::map<float, float>::iterator it;
it = mApertureScaling.find(fnumber);
if (it == mApertureScaling.end()) {
// fnumber may be an exact aperture, eg 1.414, or a rounded one, eg 1.4. The map should
// contain rounded numbers, so we translate fnumber using the traditional 1/3 stop
// f-number rounding used by most cameras and retry the lookup. The table below only
// covers the range f/0.7 - f/10.0, but aperture scaling rarely happens past f/4.0.
const float fn_tab[8][3] = {
{ 0.7, 0.8, 0.9 },
{ 1.0, 1.1, 1.2 },
{ 1.4, 1.6, 1.8 },
{ 2.0, 2.2, 2.5 },
{ 2.8, 3.2, 3.5 },
{ 4.0, 4.5, 5.0 },
{ 5.6, 6.3, 7.1 },
{ 8.0, 9.0, 10.0 }
};
for (int avh = 0; avh < 8; avh++) {
for (int k = 0; k < 3; k++) {
float av = (avh-1) + (float)k / 3;
float aperture = sqrtf(powf(2, av));
if (fnumber > aperture*0.97 && fnumber < aperture/0.97) {
fnumber = fn_tab[avh][k];
it = mApertureScaling.find(fnumber);
avh = 7;
break;
}
}
}
}
float scaling = 1.0;
if (it == mApertureScaling.end()) {
std::map<float, float>::reverse_iterator it;
for (it = mApertureScaling.rbegin(); it != mApertureScaling.rend(); it++) {
if (it->first > fnumber) {
scaling = it->second;
} else {
break;
}
}
} else {
scaling = it->second;
}
if (scaling > 1.0) {
for (int i = 0; i < 4; i++) {
lvl.levels[i] *= scaling;
if (white_max > 0 && lvl.levels[i] > white_max) {
lvl.levels[i] = white_max;
}
}
}
}
return true;
}
int
CameraConst::get_BlackLevel(const int idx, const int iso_speed)
CameraConst::get_BlackLevel(const int idx, const int iso_speed, const float fnumber)
{
assert(idx >= 0 && idx <= 3);
const struct camera_const_levels *lvl = get_Levels(0, iso_speed);
return (lvl) ? lvl->levels[idx] : -1;
struct camera_const_levels lvl;
if (!get_Levels(lvl, 0, iso_speed, fnumber)) {
return -1;
}
return lvl.levels[idx];
}
int
CameraConst::get_WhiteLevel(const int idx, const int iso_speed)
CameraConst::get_WhiteLevel(const int idx, const int iso_speed, const float fnumber)
{
assert(idx >= 0 && idx <= 3);
const struct camera_const_levels *lvl = get_Levels(1, iso_speed);
return (lvl) ? lvl->levels[idx] : -1;
struct camera_const_levels lvl;
if (!get_Levels(lvl, 1, iso_speed, fnumber)) {
return -1;
}
return lvl.levels[idx];
}
bool
@@ -239,11 +374,11 @@ CameraConstantsStore::parse_camera_constants_file(Glib::ustring filename_)
fprintf(stderr, "Could not open camera constants file \"%s\": %s\n", filename, strerror(errno));
return false;
}
size_t bufsize = 64; // use small initial size just to make sure to test realloc() case
size_t bufsize = 4096;
size_t datasize = 0, ret;
char *buf = (char *)malloc(bufsize);
while ((ret = fread(&buf[datasize], 1, bufsize - datasize, stream)) != 0) {
datasize += bufsize - datasize;
datasize += ret;
if (datasize == bufsize) {
bufsize += 4096;
buf = (char *)realloc(buf, bufsize);
@@ -256,7 +391,6 @@ CameraConstantsStore::parse_camera_constants_file(Glib::ustring filename_)
return false;
}
fclose(stream);
datasize += ret;
buf = (char *)realloc(buf, datasize + 1);
buf[datasize] = '\0';

View File

@@ -17,20 +17,23 @@ class CameraConst {
private:
Glib::ustring make_model;
short dcraw_matrix[12];
int white_max;
std::map<int, struct camera_const_levels> mLevels[2];
std::map<float, float> mApertureScaling;
CameraConst();
~CameraConst();
static bool parseLevels(CameraConst *cc, int bw, void *ji);
const struct camera_const_levels *get_Levels(int bw, int iso);
static bool parseLevels(CameraConst *cc, int bw, void *ji);
static bool parseApertureScaling(CameraConst *cc, void *ji);
bool get_Levels(struct camera_const_levels & lvl, int bw, int iso, float fnumber);
public:
static CameraConst *parseEntry(void *cJSON);
bool has_dcrawMatrix(void);
void update_dcrawMatrix(const short *other);
const short *get_dcrawMatrix(void);
int get_BlackLevel(int idx, int iso_speed);
int get_WhiteLevel(int idx, int iso_speed);
int get_BlackLevel(int idx, int iso_speed, float fnumber);
int get_WhiteLevel(int idx, int iso_speed, float fnumber);
void update_Levels(const CameraConst *other);
};

View File

@@ -83,6 +83,132 @@ Examples:
}
}
How to measure white levels:
----------------------------
Dcraw, which provides the default values to RawTherapee, often provides too
high white levels, and only a single value regardless of color channel, ISO
or aperture. If you open an image with a large clipped area and it is
rendered in a pink/magenta color rather than white, it usually means that the
white level constant is too high. You can fix this by adjusting the
"Raw White Point" in the raw tab inside RawTherapee, or fix it permanently by
measuring and providing a more exact white level in camconst.json, so
RawTherapee knows from the start where the camera actually clips.
Here's a guide on how to do it.
Shoot with your camera into a bright light source, such as a lamp, and make
sure the shutter speed is long enough to get overexposure (we want
clipping!). Some cameras have fuzzy white levels and may look less fuzzy with
strong over-exposure (most samples are then pushed to some hard raw limit).
If you measure such a camera you'll want to over-expose softly (i.e. one or
two stops or so) so you can see more precisely where the camera stops
providing sane information.
Use f/5.6 or a smaller aperture (= larger f-number) to avoid any raw scaling
the camera might apply for large apertures. Open the file in a raw analyzer
such as Rawdigger and check the pixel values in the clipped areas (if you
are using Rawdigger, make sure you have disabled "subtract black" in the
preferences, or else the sample values can be wrong). At this stage we always
look at the white level before black level subtraction! White levels can
differ per color channel (R, G1, B, G2; note the two greens, although most
often both green channels have the same white level) and vary depending on
the ISO setting, so make one shot for each ISO (even 1/3 steps, so yes, it
can be quite a lot of pictures to shoot and check).
In addition, many cameras scale the raw values for large apertures. It's
generally not that important to cover this, but if you want to extract the
most out of the camera you should cover it too. Then you need to shoot with a
wide aperture lens (ideally the widest available from the manufacturer) and
test each aperture (1/3 steps) from the widest (say f/1.2) until the camera
stops scaling the raw values (usually f/2.8 or f/4.0). If the camera also
has ISO-dependent white levels you need to shoot at these different ISOs to
detect any differences in scaling; there can be a bit of variation. If you
don't have access to the widest lens available for the system (say only an
f/1.8 lens instead of an f/1.2), it can still be valuable to provide the
values down to what you can measure.
PROVIDE CONSERVATIVE VALUES. Most cameras have a little noise at the white
level, and some can have a lot. In your raw analyzer, move around and look at
the values in the clipped areas to get a sense of the variation, and/or look
at the histogram. While it's common to have very little variation, say only
+/-2 units, some cameras can have +/-500 or more. There can also be
camera-to-camera variation.
If the white level is set too high, RawTherapee will not consider the pixels
clipped and you can get discoloured highlights (usually pink); this is what
we want to avoid. If the white level is set too low, RawTherapee will clip
early, i.e. you lose a little highlight detail, but the color is rendered
correctly and highlight reconstruction can work properly, so this is not as
bad. This is why we want conservative values.
By conservative values we mean that if you see a clipping level of most often
15760 and occasionally 15759 (i.e. a very small variation of the clipping
level, which is a common case), you set the white level 10 - 20 units below,
say at 15750 in this example; this way we get a little margin for noise and
camera variation. Since sensor raw values are linear, in this example you
lose log2(1 - 10/15760) = -0.001 stop of detail, i.e. an irrelevant amount.
Thus it's better to let RawTherapee know where the image clips than to keep
that last 0.001 stop of highlight information and risk that clipping is not
detected properly.
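To illustrate the arithmetic above, here is a minimal stand-alone sketch
(the helper name and the program are hypothetical, not RawTherapee code)
that computes how many stops of highlight detail a given margin costs:

#include <cmath>
#include <cstdio>

// stops of detail lost when the white level is set `margin` raw units below
// the observed clipping point (raw values are linear, hence the log2)
static float stops_lost(int observed_clip, int margin)
{
    return std::log2(1.0f - (float)margin / observed_clip);
}

int main()
{
    std::printf("%.4f stops\n", stops_lost(15760, 10)); // roughly -0.001 stops
}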
If you have a fuzzy white level, look at the linear histogram; you will
probably see a normal/Gaussian (bell-shaped) noise peak at clipping, and
probably also a peak at a hard raw data clip level, usually a power of two
minus 1, such as 4095 or 16383. Then you pick a value just before the bell
shape rises, i.e. to the left of the bell, meaning that you cut away the
whole fuzzy noise peak. If a little of the leading edge of the noise is
included it's not harmful, but 99% of it should be above.
If you have used Adobe's DNG Converter and analyzed its output, you may have
noticed that it's very conservative regarding white levels, i.e. it cuts away
quite a lot from the top. While we also recommend being conservative, you can
generally be a little less conservative than Adobe's DNG Converter.
RawTherapee is meant to max out what you can get from your camera, and the
white levels should (within reason) mirror that.
The aperture scaling feature is meant to raise the white level so that no
highlight detail is missed when the camera has scaled the raw values (and
thus raised the white levels). Many cameras do this, but not all, and they
can only do it for lenses that report the aperture to the camera (i.e. you
see it in the EXIF data). Providing proper aperture scaling values is a
somewhat more advanced task, so if you are unsure we recommend skipping that
part.
Beware that the raw format may have a ceiling so that it clips scaled
values; for example the Canon 5D mark II maxes out at 16383, which happens at
f/1.8 for ISOs with the white level at 15750, but at ISO 160, where the white
level is 12800, it does not max out. If there is such a raw limit it must
also be provided ("ranges":"white_max"). Usually you will not need a margin
on white_max, as its clipping is the result of an in-camera math operation.
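A minimal sketch of that ceiling, assuming a stand-alone helper of our own
(the committed get_Levels() applies the equivalent clamp internally):

// illustrative only: scale a white level by the aperture factor and cap it
// at the raw format ceiling ("white_max"); white_max == 0 means no ceiling
static int scaled_white(int white_level, float scale_factor, int white_max)
{
    int w = (int)(white_level * scale_factor);
    if (white_max > 0 && w > white_max) {
        w = white_max;
    }
    return w;
}

// scaled_white(15750, 1.039f, 16383) == 16364, still below the ceiling;
// a wider aperture with a larger factor would be capped at 16383.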
Note that aperture scaling can be quite small; for the 5D mark II it's only
0.1 stop down to f/1.4, and then it's debatable whether it's worthwhile to
care. The "worst" cameras scale about 0.6 stops though, and then it's more
valuable to compensate. If you skip aperture scaling you will clip too early
and miss that highlight detail, but you get no processing problems. Setting
unconservative scale factors can, on the other hand, cause a too high white
level and break highlight processing, so be careful. Scaling can vary
slightly depending on ISO (if white levels vary), so make sure to provide
conservative scalings so that regardless of ISO you don't get a too high
white level. We recommend keeping a wider margin here than on the white
levels, i.e. 0.5-1% lower or so. For example, if the base (conservative!)
white level is 15750 and the scaled one is 16221, we have a scaling factor of
16221/15750 = 1.0299, i.e. +2.9%; we set the factor to 1.02, or +2%, to keep
a margin.
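The same margin calculation as a small hypothetical sketch (illustrative
only, not how RawTherapee derives the factors):

#include <cstdio>

int main()
{
    const float base_white   = 15750.0f; // conservative base white level
    const float scaled_white = 16221.0f; // white level measured at the wide aperture
    const float measured     = scaled_white / base_white; // 1.0299, i.e. +2.99%
    const float margin       = 0.01f;    // stay roughly 1% below the measured factor
    const float scale_factor = measured - margin;         // about 1.02
    std::printf("measured %.4f -> suggested scale_factor %.2f\n",
                measured, scale_factor);
}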
Scaling takes place on the raw values before black level subtraction, and if
a black level constant is provided, it will be scaled as well.
If RawTherapee doesn't find an entry for the aperture used in the image, it
will pick the closest one above. I.e. if the apertures 1.0 and 2.0 are in the
table and the image has aperture 1.2, it will pick the scaling for 2.0, even
though 1.0 is the closer aperture. The reason for always picking the closest
entry above is that we would rather get a slightly low white level than one
that is too high, as discussed before.
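A minimal sketch of that "closest above" lookup using std::map::upper_bound
(the committed code iterates the map in reverse instead, with the same result
for this case; the table values below are made up):

#include <cstdio>
#include <map>

int main()
{
    // hypothetical table: scale factors for f/1.0 and f/2.0
    std::map<float, float> scaling = { { 1.0f, 1.10f }, { 2.0f, 1.03f } };
    const float fnumber = 1.2f; // aperture reported in the image metadata

    // first table entry with an aperture strictly above the image aperture
    std::map<float, float>::const_iterator it = scaling.upper_bound(fnumber);
    if (it != scaling.end()) {
        std::printf("f/%.1f uses the f/%.1f entry, factor %.2f\n",
                    fnumber, it->first, it->second);
    } else {
        std::printf("f/%.1f is above all table entries, no scaling applied\n",
                    fnumber);
    }
}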
Some cameras have different white levels on different color channels. Note
that some cameras with per-color white levels have such small differences
that you may just provide a single value instead; in that case pick the
lowest white level and keep a conservative margin as always.
*/
{"camera_constants": [
@@ -93,32 +219,57 @@ Examples:
// black levels are read from raw masked pixels
// white levels are same for all colors, but vary on ISO
"white": [
{ "iso": 50, "levels": 15760 },
{ "iso": 100, "levels": 15760 },
{ "iso": 125, "levels": 15760 },
{ "iso": 160, "levels": 12810 },
{ "iso": 200, "levels": 15760 },
{ "iso": 250, "levels": 15760 },
{ "iso": 320, "levels": 12810 },
{ "iso": 400, "levels": 15760 },
{ "iso": 500, "levels": 15760 },
{ "iso": 640, "levels": 12810 },
{ "iso": 800, "levels": 15760 },
{ "iso": 1000, "levels": 15760 },
{ "iso": 1250, "levels": 12810 },
{ "iso": 1600, "levels": 15760 },
{ "iso": 2000, "levels": 15760 },
{ "iso": 2500, "levels": 15760 },
{ "iso": 3200, "levels": 15760 },
{ "iso": 4000, "levels": 15760 },
{ "iso": 5000, "levels": 15760 },
{ "iso": 6400, "levels": 16383 },
{ "iso": 12800, "levels": 16383 },
{ "iso": 25600, "levels": 16383 }
{ "iso": 50, "levels": 15750 }, // typical: 15760
{ "iso": 100, "levels": 15750 },
{ "iso": 125, "levels": 15750 },
{ "iso": 160, "levels": 12800 },
{ "iso": 200, "levels": 15750 },
{ "iso": 250, "levels": 15750 },
{ "iso": 320, "levels": 12800 }, // typical: 12810
{ "iso": 400, "levels": 15750 },
{ "iso": 500, "levels": 15750 },
{ "iso": 640, "levels": 12800 },
{ "iso": 800, "levels": 15750 },
{ "iso": 1000, "levels": 15750 },
{ "iso": 1250, "levels": 12800 },
{ "iso": 1600, "levels": 15750 },
{ "iso": 2000, "levels": 15750 },
{ "iso": 2500, "levels": 15750 },
{ "iso": 3200, "levels": 15750 },
{ "iso": 4000, "levels": 15750 },
{ "iso": 5000, "levels": 15750 },
{ "iso": 6400, "levels": 16370 }, // typical: 16383
{ "iso": 12800, "levels": 16370 },
{ "iso": 25600, "levels": 16370 }
],
"white_max": 16383,
"aperture_scaling": [
/* note: no scale factors known for f/1.2 and f/1.0 (had no lenses to test with), but the
typical 15750 white level maxes out at "white_max" for f/1.8 and below anyway. */
{ "aperture": 1.4, "scale_factor": 1.077 },
{ "aperture": 1.6, "scale_factor": 1.054 },
{ "aperture": 1.8, "scale_factor": 1.039 },
{ "aperture": 2.0, "scale_factor": 1.031 },
{ "aperture": 2.2, "scale_factor": 1.021 },
{ "aperture": 2.5, "scale_factor": 1.016 },
{ "aperture": 2.8, "scale_factor": 1.010 },
{ "aperture": 3.2, "scale_factor": 1.0046 },
{ "aperture": 3.5, "scale_factor": 1.0031 }
]
}
},
{
"make_model": "Nikon D7000",
"dcraw_matrix": [ 7530,-1942,-255,-4318,11390,3362,-926,1694,7649 ], // matrix provided by Tanveer(tsk1979)
/* The camera hasn't been tested for aperture scaling or ISO differences in white levels, i.e. there
may be further improvements to make. The differences between the white levels of the channels are so
small that there's little value in providing one for each (rather than just setting a single value at
15760), but we do it anyway, as this is the first entry added after the separate-white-level code and
we wanted an entry to show the concept. G1 and G2 have the same level, thus only 3 numbers in the array. */
"ranges": { "white": [ 16370, 15760, 16370 ] }
},
// Phase One: color matrices borrowed from Adobe DNG Converter, black/white levels tested on actual raw files
{
"make_model": "Phase One P40+",
@@ -201,6 +352,10 @@ Examples:
"make_model": "DummyMake DummyModel",
"dcraw_matrix": [ 7530,-1942,-255,-4318,11390,3362,-926,1694,7649 ],
"ranges": {
"aperture_scaling": [
{ "aperture": 1.2, "scale_factor": 1.1 },
{ "aperture": 1.4, "scale_factor": 1.08 }
],
"black": [
{ "iso": 100 , "levels": [ 10, 20, 10, 20 ] },
{ "iso": 3200, "levels": [ 50, 60, 50, 60 ] }
@@ -208,7 +363,8 @@ Examples:
"white": [
{ "iso": 100 , "levels": [ 10000, 11000, 10000, 11000 ] },
{ "iso": 3200, "levels": [ 11000, 11000, 10000, 11000 ] }
]
],
"white_max": 16383
}
}
]}

View File

@@ -28,6 +28,9 @@ RawImage::RawImage( const Glib::ustring name )
,allocation(NULL)
{
memset(maximum_c4, 0, sizeof(maximum_c4));
RT_matrix_from_constant = 0;
RT_blacklevel_from_constant = 0;
RT_whitelevel_from_constant = 0;
}
RawImage::~RawImage()
@@ -108,6 +111,27 @@ skip_block: ;
}
if (pre_mul_[3] == 0)
pre_mul_[3] = this->get_colors() < 4 ? pre_mul_[1] : 1;
bool multiple_whites = false;
int largest_white = this->get_white(0);
for (c = 1; c < 4; c++) {
if (this->get_white(c) != this->get_white(0)) {
multiple_whites = true;
if (this->get_white(c) > largest_white) {
largest_white = this->get_white(c);
}
}
}
if (multiple_whites) {
// dcraw's pre_mul/cam_mul expects a single white, so if we have provided multiple whites we need
// to adapt scaling to avoid color shifts.
for (c = 0; c < 4; c++) {
// we don't strictly need the division by largest_white, but it keeps pre_mul in a similar
// range as before the adjustment, so the values don't look strangely large if someone prints them
pre_mul_[c] *= (float)this->get_white(c) / largest_white;
}
}
for (dmin = DBL_MAX, dmax = c = 0; c < 4; c++) {
if (dmin > pre_mul_[c])
dmin = pre_mul_[c];
@@ -118,7 +142,7 @@ skip_block: ;
for (c = 0; c < 4; c++) {
int sat = this->get_white(c) - this->get_cblack(c);
scale_mul_[c] = (pre_mul_[c] /= dmax) * 65535.0 / sat;
}
}
}
int RawImage::loadRaw (bool loadData, bool closeFile)
@@ -218,11 +242,11 @@ int RawImage::loadRaw (bool loadData, bool closeFile)
if (cc) {
for (int i = 0; i < 4; i++) {
if (RT_blacklevel_from_constant) {
black_c4[i] = cc->get_BlackLevel(i, iso_speed);
black_c4[i] = cc->get_BlackLevel(i, iso_speed, aperture);
}
// load 4 channel white level here, will be used if available
if (RT_whitelevel_from_constant) {
maximum_c4[i] = cc->get_WhiteLevel(i, iso_speed);
maximum_c4[i] = cc->get_WhiteLevel(i, iso_speed, aperture);
}
}
}
@@ -304,6 +328,7 @@ RawImage::get_thumbSwap() const
bool
DCraw::dcraw_coeff_overrides(const char make[], const char model[], const int iso_speed, short trans[12], int *black_level, int *white_level)
{
static const int dcraw_arw2_scaling_bugfix_shift = 0; // not yet enabled, should be 2 when enabled
static const struct {
const char *prefix;
int black_level, white_level; // set to -1 for no change
@@ -415,21 +440,21 @@ DCraw::dcraw_coeff_overrides(const char make[], const char model[], const int is
{ 7181,-1706,-55,-3557,11409,2450,-621,2072,7533 } },
{ "Sony DSLR-A700", 126, 0, /* RT */
{ "Sony DSLR-A700", 126 << dcraw_arw2_scaling_bugfix_shift, 0, /* RT */
{ 6509,-1333,-137,-6171,13621,2824,-1490,2226,6952 } },
{ "Sony DSLR-A900", 128, 0, /* RT */
{ "Sony DSLR-A900", 128 << dcraw_arw2_scaling_bugfix_shift, 0, /* RT */
{ 5715,-1433,-410,-5603,12937,2989,-644,1247,8372 } },
{ "SONY NEX-3", 128, 0, /* RT - Colin Walker */
{ "SONY NEX-3", 128 << dcraw_arw2_scaling_bugfix_shift, 0, /* RT - Colin Walker */
{ 5145,-741,-123,-4915,12310,2945,-794,1489,6906 } },
{ "SONY NEX-5", 128, 0, /* RT - Colin Walker */
{ "SONY NEX-5", 128 << dcraw_arw2_scaling_bugfix_shift, 0, /* RT - Colin Walker */
{ 5154,-716,-115,-5065,12506,2882,-988,1715,6800 } },
{ "Sony NEX-5N", 128, 0, /* RT - Colin Walker */
{ "Sony NEX-5N", 128 << dcraw_arw2_scaling_bugfix_shift, 0, /* RT - Colin Walker */
{ 5130,-1055,-269,-4473,11797,3050,-701,1310,7121 } },
{ "Sony NEX-5R", 128, 0,
{ "Sony NEX-5R", 128 << dcraw_arw2_scaling_bugfix_shift, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "SONY NEX-C3", 128, 0, /* RT - Colin Walker */
{ "SONY NEX-C3", 128 << dcraw_arw2_scaling_bugfix_shift, 0, /* RT - Colin Walker */
{ 5130,-1055,-269,-4473,11797,3050,-701,1310,7121 } },
{ "Sony SLT-A77", 128, 0, /* RT - Colin Walker */
{ "Sony SLT-A77", 128 << dcraw_arw2_scaling_bugfix_shift, 0, /* RT - Colin Walker */
{ 5126,-830,-261,-4788,12196,2934,-948,1602,7068 } },
};
@@ -450,8 +475,8 @@ DCraw::dcraw_coeff_overrides(const char make[], const char model[], const int is
rtengine::CameraConstantsStore* ccs = rtengine::CameraConstantsStore::getInstance();
rtengine::CameraConst *cc = ccs->get(make, model);
if (cc) {
*black_level = cc->get_BlackLevel(0, iso_speed);
*white_level = cc->get_WhiteLevel(0, iso_speed);
*black_level = cc->get_BlackLevel(0, iso_speed, aperture);
*white_level = cc->get_WhiteLevel(0, iso_speed, aperture);
if (cc->has_dcrawMatrix()) {
const short *mx = cc->get_dcrawMatrix();
for (int j = 0; j < 12; j++) {