Expose linear transform feature of libvips (#1024)

This commit is contained in:
Marcel 2018-02-04 11:36:04 +01:00 committed by Lovell Fuller
parent 73edfb3d2c
commit d599d1f29e
15 changed files with 156 additions and 1 deletions

View File

@ -18,6 +18,7 @@
- [convolve](#convolve)
- [threshold](#threshold)
- [boolean](#boolean)
- [linear](#linear)
## rotate
@ -321,3 +322,17 @@ the selected bitwise boolean `operation` between the corresponding pixels of the
- Throws **[Error](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error)** Invalid parameters
Returns **Sharp**
## linear
Apply the linear formula a \* input + b to the image (levels adjustment)
**Parameters**
- `a` **[Number](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number)?** multiplier (optional, default `1.0`)
- `b` **[Number](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number)?** offset (optional, default `0.0`)
- Throws **[Error](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Error)** Invalid parameters
Returns **Sharp**

View File

@ -203,6 +203,8 @@ const Sharp = function (input, options) {
tiffYres: 1.0,
tileSize: 256,
tileOverlap: 0,
linearA: 1,
linearB: 0,
// Function to notify of libvips warnings
debuglog: debuglog,
// Function to notify of queue length changes

View File

@ -406,6 +406,33 @@ function boolean (operand, operator, options) {
return this;
}
/**
 * Apply the linear formula a * input + b to the image (levels adjustment)
 * @param {Number} [a=1.0] multiplier
 * @param {Number} [b=0.0] offset
 * @returns {Sharp}
 * @throws {Error} Invalid parameters
 */
function linear (a, b) {
  // Multiplier: fall back to the identity value 1.0 when not provided.
  if (is.defined(a)) {
    if (!is.number(a)) {
      throw new Error('Invalid linear transform multiplier ' + a);
    }
    this.options.linearA = a;
  } else {
    this.options.linearA = 1.0;
  }
  // Offset: fall back to the neutral value 0.0 when not provided.
  if (is.defined(b)) {
    if (!is.number(b)) {
      throw new Error('Invalid linear transform offset ' + b);
    }
    this.options.linearB = b;
  } else {
    this.options.linearB = 0.0;
  }
  return this;
}
/**
* Decorate the Sharp prototype with operation-related functions.
* @private
@ -427,7 +454,8 @@ module.exports = function (Sharp) {
normalize,
convolve,
threshold,
boolean
boolean,
linear
].forEach(function (f) {
Sharp.prototype[f.name] = f;
});

View File

@ -39,6 +39,7 @@
"Guy Maliar <guy@tailorbrands.com>",
"Nicolas Coden <nicolas@ncoden.fr>",
"Matt Parrish <matt.r.parrish@gmail.com>",
"Marcel Bretschneider <marcel.bretschneider@gmail.com>",
"Matthew McEachen <matthew+github@mceachen.org>",
"Jarda Kotěšovec <jarda.kotesovec@gmail.com>",
"Kenric D'Souza <kenric.dsouza@gmail.com>",

View File

@ -341,4 +341,18 @@ namespace sharp {
return image.extract_area(left, top, width, height);
}
/*
 * Calculate (a * in + b)
 */
VImage Linear(VImage image, double const a, double const b) {
  // No alpha channel: apply the linear transform to every band.
  if (!HasAlpha(image)) {
    return image.linear(a, b);
  }
  // Alpha present: transform the colour bands only, then rejoin the
  // untouched alpha band so transparency is preserved.
  int const bands = image.bands();
  VImage alpha = image[bands - 1];
  VImage colour = image.extract_band(0,
    VImage::option()->set("n", bands - 1));
  return colour.linear(a, b).bandjoin(alpha);
}
} // namespace sharp

View File

@ -92,6 +92,11 @@ namespace sharp {
*/
VImage Trim(VImage image, int const tolerance);
/*
* Linear adjustment (a * in + b)
*/
VImage Linear(VImage image, double const a, double const b);
} // namespace sharp
#endif // SRC_OPERATIONS_H_

View File

@ -644,6 +644,11 @@ class PipelineWorker : public Nan::AsyncWorker {
image = sharp::Gamma(image, baton->gamma);
}
// Linear adjustment (a * in + b)
if (baton->linearA != 1.0 || baton->linearB != 0.0) {
image = sharp::Linear(image, baton->linearA, baton->linearB);
}
// Apply normalisation - stretch luminance to cover full dynamic range
if (baton->normalise) {
image = sharp::Normalise(image);
@ -1185,6 +1190,8 @@ NAN_METHOD(pipeline) {
baton->thresholdGrayscale = AttrTo<bool>(options, "thresholdGrayscale");
baton->trimTolerance = AttrTo<int32_t>(options, "trimTolerance");
baton->gamma = AttrTo<double>(options, "gamma");
baton->linearA = AttrTo<double>(options, "linearA");
baton->linearB = AttrTo<double>(options, "linearB");
baton->greyscale = AttrTo<bool>(options, "greyscale");
baton->normalise = AttrTo<bool>(options, "normalise");
baton->useExifOrientation = AttrTo<bool>(options, "useExifOrientation");

View File

@ -79,6 +79,8 @@ struct PipelineBaton {
int threshold;
bool thresholdGrayscale;
int trimTolerance;
double linearA;
double linearB;
double gamma;
bool greyscale;
bool normalise;
@ -160,6 +162,8 @@ struct PipelineBaton {
threshold(0),
thresholdGrayscale(true),
trimTolerance(0),
linearA(1.0),
linearB(0.0),
gamma(0.0),
greyscale(false),
normalise(false),

Binary file not shown.

After

Width:  |  Height:  |  Size: 154 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 112 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 179 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 18 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.5 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 11 KiB

79
test/unit/linear.js Normal file
View File

@ -0,0 +1,79 @@
'use strict';
// Unit tests for the linear (levels adjustment) operation: out = a * in + b
const sharp = require('../../');
const fixtures = require('../fixtures');
const assert = require('assert');
describe('Linear adjustment', function () {
  // Derive a slope/offset pair that stretches the tonal range
  // [blackPoint, whitePoint] onto the full 8-bit range [0, 255].
  const blackPoint = 70;
  const whitePoint = 203;
  const a = 255 / (whitePoint - blackPoint);
  const b = -blackPoint * a;
  // Both multiplier and offset, image without an alpha channel
  it('applies linear levels adjustment w/o alpha ch', function (done) {
    sharp(fixtures.inputJpgWithLowContrast)
      .linear(a, b)
      .toBuffer(function (err, data, info) {
        if (err) throw err;
        fixtures.assertSimilar(fixtures.expected('low-contrast-linear.jpg'), data, done);
      });
  });
  // Multiplier only: offset defaults to 0
  it('applies slope level adjustment w/o alpha ch', function (done) {
    sharp(fixtures.inputJpgWithLowContrast)
      .linear(a)
      .toBuffer(function (err, data, info) {
        if (err) throw err;
        fixtures.assertSimilar(fixtures.expected('low-contrast-slope.jpg'), data, done);
      });
  });
  // Offset only: passing null for `a` leaves the multiplier at its default 1
  it('applies offset level adjustment w/o alpha ch', function (done) {
    sharp(fixtures.inputJpgWithLowContrast)
      .linear(null, b)
      .toBuffer(function (err, data, info) {
        if (err) throw err;
        fixtures.assertSimilar(fixtures.expected('low-contrast-offset.jpg'), data, done);
      });
  });
  // Same three cases against a PNG with an alpha channel; the transform
  // is expected to leave transparency intact
  it('applies linear levels adjustment w alpha ch', function (done) {
    sharp(fixtures.inputPngOverlayLayer1)
      .linear(a, b)
      .toBuffer(function (err, data, info) {
        if (err) throw err;
        fixtures.assertSimilar(fixtures.expected('alpha-layer-1-fill-linear.png'), data, done);
      });
  });
  it('applies slope level adjustment w alpha ch', function (done) {
    sharp(fixtures.inputPngOverlayLayer1)
      .linear(a)
      .toBuffer(function (err, data, info) {
        if (err) throw err;
        fixtures.assertSimilar(fixtures.expected('alpha-layer-1-fill-slope.png'), data, done);
      });
  });
  it('applies offset level adjustment w alpha ch', function (done) {
    sharp(fixtures.inputPngOverlayLayer1)
      .linear(null, b)
      .toBuffer(function (err, data, info) {
        if (err) throw err;
        fixtures.assertSimilar(fixtures.expected('alpha-layer-1-fill-offset.png'), data, done);
      });
  });
  // Non-numeric arguments must be rejected synchronously
  it('Invalid linear arguments', function () {
    assert.throws(function () {
      sharp(fixtures.inputPngOverlayLayer1)
        .linear('foo');
    });
    assert.throws(function () {
      sharp(fixtures.inputPngOverlayLayer1)
        .linear(undefined, { 'bar': 'baz' });
    });
  });
});