Improve multi-frame image resizing (#2789)
* Ports vips_thumbnail logic to sharp
* Deprecates the pageHeight output option for WebP/GIF
@@ -458,9 +458,8 @@ function png (options) {
  * @param {boolean} [options.nearLossless=false] - use near_lossless compression mode
  * @param {boolean} [options.smartSubsample=false] - use high quality chroma subsampling
  * @param {number} [options.effort=4] - CPU effort, between 0 (fastest) and 6 (slowest)
- * @param {number} [options.pageHeight] - page height for animated output
  * @param {number} [options.loop=0] - number of animation iterations, use 0 for infinite animation
- * @param {number[]} [options.delay] - list of delays between animation frames (in milliseconds)
+ * @param {number|number[]} [options.delay] - delay(s) between animation frames (in milliseconds)
  * @param {boolean} [options.force=true] - force WebP output, otherwise attempt to use input format
  * @returns {Sharp}
  * @throws {Error} Invalid options
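For context, the updated animation options can be used like this; a minimal sketch, with an illustrative input filename. A single `delay` value now applies to every frame and `pageHeight` is no longer passed:

```js
const sharp = require('sharp');

// Hypothetical input path; any animated WebP or GIF works.
// pages: -1 loads every frame as a single tall "toilet roll" image.
sharp('input-animated.webp', { pages: -1 })
  .resize(320)
  .webp({
    loop: 3,    // play the animation three times
    delay: 100, // one value is repeated for every frame
    effort: 4
  })
  .toFile('output-animated.webp')
  .then(info => console.log(info.width, info.height, info.size))
  .catch(console.error);
```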
@@ -527,10 +526,7 @@ function webp (options) {
  *     width: 128,
  *     height: 128 * pages
  *   })
- *   .gif({
- *     pageHeight: 128,
- *     dither: 0
- *   })
+ *   .gif({ dither: 0 })
  *   .toBuffer();
  *
  * @param {Object} [options] - output options
@@ -538,9 +534,8 @@ function webp (options) {
  * @param {number} [options.colors=256] - alternative spelling of `options.colours`
  * @param {number} [options.effort=7] - CPU effort, between 1 (fastest) and 10 (slowest)
  * @param {number} [options.dither=1.0] - level of Floyd-Steinberg error diffusion, between 0 (least) and 1 (most)
- * @param {number} [options.pageHeight] - page height for animated output
  * @param {number} [options.loop=0] - number of animation iterations, use 0 for infinite animation
- * @param {number[]} [options.delay] - list of delays between animation frames (in milliseconds)
+ * @param {number|number[]} [options.delay] - delay(s) between animation frames (in milliseconds)
  * @param {boolean} [options.force=true] - force GIF output, otherwise attempt to use input format
  * @returns {Sharp}
  * @throws {Error} Invalid options
@@ -657,20 +652,12 @@ function jp2 (options) {
  * @private
  *
  * @param {Object} [source] - output options
- * @param {number} [source.pageHeight] - page height for animated output
  * @param {number} [source.loop=0] - number of animation iterations, use 0 for infinite animation
  * @param {number[]} [source.delay] - list of delays between animation frames (in milliseconds)
  * @param {Object} [target] - target object for valid options
  * @throws {Error} Invalid options
  */
 function trySetAnimationOptions (source, target) {
-  if (is.object(source) && is.defined(source.pageHeight)) {
-    if (is.integer(source.pageHeight) && source.pageHeight > 0) {
-      target.pageHeight = source.pageHeight;
-    } else {
-      throw is.invalidParameterError('pageHeight', 'integer larger than 0', source.pageHeight);
-    }
-  }
   if (is.object(source) && is.defined(source.loop)) {
     if (is.integer(source.loop) && is.inRange(source.loop, 0, 65535)) {
       target.loop = source.loop;
@@ -679,13 +666,16 @@ function trySetAnimationOptions (source, target) {
     }
   }
   if (is.object(source) && is.defined(source.delay)) {
-    if (
+    // We allow singular values as well
+    if (is.integer(source.delay) && is.inRange(source.delay, 0, 65535)) {
+      target.delay = [source.delay];
+    } else if (
       Array.isArray(source.delay) &&
       source.delay.every(is.integer) &&
       source.delay.every(v => is.inRange(v, 0, 65535))) {
       target.delay = source.delay;
     } else {
-      throw is.invalidParameterError('delay', 'array of integers between 0 and 65535', source.delay);
+      throw is.invalidParameterError('delay', 'integer or an array of integers between 0 and 65535', source.delay);
     }
   }
 }
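The validator above now accepts either a single integer or an array of integers in the 0 to 65535 range. A standalone sketch of the same rule, purely illustrative and not the library's internal helper:

```js
// Sketch of the delay normalisation rule described above (illustrative only).
function normaliseDelay (delay) {
  const inRange = (v) => Number.isInteger(v) && v >= 0 && v <= 65535;
  if (inRange(delay)) {
    // A single value is wrapped into an array; the native layer later
    // repeats it once per frame.
    return [delay];
  }
  if (Array.isArray(delay) && delay.every(inRange)) {
    return delay;
  }
  throw new Error('Expected integer or an array of integers between 0 and 65535 for delay');
}

console.log(normaliseDelay(100));        // [ 100 ]
console.log(normaliseDelay([120, 90]));  // [ 120, 90 ]
```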
src/common.cc (108 lines changed)
@@ -490,30 +490,24 @@ namespace sharp {

   /*
     Set animation properties if necessary.
-    Non-provided properties will be loaded from image.
   */
-  VImage SetAnimationProperties(VImage image, int pageHeight, std::vector<int> delay, int loop) {
-    bool hasDelay = delay.size() != 1 || delay.front() != -1;
+  VImage SetAnimationProperties(VImage image, int nPages, int pageHeight, std::vector<int> delay, int loop) {
+    bool hasDelay = !delay.empty();

-    if (pageHeight == 0 && image.get_typeof(VIPS_META_PAGE_HEIGHT) == G_TYPE_INT) {
-      pageHeight = image.get_int(VIPS_META_PAGE_HEIGHT);
+    // Avoid a copy if none of the animation properties are needed.
+    if (nPages == 1 && !hasDelay && loop == -1) return image;
+
+    if (delay.size() == 1) {
+      // We have just one delay, repeat that value for all frames.
+      delay.insert(delay.end(), nPages - 1, delay[0]);
     }

-    if (!hasDelay && image.get_typeof("delay") == VIPS_TYPE_ARRAY_INT) {
-      delay = image.get_array_int("delay");
-      hasDelay = true;
-    }
-
-    if (loop == -1 && image.get_typeof("loop") == G_TYPE_INT) {
-      loop = image.get_int("loop");
-    }
-
-    if (pageHeight == 0) return image;
-
-    // It is necessary to create the copy as otherwise, pageHeight will be ignored!
+    // Attaching metadata, need to copy the image.
     VImage copy = image.copy();

-    copy.set(VIPS_META_PAGE_HEIGHT, pageHeight);
+    // Only set page-height if we have more than one page, or this could
+    // accidentally turn into an animated image later.
+    if (nPages > 1) copy.set(VIPS_META_PAGE_HEIGHT, pageHeight);
     if (hasDelay) copy.set("delay", delay);
     if (loop != -1) copy.set("loop", loop);
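The rules above are small but easy to get wrong when reasoning about output: a lone delay value is repeated for every frame, and page-height is only attached when there is more than one page. A JavaScript restatement of those rules, illustrative rather than the library's code:

```js
// Illustrative restatement of the animation-property rules above.
function animationProperties ({ nPages, pageHeight, delay = [], loop = -1 }) {
  if (nPages === 1 && delay.length === 0 && loop === -1) {
    return {}; // nothing to attach, the image is returned untouched
  }
  if (delay.length === 1) {
    // One delay provided: repeat it for all frames.
    delay = Array(nPages).fill(delay[0]);
  }
  const props = {};
  if (nPages > 1) props['page-height'] = pageHeight; // never on single-page images
  if (delay.length > 0) props.delay = delay;
  if (loop !== -1) props.loop = loop;
  return props;
}

console.log(animationProperties({ nPages: 3, pageHeight: 128, delay: [100], loop: 0 }));
// { 'page-height': 128, delay: [ 100, 100, 100 ], loop: 0 }
```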
@@ -556,6 +550,14 @@ namespace sharp {
     return copy;
   }

+  /*
+    Multi-page images can have a page height. Fetch it, and sanity check it.
+    If page-height is not set, it defaults to the image height
+  */
+  int GetPageHeight(VImage image) {
+    return vips_image_get_page_height(image.get_image());
+  }
+
   /*
     Check the proposed format supports the current dimensions.
   */
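libvips falls back to the full image height when the page-height metadata is absent, so for a single-frame image GetPageHeight simply returns the image height. On the JavaScript side the same per-frame height can be derived from metadata; a small sketch, with an illustrative fixture name:

```js
const sharp = require('sharp');

// Derive the per-frame (page) height of an animated image loaded as a
// single tall strip. For single-frame images this is just the height.
async function pageHeightOf (input) {
  const { height, pages = 1 } = await sharp(input, { pages: -1 }).metadata();
  return height / pages;
}

pageHeightOf('input-animated.webp').then(h => console.log('frame height', h));
```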
@@ -882,4 +884,74 @@ namespace sharp {
     return image;
   }

+  std::pair<double, double> ResolveShrink(int width, int height, int targetWidth, int targetHeight,
+    Canvas canvas, bool swap, bool withoutEnlargement) {
+    if (swap) {
+      // Swap input width and height when requested.
+      std::swap(width, height);
+    }
+
+    double hshrink = 1.0;
+    double vshrink = 1.0;
+
+    if (targetWidth > 0 && targetHeight > 0) {
+      // Fixed width and height
+      hshrink = static_cast<double>(width) / targetWidth;
+      vshrink = static_cast<double>(height) / targetHeight;
+
+      switch (canvas) {
+        case Canvas::CROP:
+        case Canvas::MIN:
+          if (hshrink < vshrink) {
+            vshrink = hshrink;
+          } else {
+            hshrink = vshrink;
+          }
+          break;
+        case Canvas::EMBED:
+        case Canvas::MAX:
+          if (hshrink > vshrink) {
+            vshrink = hshrink;
+          } else {
+            hshrink = vshrink;
+          }
+          break;
+        case Canvas::IGNORE_ASPECT:
+          if (swap) {
+            std::swap(hshrink, vshrink);
+          }
+          break;
+      }
+    } else if (targetWidth > 0) {
+      // Fixed width
+      hshrink = static_cast<double>(width) / targetWidth;
+
+      if (canvas != Canvas::IGNORE_ASPECT) {
+        // Auto height
+        vshrink = hshrink;
+      }
+    } else if (targetHeight > 0) {
+      // Fixed height
+      vshrink = static_cast<double>(height) / targetHeight;
+
+      if (canvas != Canvas::IGNORE_ASPECT) {
+        // Auto width
+        hshrink = vshrink;
+      }
+    }
+
+    // We should not enlarge (oversample) the output image,
+    // if withoutEnlargement is specified.
+    if (withoutEnlargement) {
+      hshrink = std::max(1.0, hshrink);
+      vshrink = std::max(1.0, vshrink);
+    }
+
+    // We don't want to shrink so much that we send an axis to 0
+    hshrink = std::min(hshrink, static_cast<double>(width));
+    vshrink = std::min(vshrink, static_cast<double>(height));
+
+    return std::make_pair(hshrink, vshrink);
+  }
+
 } // namespace sharp
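ResolveShrink reduces every fit mode to a pair of per-axis shrink factors, input size divided by target size. A JavaScript sketch of the same calculation, useful for predicting output dimensions ahead of time; the function and canvas names mirror the C++ above but this is an illustration, not part of the public API, and the rotation swap is omitted for brevity:

```js
// Illustrative re-statement of ResolveShrink: returns [hshrink, vshrink].
function resolveShrink (width, height, targetWidth, targetHeight, canvas, withoutEnlargement) {
  let hshrink = 1;
  let vshrink = 1;
  if (targetWidth > 0 && targetHeight > 0) {
    hshrink = width / targetWidth;
    vshrink = height / targetHeight;
    if (canvas === 'crop' || canvas === 'min') {
      // Cover the target: use the smaller shrink on both axes.
      hshrink = vshrink = Math.min(hshrink, vshrink);
    } else if (canvas === 'embed' || canvas === 'max') {
      // Fit inside the target: use the larger shrink on both axes.
      hshrink = vshrink = Math.max(hshrink, vshrink);
    }
  } else if (targetWidth > 0) {
    hshrink = width / targetWidth;
    if (canvas !== 'ignore_aspect') vshrink = hshrink; // auto height
  } else if (targetHeight > 0) {
    vshrink = height / targetHeight;
    if (canvas !== 'ignore_aspect') hshrink = vshrink; // auto width
  }
  if (withoutEnlargement) {
    hshrink = Math.max(1, hshrink);
    vshrink = Math.max(1, vshrink);
  }
  // Never shrink an axis below a single pixel.
  return [Math.min(hshrink, width), Math.min(vshrink, height)];
}

// e.g. a 740x570 frame resized to height 320 with the default 'crop' canvas:
console.log(resolveShrink(740, 570, 0, 320, 'crop', false)); // [ 1.78125, 1.78125 ]
```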
src/common.h (26 lines changed)
@@ -135,6 +135,14 @@ namespace sharp {
     MISSING
   };

+  enum class Canvas {
+    CROP,
+    EMBED,
+    MAX,
+    MIN,
+    IGNORE_ASPECT
+  };
+
   // How many tasks are in the queue?
   extern volatile int counterQueue;

@@ -208,9 +216,8 @@ namespace sharp {

   /*
     Set animation properties if necessary.
-    Non-provided properties will be loaded from image.
   */
-  VImage SetAnimationProperties(VImage image, int pageHeight, std::vector<int> delay, int loop);
+  VImage SetAnimationProperties(VImage image, int nPages, int pageHeight, std::vector<int> delay, int loop);

   /*
     Remove animation properties from image.
@@ -232,6 +239,12 @@ namespace sharp {
   */
   VImage SetDensity(VImage image, const double density);

+  /*
+    Multi-page images can have a page height. Fetch it, and sanity check it.
+    If page-height is not set, it defaults to the image height
+  */
+  int GetPageHeight(VImage image);
+
   /*
     Check the proposed format supports the current dimensions.
   */
@@ -325,6 +338,15 @@ namespace sharp {
   */
   VImage EnsureAlpha(VImage image, double const value);

+  /*
+    Calculate the shrink factor, taking into account auto-rotate, the canvas
+    mode, and so on. The hshrink/vshrink are the amount to shrink the input
+    image axes by in order for the output axes (ie. after rotation) to match
+    the required thumbnail width/height and canvas mode.
+  */
+  std::pair<double, double> ResolveShrink(int width, int height, int targetWidth, int targetHeight,
+    Canvas canvas, bool swap, bool withoutEnlargement);
+
 } // namespace sharp

 #endif // SRC_COMMON_H_
src/metadata.cc
@@ -180,8 +180,6 @@ class MetadataWorker : public Napi::AsyncWorker {
     }
     if (baton->pageHeight > 0) {
       info.Set("pageHeight", baton->pageHeight);
-    } else if (baton->pages > 0) {
-      info.Set("pageHeight", baton->height);
     }
     if (baton->loop >= 0) {
       info.Set("loop", baton->loop);
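With this change, metadata appears to report pageHeight only when the image actually carries page-height metadata, rather than defaulting it to the image height for single-frame inputs. A usage sketch, with an illustrative fixture name, matching the updated tests further down:

```js
const sharp = require('sharp');

sharp('input-animated.webp', { pages: -1 })
  .metadata()
  .then(({ width, height, pages, delay, loop }) => {
    // With pages: -1, height covers the full strip (frames * frame height);
    // pageHeight is only present when the image carries page-height metadata.
    console.log({ width, height, pages, delay, loop });
  });
```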
@ -308,4 +308,98 @@ namespace sharp {
|
||||
return image;
|
||||
}
|
||||
|
||||
/*
|
||||
* Split and crop each frame, reassemble, and update pageHeight.
|
||||
*/
|
||||
VImage CropMultiPage(VImage image, int left, int top, int width, int height,
|
||||
int nPages, int *pageHeight) {
|
||||
if (top == 0 && height == *pageHeight) {
|
||||
// Fast path; no need to adjust the height of the multi-page image
|
||||
return image.extract_area(left, 0, width, image.height());
|
||||
} else {
|
||||
std::vector<VImage> pages;
|
||||
pages.reserve(nPages);
|
||||
|
||||
// Split the image into cropped frames
|
||||
for (int i = 0; i < nPages; i++) {
|
||||
pages.push_back(
|
||||
image.extract_area(left, *pageHeight * i + top, width, height));
|
||||
}
|
||||
|
||||
// Reassemble the frames into a tall, thin image
|
||||
VImage assembled = VImage::arrayjoin(pages,
|
||||
VImage::option()->set("across", 1));
|
||||
|
||||
// Update the page height
|
||||
*pageHeight = height;
|
||||
|
||||
return assembled;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Split into frames, embed each frame, reassemble, and update pageHeight.
|
||||
*/
|
||||
VImage EmbedMultiPage(VImage image, int left, int top, int width, int height,
|
||||
std::vector<double> background, int nPages, int *pageHeight) {
|
||||
if (top == 0 && height == *pageHeight) {
|
||||
// Fast path; no need to adjust the height of the multi-page image
|
||||
return image.embed(left, 0, width, image.height(), VImage::option()
|
||||
->set("extend", VIPS_EXTEND_BACKGROUND)
|
||||
->set("background", background));
|
||||
} else if (left == 0 && width == image.width()) {
|
||||
// Fast path; no need to adjust the width of the multi-page image
|
||||
std::vector<VImage> pages;
|
||||
pages.reserve(nPages);
|
||||
|
||||
// Rearrange the tall image into a vertical grid
|
||||
image = image.grid(*pageHeight, nPages, 1);
|
||||
|
||||
// Do the embed on the wide image
|
||||
image = image.embed(0, top, image.width(), height, VImage::option()
|
||||
->set("extend", VIPS_EXTEND_BACKGROUND)
|
||||
->set("background", background));
|
||||
|
||||
// Split the wide image into frames
|
||||
for (int i = 0; i < nPages; i++) {
|
||||
pages.push_back(
|
||||
image.extract_area(width * i, 0, width, height));
|
||||
}
|
||||
|
||||
// Reassemble the frames into a tall, thin image
|
||||
VImage assembled = VImage::arrayjoin(pages,
|
||||
VImage::option()->set("across", 1));
|
||||
|
||||
// Update the page height
|
||||
*pageHeight = height;
|
||||
|
||||
return assembled;
|
||||
} else {
|
||||
std::vector<VImage> pages;
|
||||
pages.reserve(nPages);
|
||||
|
||||
// Split the image into frames
|
||||
for (int i = 0; i < nPages; i++) {
|
||||
pages.push_back(
|
||||
image.extract_area(0, *pageHeight * i, image.width(), *pageHeight));
|
||||
}
|
||||
|
||||
// Embed each frame in the target size
|
||||
for (int i = 0; i < nPages; i++) {
|
||||
pages[i] = pages[i].embed(left, top, width, height, VImage::option()
|
||||
->set("extend", VIPS_EXTEND_BACKGROUND)
|
||||
->set("background", background));
|
||||
}
|
||||
|
||||
// Reassemble the frames into a tall, thin image
|
||||
VImage assembled = VImage::arrayjoin(pages,
|
||||
VImage::option()->set("across", 1));
|
||||
|
||||
// Update the page height
|
||||
*pageHeight = height;
|
||||
|
||||
return assembled;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace sharp
|
||||
|
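Both helpers above rely on the "toilet roll" layout: an n-page image is a single tall strip in which frame i occupies the rows from i * pageHeight up to (i + 1) * pageHeight. A small JavaScript sketch of that bookkeeping, purely illustrative, shows how a per-frame crop maps back into strip coordinates:

```js
// Illustrative only: map a per-frame crop to regions of the tall strip.
function framesToStripRegions ({ nPages, pageHeight, left, top, width, height }) {
  const regions = [];
  for (let i = 0; i < nPages; i++) {
    // Crop the same rectangle out of every frame...
    regions.push({ left, top: i * pageHeight + top, width, height });
  }
  // ...then the cropped frames are re-joined vertically, so the new
  // page height of the strip becomes the crop height.
  return { regions, newPageHeight: height };
}

console.log(framesToStripRegions({ nPages: 3, pageHeight: 80, left: 0, top: 30, width: 80, height: 20 }));
// regions at top 30, 110, 190; newPageHeight 20
```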
@ -108,6 +108,18 @@ namespace sharp {
|
||||
*/
|
||||
VImage EnsureColourspace(VImage image, VipsInterpretation colourspace);
|
||||
|
||||
/*
|
||||
* Split and crop each frame, reassemble, and update pageHeight.
|
||||
*/
|
||||
VImage CropMultiPage(VImage image, int left, int top, int width, int height,
|
||||
int nPages, int *pageHeight);
|
||||
|
||||
/*
|
||||
* Split into frames, embed each frame, reassemble, and update pageHeight.
|
||||
*/
|
||||
VImage EmbedMultiPage(VImage image, int left, int top, int width, int height,
|
||||
std::vector<double> background, int nPages, int *pageHeight);
|
||||
|
||||
} // namespace sharp
|
||||
|
||||
#endif // SRC_OPERATIONS_H_
|
||||
src/pipeline.cc (430 lines changed)
@ -69,6 +69,15 @@ class PipelineWorker : public Napi::AsyncWorker {
|
||||
std::tie(image, inputImageType) = sharp::OpenInput(baton->input);
|
||||
image = sharp::EnsureColourspace(image, baton->colourspaceInput);
|
||||
|
||||
int nPages = baton->input->pages;
|
||||
if (nPages == -1) {
|
||||
// Resolve the number of pages if we need to render until the end of the document
|
||||
nPages = image.get_typeof(VIPS_META_N_PAGES) != 0 ? image.get_int(VIPS_META_N_PAGES) : 1;
|
||||
}
|
||||
|
||||
// Get pre-resize page height
|
||||
int pageHeight = sharp::GetPageHeight(image);
|
||||
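A pages value of -1 means "render every page/frame"; the block above resolves it to a concrete count from the n-pages metadata before any geometry is calculated. From the JavaScript side this is simply, with an illustrative fixture name:

```js
const sharp = require('sharp');

// Load every frame of an animated image; page selects the first frame to read.
const allFrames = sharp('input-animated.gif', { pages: -1, page: 0 });

// Load a single frame; animation properties are then stripped from the output.
const oneFrame = sharp('input-animated.gif', { page: 2 });
```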
|
||||
// Calculate angle of rotation
|
||||
VipsAngle rotation;
|
||||
if (baton->useExifOrientation) {
|
||||
@ -104,194 +113,171 @@ class PipelineWorker : public Napi::AsyncWorker {
|
||||
|
||||
// Pre extraction
|
||||
if (baton->topOffsetPre != -1) {
|
||||
image = image.extract_area(baton->leftOffsetPre, baton->topOffsetPre, baton->widthPre, baton->heightPre);
|
||||
image = nPages > 1
|
||||
? sharp::CropMultiPage(image,
|
||||
baton->leftOffsetPre, baton->topOffsetPre, baton->widthPre, baton->heightPre, nPages, &pageHeight)
|
||||
: image.extract_area(baton->leftOffsetPre, baton->topOffsetPre, baton->widthPre, baton->heightPre);
|
||||
}
|
||||
|
||||
// Get pre-resize image width and height
|
||||
int inputWidth = image.width();
|
||||
int inputHeight = image.height();
|
||||
if (!baton->rotateBeforePreExtract &&
|
||||
(rotation == VIPS_ANGLE_D90 || rotation == VIPS_ANGLE_D270)) {
|
||||
// Swap input output width and height when rotating by 90 or 270 degrees
|
||||
std::swap(inputWidth, inputHeight);
|
||||
}
|
||||
|
||||
// If withoutEnlargement is specified,
|
||||
// Override target width and height if exceeds respective value from input file
|
||||
if (baton->withoutEnlargement) {
|
||||
if (baton->width > inputWidth) {
|
||||
baton->width = inputWidth;
|
||||
}
|
||||
if (baton->height > inputHeight) {
|
||||
baton->height = inputHeight;
|
||||
}
|
||||
// Is there just one page? Shrink to inputHeight instead
|
||||
if (nPages == 1) {
|
||||
pageHeight = inputHeight;
|
||||
}
|
||||
|
||||
// Scaling calculations
|
||||
double xfactor = 1.0;
|
||||
double yfactor = 1.0;
|
||||
double hshrink;
|
||||
double vshrink;
|
||||
int targetResizeWidth = baton->width;
|
||||
int targetResizeHeight = baton->height;
|
||||
if (baton->width > 0 && baton->height > 0) {
|
||||
// Fixed width and height
|
||||
xfactor = static_cast<double>(inputWidth) / static_cast<double>(baton->width);
|
||||
yfactor = static_cast<double>(inputHeight) / static_cast<double>(baton->height);
|
||||
switch (baton->canvas) {
|
||||
case Canvas::CROP:
|
||||
if (xfactor < yfactor) {
|
||||
targetResizeHeight = static_cast<int>(round(static_cast<double>(inputHeight) / xfactor));
|
||||
yfactor = xfactor;
|
||||
} else {
|
||||
targetResizeWidth = static_cast<int>(round(static_cast<double>(inputWidth) / yfactor));
|
||||
xfactor = yfactor;
|
||||
}
|
||||
break;
|
||||
case Canvas::EMBED:
|
||||
if (xfactor > yfactor) {
|
||||
targetResizeHeight = static_cast<int>(round(static_cast<double>(inputHeight) / xfactor));
|
||||
yfactor = xfactor;
|
||||
} else {
|
||||
targetResizeWidth = static_cast<int>(round(static_cast<double>(inputWidth) / yfactor));
|
||||
xfactor = yfactor;
|
||||
}
|
||||
break;
|
||||
case Canvas::MAX:
|
||||
if (xfactor > yfactor) {
|
||||
targetResizeHeight = baton->height = static_cast<int>(round(static_cast<double>(inputHeight) / xfactor));
|
||||
yfactor = xfactor;
|
||||
} else {
|
||||
targetResizeWidth = baton->width = static_cast<int>(round(static_cast<double>(inputWidth) / yfactor));
|
||||
xfactor = yfactor;
|
||||
}
|
||||
break;
|
||||
case Canvas::MIN:
|
||||
if (xfactor < yfactor) {
|
||||
targetResizeHeight = baton->height = static_cast<int>(round(static_cast<double>(inputHeight) / xfactor));
|
||||
yfactor = xfactor;
|
||||
} else {
|
||||
targetResizeWidth = baton->width = static_cast<int>(round(static_cast<double>(inputWidth) / yfactor));
|
||||
xfactor = yfactor;
|
||||
}
|
||||
break;
|
||||
case Canvas::IGNORE_ASPECT:
|
||||
if (!baton->rotateBeforePreExtract &&
|
||||
(rotation == VIPS_ANGLE_D90 || rotation == VIPS_ANGLE_D270)) {
|
||||
std::swap(xfactor, yfactor);
|
||||
}
|
||||
break;
|
||||
}
|
||||
} else if (baton->width > 0) {
|
||||
// Fixed width
|
||||
xfactor = static_cast<double>(inputWidth) / static_cast<double>(baton->width);
|
||||
if (baton->canvas == Canvas::IGNORE_ASPECT) {
|
||||
targetResizeHeight = baton->height = inputHeight;
|
||||
} else {
|
||||
// Auto height
|
||||
yfactor = xfactor;
|
||||
targetResizeHeight = baton->height = static_cast<int>(round(static_cast<double>(inputHeight) / yfactor));
|
||||
}
|
||||
} else if (baton->height > 0) {
|
||||
// Fixed height
|
||||
yfactor = static_cast<double>(inputHeight) / static_cast<double>(baton->height);
|
||||
if (baton->canvas == Canvas::IGNORE_ASPECT) {
|
||||
targetResizeWidth = baton->width = inputWidth;
|
||||
} else {
|
||||
// Auto width
|
||||
xfactor = yfactor;
|
||||
targetResizeWidth = baton->width = static_cast<int>(round(static_cast<double>(inputWidth) / xfactor));
|
||||
}
|
||||
} else {
|
||||
// Identity transform
|
||||
baton->width = inputWidth;
|
||||
baton->height = inputHeight;
|
||||
}
|
||||
|
||||
// Calculate integral box shrink
|
||||
int xshrink = std::max(1, static_cast<int>(floor(xfactor)));
|
||||
int yshrink = std::max(1, static_cast<int>(floor(yfactor)));
|
||||
// Swap input output width and height when rotating by 90 or 270 degrees
|
||||
bool swap = !baton->rotateBeforePreExtract && (rotation == VIPS_ANGLE_D90 || rotation == VIPS_ANGLE_D270);
|
||||
|
||||
// Calculate residual float affine transformation
|
||||
double xresidual = static_cast<double>(xshrink) / xfactor;
|
||||
double yresidual = static_cast<double>(yshrink) / yfactor;
|
||||
// Shrink to pageHeight, so we work for multi-page images
|
||||
std::tie(hshrink, vshrink) = sharp::ResolveShrink(
|
||||
inputWidth, pageHeight, targetResizeWidth, targetResizeHeight,
|
||||
baton->canvas, swap, baton->withoutEnlargement);
|
||||
|
||||
// If integral x and y shrink are equal, try to use shrink-on-load for JPEG and WebP,
|
||||
// but not when applying gamma correction, pre-resize extract, trim or input colourspace
|
||||
int shrink_on_load = 1;
|
||||
// The jpeg preload shrink.
|
||||
int jpegShrinkOnLoad = 1;
|
||||
|
||||
int shrink_on_load_factor = 1;
|
||||
// Leave at least a factor of two for the final resize step, when fastShrinkOnLoad: false
|
||||
// for more consistent results and avoid occasional small image shifting
|
||||
if (!baton->fastShrinkOnLoad) {
|
||||
shrink_on_load_factor = 2;
|
||||
}
|
||||
if (
|
||||
xshrink == yshrink && xshrink >= 2 * shrink_on_load_factor &&
|
||||
(inputImageType == sharp::ImageType::JPEG || inputImageType == sharp::ImageType::WEBP) &&
|
||||
// WebP, PDF, SVG scale
|
||||
double scale = 1.0;
|
||||
|
||||
// Try to reload input using shrink-on-load for JPEG, WebP, SVG and PDF, when:
|
||||
// - the width or height parameters are specified;
|
||||
// - gamma correction doesn't need to be applied;
|
||||
// - trimming or pre-resize extract isn't required;
|
||||
// - input colourspace is not specified;
|
||||
bool const shouldPreShrink = (targetResizeWidth > 0 || targetResizeHeight > 0) &&
|
||||
baton->gamma == 0 && baton->topOffsetPre == -1 && baton->trimThreshold == 0.0 &&
|
||||
baton->colourspaceInput == VIPS_INTERPRETATION_LAST &&
|
||||
image.width() > 3 && image.height() > 3 && baton->input->pages == 1
|
||||
) {
|
||||
if (xshrink >= 8 * shrink_on_load_factor) {
|
||||
xfactor = xfactor / 8;
|
||||
yfactor = yfactor / 8;
|
||||
shrink_on_load = 8;
|
||||
} else if (xshrink >= 4 * shrink_on_load_factor) {
|
||||
xfactor = xfactor / 4;
|
||||
yfactor = yfactor / 4;
|
||||
shrink_on_load = 4;
|
||||
} else if (xshrink >= 2 * shrink_on_load_factor) {
|
||||
xfactor = xfactor / 2;
|
||||
yfactor = yfactor / 2;
|
||||
shrink_on_load = 2;
|
||||
baton->colourspaceInput == VIPS_INTERPRETATION_LAST;
|
||||
|
||||
if (shouldPreShrink) {
|
||||
// The common part of the shrink: the bit by which both axes must be shrunk
|
||||
double shrink = std::min(hshrink, vshrink);
|
||||
|
||||
if (inputImageType == sharp::ImageType::JPEG) {
|
||||
// Leave at least a factor of two for the final resize step, when fastShrinkOnLoad: false
|
||||
// for more consistent results and avoid occasional small image shifting
|
||||
int factor = baton->fastShrinkOnLoad ? 1 : 2;
|
||||
if (shrink >= 8 * factor) {
|
||||
jpegShrinkOnLoad = 8;
|
||||
} else if (shrink >= 4 * factor) {
|
||||
jpegShrinkOnLoad = 4;
|
||||
} else if (shrink >= 2 * factor) {
|
||||
jpegShrinkOnLoad = 2;
|
||||
}
|
||||
} else if (inputImageType == sharp::ImageType::WEBP ||
|
||||
inputImageType == sharp::ImageType::SVG ||
|
||||
inputImageType == sharp::ImageType::PDF) {
|
||||
scale = 1.0 / shrink;
|
||||
}
|
||||
}
|
||||
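The pre-shrink selection above picks the largest whole factor that still leaves work for the final kernel-based resize; JPEG decoders only support 2, 4 or 8, while the WebP, SVG and PDF loaders take an arbitrary scale. A sketch of that selection, illustrative and mirroring the logic above:

```js
// Illustrative: choose the load-time shrink for a given total shrink factor.
function preShrinkFor (format, shrink, fastShrinkOnLoad = true) {
  if (format === 'jpeg') {
    // Leave at least a factor of two for the final resize when
    // fastShrinkOnLoad is disabled, for more consistent results.
    const factor = fastShrinkOnLoad ? 1 : 2;
    if (shrink >= 8 * factor) return { shrink: 8 };
    if (shrink >= 4 * factor) return { shrink: 4 };
    if (shrink >= 2 * factor) return { shrink: 2 };
    return { shrink: 1 };
  }
  if (['webp', 'svg', 'pdf'].includes(format)) {
    // These loaders accept a fractional scale directly.
    return { scale: 1 / shrink };
  }
  return {};
}

console.log(preShrinkFor('jpeg', 7.5));        // { shrink: 4 }
console.log(preShrinkFor('jpeg', 7.5, false)); // { shrink: 2 }
console.log(preShrinkFor('webp', 7.5));        // { scale: 0.1333... }
```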
// Help ensure a final kernel-based reduction to prevent shrink aliasing
|
||||
if (shrink_on_load > 1 && (xresidual == 1.0 || yresidual == 1.0)) {
|
||||
shrink_on_load = shrink_on_load / 2;
|
||||
xfactor = xfactor * 2;
|
||||
yfactor = yfactor * 2;
|
||||
}
|
||||
if (shrink_on_load > 1) {
|
||||
// Reload input using shrink-on-load
|
||||
|
||||
// Reload input using shrink-on-load, it'll be an integer shrink
|
||||
// factor for jpegload*, a double scale factor for webpload*,
|
||||
// pdfload* and svgload*
|
||||
if (jpegShrinkOnLoad > 1) {
|
||||
vips::VOption *option = VImage::option()
|
||||
->set("access", baton->input->access)
|
||||
->set("shrink", shrink_on_load)
|
||||
->set("shrink", jpegShrinkOnLoad)
|
||||
->set("fail", baton->input->failOnError);
|
||||
if (baton->input->buffer != nullptr) {
|
||||
// Reload JPEG buffer
|
||||
VipsBlob *blob = vips_blob_new(nullptr, baton->input->buffer, baton->input->bufferLength);
|
||||
if (inputImageType == sharp::ImageType::JPEG) {
|
||||
// Reload JPEG buffer
|
||||
image = VImage::jpegload_buffer(blob, option);
|
||||
} else {
|
||||
// Reload WebP buffer
|
||||
image = VImage::webpload_buffer(blob, option);
|
||||
}
|
||||
image = VImage::jpegload_buffer(blob, option);
|
||||
vips_area_unref(reinterpret_cast<VipsArea*>(blob));
|
||||
} else {
|
||||
if (inputImageType == sharp::ImageType::JPEG) {
|
||||
// Reload JPEG file
|
||||
image = VImage::jpegload(const_cast<char*>(baton->input->file.data()), option);
|
||||
// Reload JPEG file
|
||||
image = VImage::jpegload(const_cast<char*>(baton->input->file.data()), option);
|
||||
}
|
||||
} else if (scale != 1.0) {
|
||||
vips::VOption *option = VImage::option()
|
||||
->set("access", baton->input->access)
|
||||
->set("scale", scale)
|
||||
->set("fail", baton->input->failOnError);
|
||||
if (inputImageType == sharp::ImageType::WEBP) {
|
||||
option->set("n", baton->input->pages);
|
||||
option->set("page", baton->input->page);
|
||||
|
||||
if (baton->input->buffer != nullptr) {
|
||||
// Reload WebP buffer
|
||||
VipsBlob *blob = vips_blob_new(nullptr, baton->input->buffer, baton->input->bufferLength);
|
||||
image = VImage::webpload_buffer(blob, option);
|
||||
vips_area_unref(reinterpret_cast<VipsArea*>(blob));
|
||||
} else {
|
||||
// Reload WebP file
|
||||
image = VImage::webpload(const_cast<char*>(baton->input->file.data()), option);
|
||||
}
|
||||
}
|
||||
// Recalculate integral shrink and double residual
|
||||
int const shrunkOnLoadWidth = image.width();
|
||||
int const shrunkOnLoadHeight = image.height();
|
||||
if (!baton->rotateBeforePreExtract &&
|
||||
(rotation == VIPS_ANGLE_D90 || rotation == VIPS_ANGLE_D270)) {
|
||||
// Swap when rotating by 90 or 270 degrees
|
||||
xfactor = static_cast<double>(shrunkOnLoadWidth) / static_cast<double>(targetResizeHeight);
|
||||
yfactor = static_cast<double>(shrunkOnLoadHeight) / static_cast<double>(targetResizeWidth);
|
||||
} else {
|
||||
xfactor = static_cast<double>(shrunkOnLoadWidth) / static_cast<double>(targetResizeWidth);
|
||||
yfactor = static_cast<double>(shrunkOnLoadHeight) / static_cast<double>(targetResizeHeight);
|
||||
} else if (inputImageType == sharp::ImageType::SVG) {
|
||||
option->set("unlimited", baton->input->unlimited);
|
||||
option->set("dpi", baton->input->density);
|
||||
|
||||
if (baton->input->buffer != nullptr) {
|
||||
// Reload SVG buffer
|
||||
VipsBlob *blob = vips_blob_new(nullptr, baton->input->buffer, baton->input->bufferLength);
|
||||
image = VImage::svgload_buffer(blob, option);
|
||||
vips_area_unref(reinterpret_cast<VipsArea*>(blob));
|
||||
} else {
|
||||
// Reload SVG file
|
||||
image = VImage::svgload(const_cast<char*>(baton->input->file.data()), option);
|
||||
}
|
||||
|
||||
sharp::SetDensity(image, baton->input->density);
|
||||
} else if (inputImageType == sharp::ImageType::PDF) {
|
||||
option->set("n", baton->input->pages);
|
||||
option->set("page", baton->input->page);
|
||||
option->set("dpi", baton->input->density);
|
||||
|
||||
if (baton->input->buffer != nullptr) {
|
||||
// Reload PDF buffer
|
||||
VipsBlob *blob = vips_blob_new(nullptr, baton->input->buffer, baton->input->bufferLength);
|
||||
image = VImage::pdfload_buffer(blob, option);
|
||||
vips_area_unref(reinterpret_cast<VipsArea*>(blob));
|
||||
} else {
|
||||
// Reload PDF file
|
||||
image = VImage::pdfload(const_cast<char*>(baton->input->file.data()), option);
|
||||
}
|
||||
|
||||
sharp::SetDensity(image, baton->input->density);
|
||||
}
|
||||
}
|
||||
// Remove animation properties from single page images
|
||||
if (baton->input->pages == 1) {
|
||||
image = sharp::RemoveAnimationProperties(image);
|
||||
|
||||
// Any pre-shrinking may already have been done
|
||||
int thumbWidth = image.width();
|
||||
int thumbHeight = image.height();
|
||||
|
||||
// After pre-shrink, but before the main shrink stage
|
||||
// Reuse the initial pageHeight if we didn't pre-shrink
|
||||
int preshrunkPageHeight = shouldPreShrink ? sharp::GetPageHeight(image) : pageHeight;
|
||||
|
||||
if (baton->fastShrinkOnLoad && jpegShrinkOnLoad > 1) {
|
||||
// JPEG shrink-on-load rounds the output dimensions down, which
|
||||
// may cause incorrect dimensions when fastShrinkOnLoad is enabled
|
||||
// Just recalculate vshrink / hshrink on the main image instead of
|
||||
// the pre-shrunk image when this is the case
|
||||
hshrink = static_cast<double>(thumbWidth) / (static_cast<double>(inputWidth) / hshrink);
|
||||
vshrink = static_cast<double>(preshrunkPageHeight) / (static_cast<double>(pageHeight) / vshrink);
|
||||
} else {
|
||||
// Shrink to preshrunkPageHeight, so we work for multi-page images
|
||||
std::tie(hshrink, vshrink) = sharp::ResolveShrink(
|
||||
thumbWidth, preshrunkPageHeight, targetResizeWidth, targetResizeHeight,
|
||||
baton->canvas, swap, baton->withoutEnlargement);
|
||||
}
|
||||
|
||||
int targetHeight = static_cast<int>(std::rint(static_cast<double>(preshrunkPageHeight) / vshrink));
|
||||
int targetPageHeight = targetHeight;
|
||||
|
||||
// In toilet-roll mode, we must adjust vshrink so that we exactly hit
|
||||
// preshrunkPageHeight or we'll have pixels straddling pixel boundaries
|
||||
if (thumbHeight > preshrunkPageHeight) {
|
||||
targetHeight *= nPages;
|
||||
vshrink = static_cast<double>(thumbHeight) / targetHeight;
|
||||
}
|
||||
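In toilet-roll mode the per-frame target height is rounded first, then the strip's vertical shrink is recomputed from the rounded value so every frame boundary lands on a whole pixel. A worked sketch with illustrative numbers, where Math.round stands in for std::rint:

```js
// Illustrative: recompute vshrink so frames land on exact pixel boundaries.
function toiletRollShrink ({ stripHeight, pageHeight, vshrink, nPages }) {
  let targetPageHeight = Math.round(pageHeight / vshrink);
  let targetHeight = targetPageHeight;
  if (stripHeight > pageHeight) {
    // Animated input: scale the whole strip to an exact multiple of the
    // rounded per-frame height.
    targetHeight = targetPageHeight * nPages;
    vshrink = stripHeight / targetHeight;
  }
  return { targetPageHeight, targetHeight, vshrink };
}

// 9 frames, each 80px tall, with a crop-mode shrink of 2.3 taken from the width axis:
console.log(toiletRollShrink({ stripHeight: 720, pageHeight: 80, vshrink: 2.3, nPages: 9 }));
// { targetPageHeight: 35, targetHeight: 315, vshrink: ~2.2857 }
```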
|
||||
// Ensure we're using a device-independent colour space
|
||||
@ -345,7 +331,7 @@ class PipelineWorker : public Napi::AsyncWorker {
|
||||
image = image.colourspace(VIPS_INTERPRETATION_B_W);
|
||||
}
|
||||
|
||||
bool const shouldResize = xfactor != 1.0 || yfactor != 1.0;
|
||||
bool const shouldResize = hshrink != 1.0 || vshrink != 1.0;
|
||||
bool const shouldBlur = baton->blurSigma != 0.0;
|
||||
bool const shouldConv = baton->convKernelWidth * baton->convKernelHeight > 0;
|
||||
bool const shouldSharpen = baton->sharpenSigma != 0.0;
|
||||
@ -379,21 +365,8 @@ class PipelineWorker : public Napi::AsyncWorker {
|
||||
) {
|
||||
throw vips::VError("Unknown kernel");
|
||||
}
|
||||
// Ensure shortest edge is at least 1 pixel
|
||||
if (image.width() / xfactor < 0.5) {
|
||||
xfactor = 2 * image.width();
|
||||
if (baton->canvas != Canvas::EMBED) {
|
||||
baton->width = 1;
|
||||
}
|
||||
}
|
||||
if (image.height() / yfactor < 0.5) {
|
||||
yfactor = 2 * image.height();
|
||||
if (baton->canvas != Canvas::EMBED) {
|
||||
baton->height = 1;
|
||||
}
|
||||
}
|
||||
image = image.resize(1.0 / xfactor, VImage::option()
|
||||
->set("vscale", 1.0 / yfactor)
|
||||
image = image.resize(1.0 / hshrink, VImage::option()
|
||||
->set("vscale", 1.0 / vshrink)
|
||||
->set("kernel", kernel));
|
||||
}
|
||||
|
||||
@ -429,52 +402,67 @@ class PipelineWorker : public Napi::AsyncWorker {
|
||||
image = image.copy(VImage::option()->set("interpretation", baton->colourspace));
|
||||
}
|
||||
|
||||
inputWidth = image.width();
|
||||
inputHeight = nPages > 1 ? targetPageHeight : image.height();
|
||||
|
||||
// Resolve dimensions
|
||||
if (baton->width <= 0) {
|
||||
baton->width = inputWidth;
|
||||
}
|
||||
if (baton->height <= 0) {
|
||||
baton->height = inputHeight;
|
||||
}
|
||||
|
||||
// Crop/embed
|
||||
if (image.width() != baton->width || image.height() != baton->height) {
|
||||
if (baton->canvas == Canvas::EMBED) {
|
||||
if (inputWidth != baton->width || inputHeight != baton->height) {
|
||||
if (baton->canvas == sharp::Canvas::EMBED) {
|
||||
std::vector<double> background;
|
||||
std::tie(image, background) = sharp::ApplyAlpha(image, baton->resizeBackground, shouldPremultiplyAlpha);
|
||||
|
||||
// Embed
|
||||
|
||||
// Calculate where to position the embeded image if gravity specified, else center.
|
||||
// Calculate where to position the embedded image if gravity specified, else center.
|
||||
int left;
|
||||
int top;
|
||||
|
||||
left = static_cast<int>(round((baton->width - image.width()) / 2));
|
||||
top = static_cast<int>(round((baton->height - image.height()) / 2));
|
||||
left = static_cast<int>(round((baton->width - inputWidth) / 2));
|
||||
top = static_cast<int>(round((baton->height - inputHeight) / 2));
|
||||
|
||||
int width = std::max(image.width(), baton->width);
|
||||
int height = std::max(image.height(), baton->height);
|
||||
int width = std::max(inputWidth, baton->width);
|
||||
int height = std::max(inputHeight, baton->height);
|
||||
std::tie(left, top) = sharp::CalculateEmbedPosition(
|
||||
image.width(), image.height(), baton->width, baton->height, baton->position);
|
||||
inputWidth, inputHeight, baton->width, baton->height, baton->position);
|
||||
|
||||
image = image.embed(left, top, width, height, VImage::option()
|
||||
->set("extend", VIPS_EXTEND_BACKGROUND)
|
||||
->set("background", background));
|
||||
image = nPages > 1
|
||||
? sharp::EmbedMultiPage(image,
|
||||
left, top, width, height, background, nPages, &targetPageHeight)
|
||||
: image.embed(left, top, width, height, VImage::option()
|
||||
->set("extend", VIPS_EXTEND_BACKGROUND)
|
||||
->set("background", background));
|
||||
} else if (baton->canvas == sharp::Canvas::CROP) {
|
||||
if (baton->width > inputWidth) {
|
||||
baton->width = inputWidth;
|
||||
}
|
||||
if (baton->height > inputHeight) {
|
||||
baton->height = inputHeight;
|
||||
}
|
||||
|
||||
} else if (
|
||||
baton->canvas != Canvas::IGNORE_ASPECT &&
|
||||
(image.width() > baton->width || image.height() > baton->height)
|
||||
) {
|
||||
// Crop/max/min
|
||||
// Crop
|
||||
if (baton->position < 9) {
|
||||
// Gravity-based crop
|
||||
int left;
|
||||
int top;
|
||||
std::tie(left, top) = sharp::CalculateCrop(
|
||||
image.width(), image.height(), baton->width, baton->height, baton->position);
|
||||
int width = std::min(image.width(), baton->width);
|
||||
int height = std::min(image.height(), baton->height);
|
||||
image = image.extract_area(left, top, width, height);
|
||||
} else {
|
||||
inputWidth, inputHeight, baton->width, baton->height, baton->position);
|
||||
int width = std::min(inputWidth, baton->width);
|
||||
int height = std::min(inputHeight, baton->height);
|
||||
|
||||
image = nPages > 1
|
||||
? sharp::CropMultiPage(image,
|
||||
left, top, width, height, nPages, &targetPageHeight)
|
||||
: image.extract_area(left, top, width, height);
|
||||
} else if (nPages == 1) { // Skip smart crop for multi-page images
|
||||
// Attention-based or Entropy-based crop
|
||||
if (baton->width > image.width()) {
|
||||
baton->width = image.width();
|
||||
}
|
||||
if (baton->height > image.height()) {
|
||||
baton->height = image.height();
|
||||
}
|
||||
image = image.tilecache(VImage::option()
|
||||
->set("access", VIPS_ACCESS_RANDOM)
|
||||
->set("threaded", TRUE));
|
||||
@ -496,8 +484,17 @@ class PipelineWorker : public Napi::AsyncWorker {
|
||||
|
||||
// Post extraction
|
||||
if (baton->topOffsetPost != -1) {
|
||||
image = image.extract_area(
|
||||
baton->leftOffsetPost, baton->topOffsetPost, baton->widthPost, baton->heightPost);
|
||||
if (nPages > 1) {
|
||||
image = sharp::CropMultiPage(image,
|
||||
baton->leftOffsetPost, baton->topOffsetPost, baton->widthPost, baton->heightPost,
|
||||
nPages, &targetPageHeight);
|
||||
|
||||
// heightPost is used in the info object, so update to reflect the number of pages
|
||||
baton->heightPost *= nPages;
|
||||
} else {
|
||||
image = image.extract_area(
|
||||
baton->leftOffsetPost, baton->topOffsetPost, baton->widthPost, baton->heightPost);
|
||||
}
|
||||
}
|
||||
|
||||
// Affine transform
|
||||
@ -519,10 +516,13 @@ class PipelineWorker : public Napi::AsyncWorker {
|
||||
|
||||
// Embed
|
||||
baton->width = image.width() + baton->extendLeft + baton->extendRight;
|
||||
baton->height = image.height() + baton->extendTop + baton->extendBottom;
|
||||
baton->height = (nPages > 1 ? targetPageHeight : image.height()) + baton->extendTop + baton->extendBottom;
|
||||
|
||||
image = image.embed(baton->extendLeft, baton->extendTop, baton->width, baton->height,
|
||||
VImage::option()->set("extend", VIPS_EXTEND_BACKGROUND)->set("background", background));
|
||||
image = nPages > 1
|
||||
? sharp::EmbedMultiPage(image,
|
||||
baton->extendLeft, baton->extendTop, baton->width, baton->height, background, nPages, &targetPageHeight)
|
||||
: image.embed(baton->extendLeft, baton->extendTop, baton->width, baton->height,
|
||||
VImage::option()->set("extend", VIPS_EXTEND_BACKGROUND)->set("background", background));
|
||||
}
|
||||
// Median - must happen before blurring, due to the utility of blurring after thresholding
|
||||
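Because the extension is applied per frame via EmbedMultiPage, an extended animated image reports the border on every frame; the height of the returned strip is (frame height + top + bottom) multiplied by the number of frames, as the new Extend test below also checks. A usage sketch with an illustrative fixture name:

```js
const sharp = require('sharp');

// Extend adds the border to every frame of an animated input.
sharp('input-animated.webp', { pages: -1 })
  .resize(120)
  .extend(10)
  .toBuffer((err, data, info) => {
    if (err) throw err;
    console.log(info.width, info.height); // e.g. 140 and 140 * number of frames
  });
```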
if (shouldApplyMedian) {
|
||||
@ -763,10 +763,7 @@ class PipelineWorker : public Napi::AsyncWorker {
|
||||
baton->height = image.height();
|
||||
|
||||
image = sharp::SetAnimationProperties(
|
||||
image,
|
||||
baton->pageHeight,
|
||||
baton->delay,
|
||||
baton->loop);
|
||||
image, nPages, targetPageHeight, baton->delay, baton->loop);
|
||||
|
||||
// Output
|
||||
sharp::SetTimeout(image, baton->timeoutSeconds);
|
||||
@ -1317,15 +1314,15 @@ Napi::Value pipeline(const Napi::CallbackInfo& info) {
|
||||
// Canvas option
|
||||
std::string canvas = sharp::AttrAsStr(options, "canvas");
|
||||
if (canvas == "crop") {
|
||||
baton->canvas = Canvas::CROP;
|
||||
baton->canvas = sharp::Canvas::CROP;
|
||||
} else if (canvas == "embed") {
|
||||
baton->canvas = Canvas::EMBED;
|
||||
baton->canvas = sharp::Canvas::EMBED;
|
||||
} else if (canvas == "max") {
|
||||
baton->canvas = Canvas::MAX;
|
||||
baton->canvas = sharp::Canvas::MAX;
|
||||
} else if (canvas == "min") {
|
||||
baton->canvas = Canvas::MIN;
|
||||
baton->canvas = sharp::Canvas::MIN;
|
||||
} else if (canvas == "ignore_aspect") {
|
||||
baton->canvas = Canvas::IGNORE_ASPECT;
|
||||
baton->canvas = sharp::Canvas::IGNORE_ASPECT;
|
||||
}
|
||||
// Tint chroma
|
||||
baton->tintA = sharp::AttrAsDouble(options, "tintA");
|
||||
@ -1520,10 +1517,7 @@ Napi::Value pipeline(const Napi::CallbackInfo& info) {
|
||||
vips_enum_from_nick(nullptr, VIPS_TYPE_BAND_FORMAT,
|
||||
sharp::AttrAsStr(options, "rawDepth").data()));
|
||||
|
||||
// Animated output
|
||||
if (sharp::HasAttr(options, "pageHeight")) {
|
||||
baton->pageHeight = sharp::AttrAsUint32(options, "pageHeight");
|
||||
}
|
||||
// Animated output properties
|
||||
if (sharp::HasAttr(options, "loop")) {
|
||||
baton->loop = sharp::AttrAsUint32(options, "loop");
|
||||
}
|
||||
|
@ -27,14 +27,6 @@
|
||||
|
||||
Napi::Value pipeline(const Napi::CallbackInfo& info);
|
||||
|
||||
enum class Canvas {
|
||||
CROP,
|
||||
EMBED,
|
||||
MAX,
|
||||
MIN,
|
||||
IGNORE_ASPECT
|
||||
};
|
||||
|
||||
struct Composite {
|
||||
sharp::InputDescriptor *input;
|
||||
VipsBlendMode mode;
|
||||
@ -75,7 +67,7 @@ struct PipelineBaton {
|
||||
int width;
|
||||
int height;
|
||||
int channels;
|
||||
Canvas canvas;
|
||||
sharp::Canvas canvas;
|
||||
int position;
|
||||
std::vector<double> resizeBackground;
|
||||
bool hasCropOffset;
|
||||
@ -200,7 +192,6 @@ struct PipelineBaton {
|
||||
double ensureAlpha;
|
||||
VipsInterpretation colourspaceInput;
|
||||
VipsInterpretation colourspace;
|
||||
int pageHeight;
|
||||
std::vector<int> delay;
|
||||
int loop;
|
||||
int tileSize;
|
||||
@ -221,7 +212,7 @@ struct PipelineBaton {
|
||||
topOffsetPre(-1),
|
||||
topOffsetPost(-1),
|
||||
channels(0),
|
||||
canvas(Canvas::CROP),
|
||||
canvas(sharp::Canvas::CROP),
|
||||
position(0),
|
||||
resizeBackground{ 0.0, 0.0, 0.0, 255.0 },
|
||||
hasCropOffset(false),
|
||||
@ -334,7 +325,6 @@ struct PipelineBaton {
|
||||
ensureAlpha(-1.0),
|
||||
colourspaceInput(VIPS_INTERPRETATION_LAST),
|
||||
colourspace(VIPS_INTERPRETATION_LAST),
|
||||
pageHeight(0),
|
||||
delay{-1},
|
||||
loop(-1),
|
||||
tileSize(256),
|
||||
Binary test fixtures:
* test/fixtures/expected/clahe-11-25-14.jpg (modified, 28 KiB)
* test/fixtures/expected/embed-animated-height.webp (new file, 13 KiB)
* test/fixtures/expected/embed-animated-width.webp (new file, 13 KiB)
* test/fixtures/expected/extend-equal-single.webp (new file, 8.0 KiB)
* test/fixtures/expected/extract-lch.jpg (modified, 13 KiB)
* test/fixtures/expected/gravity-center-height.webp (new file, 6.8 KiB)
* test/fixtures/expected/gravity-center-width.webp (new file, 7.6 KiB)
* test/fixtures/expected/hilutite.jpg (modified, 424 KiB)
* test/fixtures/expected/icc-cmyk.jpg (modified, 943 KiB)
* test/fixtures/expected/resize-crop-extract.jpg (modified, 1.4 KiB)
* test/fixtures/expected/svg72.png (modified, 1.8 KiB before, 373 B after)
* test/fixtures/expected/tint-sepia.jpg (modified, 14 KiB)
@ -27,7 +27,7 @@ describe('AVIF', () => {
|
||||
format: 'jpeg',
|
||||
hasAlpha: false,
|
||||
hasProfile: false,
|
||||
height: 13,
|
||||
height: 14,
|
||||
isProgressive: false,
|
||||
space: 'srgb',
|
||||
width: 32
|
||||
@ -50,7 +50,6 @@ describe('AVIF', () => {
|
||||
hasProfile: false,
|
||||
height: 26,
|
||||
isProgressive: false,
|
||||
pageHeight: 26,
|
||||
pagePrimary: 0,
|
||||
pages: 1,
|
||||
space: 'srgb',
|
||||
@ -71,9 +70,8 @@ describe('AVIF', () => {
|
||||
format: 'heif',
|
||||
hasAlpha: false,
|
||||
hasProfile: false,
|
||||
height: 12,
|
||||
height: 14,
|
||||
isProgressive: false,
|
||||
pageHeight: 12,
|
||||
pagePrimary: 0,
|
||||
pages: 1,
|
||||
space: 'srgb',
|
||||
@ -97,7 +95,6 @@ describe('AVIF', () => {
|
||||
hasProfile: false,
|
||||
height: 300,
|
||||
isProgressive: false,
|
||||
pageHeight: 300,
|
||||
pagePrimary: 0,
|
||||
pages: 1,
|
||||
space: 'srgb',
|
||||
|
@ -6,16 +6,30 @@ const sharp = require('../../');
|
||||
const fixtures = require('../fixtures');
|
||||
|
||||
describe('Extend', function () {
|
||||
it('extend all sides equally via a single value', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(120)
|
||||
.extend(10)
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(140, info.width);
|
||||
assert.strictEqual(118, info.height);
|
||||
fixtures.assertSimilar(fixtures.expected('extend-equal-single.jpg'), data, done);
|
||||
});
|
||||
describe('extend all sides equally via a single value', function () {
|
||||
it('JPEG', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(120)
|
||||
.extend(10)
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(140, info.width);
|
||||
assert.strictEqual(118, info.height);
|
||||
fixtures.assertSimilar(fixtures.expected('extend-equal-single.jpg'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Animated WebP', function (done) {
|
||||
sharp(fixtures.inputWebPAnimated, { pages: -1 })
|
||||
.resize(120)
|
||||
.extend(10)
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(140, info.width);
|
||||
assert.strictEqual(140 * 9, info.height);
|
||||
fixtures.assertSimilar(fixtures.expected('extend-equal-single.webp'), data, done);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('extend all sides equally with RGB', function (done) {
|
||||
|
@ -39,10 +39,35 @@ describe('Partial image extraction', function () {
|
||||
});
|
||||
});
|
||||
|
||||
describe('Animated WebP', function () {
|
||||
it('Before resize', function (done) {
|
||||
sharp(fixtures.inputWebPAnimated, { pages: -1 })
|
||||
.extract({ left: 0, top: 30, width: 80, height: 20 })
|
||||
.resize(320, 80)
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(80 * 9, info.height);
|
||||
fixtures.assertSimilar(fixtures.expected('gravity-center-height.webp'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('After resize', function (done) {
|
||||
sharp(fixtures.inputWebPAnimated, { pages: -1 })
|
||||
.resize(320, 320)
|
||||
.extract({ left: 0, top: 120, width: 320, height: 80 })
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(80 * 9, info.height);
|
||||
fixtures.assertSimilar(fixtures.expected('gravity-center-height.webp'), data, done);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('TIFF', function (done) {
|
||||
sharp(fixtures.inputTiff)
|
||||
.extract({ left: 34, top: 63, width: 341, height: 529 })
|
||||
.jpeg()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(341, info.width);
|
||||
|
@ -80,12 +80,6 @@ describe('GIF input', () => {
|
||||
assert.strictEqual(true, reduced.length < original.length);
|
||||
});
|
||||
|
||||
it('invalid pageHeight throws', () => {
|
||||
assert.throws(() => {
|
||||
sharp().gif({ pageHeight: 0 });
|
||||
});
|
||||
});
|
||||
|
||||
it('invalid loop throws', () => {
|
||||
assert.throws(() => {
|
||||
sharp().gif({ loop: -1 });
|
||||
@ -97,7 +91,7 @@ describe('GIF input', () => {
|
||||
|
||||
it('invalid delay throws', () => {
|
||||
assert.throws(() => {
|
||||
sharp().gif({ delay: [-1] });
|
||||
sharp().gif({ delay: -1 });
|
||||
});
|
||||
assert.throws(() => {
|
||||
sharp().gif({ delay: [65536] });
|
||||
|
@ -194,6 +194,29 @@ describe('Image metadata', function () {
|
||||
|
||||
it('Animated WebP', () =>
|
||||
sharp(fixtures.inputWebPAnimated)
|
||||
.metadata()
|
||||
.then(({
|
||||
format, width, height, space, channels, depth,
|
||||
isProgressive, pages, loop, delay, hasProfile,
|
||||
hasAlpha
|
||||
}) => {
|
||||
assert.strictEqual(format, 'webp');
|
||||
assert.strictEqual(width, 80);
|
||||
assert.strictEqual(height, 80);
|
||||
assert.strictEqual(space, 'srgb');
|
||||
assert.strictEqual(channels, 4);
|
||||
assert.strictEqual(depth, 'uchar');
|
||||
assert.strictEqual(isProgressive, false);
|
||||
assert.strictEqual(pages, 9);
|
||||
assert.strictEqual(loop, 0);
|
||||
assert.deepStrictEqual(delay, [120, 120, 90, 120, 120, 90, 120, 90, 30]);
|
||||
assert.strictEqual(hasProfile, false);
|
||||
assert.strictEqual(hasAlpha, true);
|
||||
})
|
||||
);
|
||||
|
||||
it('Animated WebP with all pages', () =>
|
||||
sharp(fixtures.inputWebPAnimated, { pages: -1 })
|
||||
.metadata()
|
||||
.then(({
|
||||
format, width, height, space, channels, depth,
|
||||
@ -202,7 +225,7 @@ describe('Image metadata', function () {
|
||||
}) => {
|
||||
assert.strictEqual(format, 'webp');
|
||||
assert.strictEqual(width, 80);
|
||||
assert.strictEqual(height, 80);
|
||||
assert.strictEqual(height, 720);
|
||||
assert.strictEqual(space, 'srgb');
|
||||
assert.strictEqual(channels, 4);
|
||||
assert.strictEqual(depth, 'uchar');
|
||||
@ -221,8 +244,8 @@ describe('Image metadata', function () {
|
||||
.metadata()
|
||||
.then(({
|
||||
format, width, height, space, channels, depth,
|
||||
isProgressive, pages, pageHeight, loop, delay,
|
||||
hasProfile, hasAlpha
|
||||
isProgressive, pages, loop, delay, hasProfile,
|
||||
hasAlpha
|
||||
}) => {
|
||||
assert.strictEqual(format, 'webp');
|
||||
assert.strictEqual(width, 370);
|
||||
@ -232,7 +255,6 @@ describe('Image metadata', function () {
|
||||
assert.strictEqual(depth, 'uchar');
|
||||
assert.strictEqual(isProgressive, false);
|
||||
assert.strictEqual(pages, 10);
|
||||
assert.strictEqual(pageHeight, 285);
|
||||
assert.strictEqual(loop, 3);
|
||||
assert.deepStrictEqual(delay, [...Array(9).fill(3000), 15000]);
|
||||
assert.strictEqual(hasProfile, false);
|
||||
@ -285,8 +307,8 @@ describe('Image metadata', function () {
|
||||
.metadata()
|
||||
.then(({
|
||||
format, width, height, space, channels, depth,
|
||||
isProgressive, pages, pageHeight, loop, delay,
|
||||
background, hasProfile, hasAlpha
|
||||
isProgressive, pages, loop, delay, background,
|
||||
hasProfile, hasAlpha
|
||||
}) => {
|
||||
assert.strictEqual(format, 'gif');
|
||||
assert.strictEqual(width, 80);
|
||||
@ -296,7 +318,6 @@ describe('Image metadata', function () {
|
||||
assert.strictEqual(depth, 'uchar');
|
||||
assert.strictEqual(isProgressive, false);
|
||||
assert.strictEqual(pages, 30);
|
||||
assert.strictEqual(pageHeight, 80);
|
||||
assert.strictEqual(loop, 0);
|
||||
assert.deepStrictEqual(delay, Array(30).fill(30));
|
||||
assert.deepStrictEqual(background, { r: 0, g: 0, b: 0 });
|
||||
@ -310,8 +331,8 @@ describe('Image metadata', function () {
|
||||
.metadata()
|
||||
.then(({
|
||||
format, width, height, space, channels, depth,
|
||||
isProgressive, pages, pageHeight, loop, delay,
|
||||
hasProfile, hasAlpha
|
||||
isProgressive, pages, loop, delay, hasProfile,
|
||||
hasAlpha
|
||||
}) => {
|
||||
assert.strictEqual(format, 'gif');
|
||||
assert.strictEqual(width, 370);
|
||||
@ -321,7 +342,6 @@ describe('Image metadata', function () {
|
||||
assert.strictEqual(depth, 'uchar');
|
||||
assert.strictEqual(isProgressive, false);
|
||||
assert.strictEqual(pages, 10);
|
||||
assert.strictEqual(pageHeight, 285);
|
||||
assert.strictEqual(loop, 2);
|
||||
assert.deepStrictEqual(delay, [...Array(9).fill(3000), 15000]);
|
||||
assert.strictEqual(hasProfile, false);
|
||||
@ -522,7 +542,7 @@ describe('Image metadata', function () {
|
||||
assert.strictEqual('Relative', profile.intent);
|
||||
assert.strictEqual('Printer', profile.deviceClass);
|
||||
});
|
||||
fixtures.assertSimilar(output, fixtures.path('expected/icc-cmyk.jpg'), { threshold: 0 }, done);
|
||||
fixtures.assertSimilar(output, fixtures.expected('icc-cmyk.jpg'), { threshold: 0 }, done);
|
||||
});
|
||||
});
|
||||
|
||||
@ -533,7 +553,7 @@ describe('Image metadata', function () {
|
||||
.withMetadata({ icc: fixtures.path('hilutite.icm') })
|
||||
.toFile(output, function (err, info) {
|
||||
if (err) throw err;
|
||||
fixtures.assertMaxColourDistance(output, fixtures.path('expected/hilutite.jpg'), 9);
|
||||
fixtures.assertMaxColourDistance(output, fixtures.expected('hilutite.jpg'), 9);
|
||||
done();
|
||||
});
|
||||
});
|
||||
@ -737,7 +757,6 @@ describe('Image metadata', function () {
|
||||
depth: 'uchar',
|
||||
isProgressive: false,
|
||||
pages: 1,
|
||||
pageHeight: 858,
|
||||
pagePrimary: 0,
|
||||
compression: 'av1',
|
||||
hasProfile: false,
|
||||
|
@ -148,6 +148,42 @@ describe('Resize fit=contain', function () {
|
||||
});
|
||||
});
|
||||
|
||||
describe('Animated WebP', function () {
|
||||
it('Width only', function (done) {
|
||||
sharp(fixtures.inputWebPAnimated, { pages: -1 })
|
||||
.resize(320, 240, {
|
||||
fit: 'contain',
|
||||
background: { r: 255, g: 0, b: 0 }
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('webp', info.format);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(240 * 9, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('embed-animated-width.webp'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Height only', function (done) {
|
||||
sharp(fixtures.inputWebPAnimated, { pages: -1 })
|
||||
.resize(240, 320, {
|
||||
fit: 'contain',
|
||||
background: { r: 255, g: 0, b: 0 }
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('webp', info.format);
|
||||
assert.strictEqual(240, info.width);
|
||||
assert.strictEqual(320 * 9, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('embed-animated-height.webp'), data, done);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('Invalid position values should fail', function () {
|
||||
[-1, 8.1, 9, 1000000, false, 'vallejo'].forEach(function (position) {
|
||||
assert.throws(function () {
|
||||
|
@ -269,6 +269,30 @@ describe('Resize fit=cover', function () {
|
||||
});
|
||||
});
|
||||
|
||||
describe('Animated WebP', function () {
|
||||
it('Width only', function (done) {
|
||||
sharp(fixtures.inputWebPAnimated, { pages: -1 })
|
||||
.resize(80, 320, { fit: sharp.fit.cover })
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(80, info.width);
|
||||
assert.strictEqual(320 * 9, info.height);
|
||||
fixtures.assertSimilar(fixtures.expected('gravity-center-width.webp'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Height only', function (done) {
|
||||
sharp(fixtures.inputWebPAnimated, { pages: -1 })
|
||||
.resize(320, 80, { fit: sharp.fit.cover })
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(80 * 9, info.height);
|
||||
fixtures.assertSimilar(fixtures.expected('gravity-center-height.webp'), data, done);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('Entropy-based strategy', function () {
|
||||
it('JPEG', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
|
@ -74,6 +74,27 @@ describe('SVG input', function () {
|
||||
});
|
||||
});
|
||||
|
||||
it('Convert SVG to PNG utilizing scale-on-load', function (done) {
|
||||
const size = 1024;
|
||||
sharp(fixtures.inputSvgSmallViewBox)
|
||||
.resize(size)
|
||||
.toFormat('png')
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(size, info.width);
|
||||
assert.strictEqual(size, info.height);
|
||||
fixtures.assertSimilar(fixtures.expected('circle.png'), data, function (err) {
|
||||
if (err) throw err;
|
||||
sharp(data).metadata(function (err, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(72, info.density);
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('Convert SVG to PNG at 14.4DPI', function (done) {
|
||||
sharp(fixtures.inputSvg, { density: 14.4 })
|
||||
.toFormat('png')
|
||||
|
@ -133,12 +133,6 @@ describe('WebP', function () {
|
||||
});
|
||||
});
|
||||
|
||||
it('invalid pageHeight throws', () => {
|
||||
assert.throws(() => {
|
||||
sharp().webp({ pageHeight: 0 });
|
||||
});
|
||||
});
|
||||
|
||||
it('invalid loop throws', () => {
|
||||
assert.throws(() => {
|
||||
sharp().webp({ loop: -1 });
|
||||
@ -151,7 +145,7 @@ describe('WebP', function () {
|
||||
|
||||
it('invalid delay throws', () => {
|
||||
assert.throws(() => {
|
||||
sharp().webp({ delay: [-1] });
|
||||
sharp().webp({ delay: -1 });
|
||||
});
|
||||
|
||||
assert.throws(() => {
|
||||
@ -159,16 +153,13 @@ describe('WebP', function () {
|
||||
});
|
||||
});
|
||||
|
||||
it('should double the number of frames with default delay', async () => {
|
||||
const original = await sharp(fixtures.inputWebPAnimated, { pages: -1 }).metadata();
|
||||
it('should repeat a single delay for all frames', async () => {
|
||||
const updated = await sharp(fixtures.inputWebPAnimated, { pages: -1 })
|
||||
.webp({ pageHeight: original.pageHeight / 2 })
|
||||
.webp({ delay: 100 })
|
||||
.toBuffer()
|
||||
.then(data => sharp(data, { pages: -1 }).metadata());
|
||||
|
||||
assert.strictEqual(updated.pages, original.pages * 2);
|
||||
assert.strictEqual(updated.pageHeight, original.pageHeight / 2);
|
||||
assert.deepStrictEqual(updated.delay, [...original.delay, ...Array(9).fill(120)]);
|
||||
assert.deepStrictEqual(updated.delay, Array(updated.pages).fill(100));
|
||||
});
|
||||
|
||||
it('should limit animation loop', async () => {
|
||||
@ -216,22 +207,14 @@ describe('WebP', function () {
|
||||
});
|
||||
});
|
||||
|
||||
it('should remove animation properties when loading single page', async () => {
|
||||
const data = await sharp(fixtures.inputGifAnimatedLoop3)
|
||||
it('should resize animated image to page height', async () => {
|
||||
const updated = await sharp(fixtures.inputWebPAnimated, { pages: -1 })
|
||||
.resize({ height: 570 })
|
||||
.webp({ effort: 0 })
|
||||
.toBuffer();
|
||||
const { size, ...metadata } = await sharp(data).metadata();
|
||||
assert.deepStrictEqual(metadata, {
|
||||
format: 'webp',
|
||||
width: 740,
|
||||
height: 570,
|
||||
space: 'srgb',
|
||||
channels: 3,
|
||||
depth: 'uchar',
|
||||
isProgressive: false,
|
||||
hasProfile: false,
|
||||
hasAlpha: false
|
||||
});
|
||||
.toBuffer()
|
||||
.then(data => sharp(data, { pages: -1 }).metadata());
|
||||
|
||||
assert.strictEqual(updated.height, 570 * 9);
|
||||
assert.strictEqual(updated.pageHeight, 570);
|
||||
});
|
||||
});
|
||||
|