diff --git a/lib/output.js b/lib/output.js index 0c28d195..620c61cb 100644 --- a/lib/output.js +++ b/lib/output.js @@ -458,9 +458,8 @@ function png (options) { * @param {boolean} [options.nearLossless=false] - use near_lossless compression mode * @param {boolean} [options.smartSubsample=false] - use high quality chroma subsampling * @param {number} [options.effort=4] - CPU effort, between 0 (fastest) and 6 (slowest) - * @param {number} [options.pageHeight] - page height for animated output * @param {number} [options.loop=0] - number of animation iterations, use 0 for infinite animation - * @param {number[]} [options.delay] - list of delays between animation frames (in milliseconds) + * @param {number|number[]} [options.delay] - delay(s) between animation frames (in milliseconds) * @param {boolean} [options.force=true] - force WebP output, otherwise attempt to use input format * @returns {Sharp} * @throws {Error} Invalid options @@ -527,10 +526,7 @@ function webp (options) { * width: 128, * height: 128 * pages * }) - * .gif({ - * pageHeight: 128, - * dither: 0 - * }) + * .gif({ dither: 0 }) * .toBuffer(); * * @param {Object} [options] - output options @@ -538,9 +534,8 @@ function webp (options) { * @param {number} [options.colors=256] - alternative spelling of `options.colours` * @param {number} [options.effort=7] - CPU effort, between 1 (fastest) and 10 (slowest) * @param {number} [options.dither=1.0] - level of Floyd-Steinberg error diffusion, between 0 (least) and 1 (most) - * @param {number} [options.pageHeight] - page height for animated output * @param {number} [options.loop=0] - number of animation iterations, use 0 for infinite animation - * @param {number[]} [options.delay] - list of delays between animation frames (in milliseconds) + * @param {number|number[]} [options.delay] - delay(s) between animation frames (in milliseconds) * @param {boolean} [options.force=true] - force GIF output, otherwise attempt to use input format * @returns {Sharp} * @throws {Error} Invalid options @@ -657,20 +652,12 @@ function jp2 (options) { * @private * * @param {Object} [source] - output options - * @param {number} [source.pageHeight] - page height for animated output * @param {number} [source.loop=0] - number of animation iterations, use 0 for infinite animation * @param {number[]} [source.delay] - list of delays between animation frames (in milliseconds) * @param {Object} [target] - target object for valid options * @throws {Error} Invalid options */ function trySetAnimationOptions (source, target) { - if (is.object(source) && is.defined(source.pageHeight)) { - if (is.integer(source.pageHeight) && source.pageHeight > 0) { - target.pageHeight = source.pageHeight; - } else { - throw is.invalidParameterError('pageHeight', 'integer larger than 0', source.pageHeight); - } - } if (is.object(source) && is.defined(source.loop)) { if (is.integer(source.loop) && is.inRange(source.loop, 0, 65535)) { target.loop = source.loop; @@ -679,13 +666,16 @@ function trySetAnimationOptions (source, target) { } } if (is.object(source) && is.defined(source.delay)) { - if ( + // We allow singular values as well + if (is.integer(source.delay) && is.inRange(source.delay, 0, 65535)) { + target.delay = [source.delay]; + } else if ( Array.isArray(source.delay) && source.delay.every(is.integer) && source.delay.every(v => is.inRange(v, 0, 65535))) { target.delay = source.delay; } else { - throw is.invalidParameterError('delay', 'array of integers between 0 and 65535', source.delay); + throw is.invalidParameterError('delay', 
'integer or an array of integers between 0 and 65535', source.delay); } } } diff --git a/src/common.cc b/src/common.cc index b8d66ac1..e88b89bd 100644 --- a/src/common.cc +++ b/src/common.cc @@ -490,30 +490,24 @@ namespace sharp { /* Set animation properties if necessary. - Non-provided properties will be loaded from image. */ - VImage SetAnimationProperties(VImage image, int pageHeight, std::vector delay, int loop) { - bool hasDelay = delay.size() != 1 || delay.front() != -1; + VImage SetAnimationProperties(VImage image, int nPages, int pageHeight, std::vector delay, int loop) { + bool hasDelay = !delay.empty(); - if (pageHeight == 0 && image.get_typeof(VIPS_META_PAGE_HEIGHT) == G_TYPE_INT) { - pageHeight = image.get_int(VIPS_META_PAGE_HEIGHT); + // Avoid a copy if none of the animation properties are needed. + if (nPages == 1 && !hasDelay && loop == -1) return image; + + if (delay.size() == 1) { + // We have just one delay, repeat that value for all frames. + delay.insert(delay.end(), nPages - 1, delay[0]); } - if (!hasDelay && image.get_typeof("delay") == VIPS_TYPE_ARRAY_INT) { - delay = image.get_array_int("delay"); - hasDelay = true; - } - - if (loop == -1 && image.get_typeof("loop") == G_TYPE_INT) { - loop = image.get_int("loop"); - } - - if (pageHeight == 0) return image; - - // It is necessary to create the copy as otherwise, pageHeight will be ignored! + // Attaching metadata, need to copy the image. VImage copy = image.copy(); - copy.set(VIPS_META_PAGE_HEIGHT, pageHeight); + // Only set page-height if we have more than one page, or this could + // accidentally turn into an animated image later. + if (nPages > 1) copy.set(VIPS_META_PAGE_HEIGHT, pageHeight); if (hasDelay) copy.set("delay", delay); if (loop != -1) copy.set("loop", loop); @@ -556,6 +550,14 @@ namespace sharp { return copy; } + /* + Multi-page images can have a page height. Fetch it, and sanity check it. + If page-height is not set, it defaults to the image height + */ + int GetPageHeight(VImage image) { + return vips_image_get_page_height(image.get_image()); + } + /* Check the proposed format supports the current dimensions. */ @@ -882,4 +884,74 @@ namespace sharp { return image; } + std::pair ResolveShrink(int width, int height, int targetWidth, int targetHeight, + Canvas canvas, bool swap, bool withoutEnlargement) { + if (swap) { + // Swap input width and height when requested. + std::swap(width, height); + } + + double hshrink = 1.0; + double vshrink = 1.0; + + if (targetWidth > 0 && targetHeight > 0) { + // Fixed width and height + hshrink = static_cast(width) / targetWidth; + vshrink = static_cast(height) / targetHeight; + + switch (canvas) { + case Canvas::CROP: + case Canvas::MIN: + if (hshrink < vshrink) { + vshrink = hshrink; + } else { + hshrink = vshrink; + } + break; + case Canvas::EMBED: + case Canvas::MAX: + if (hshrink > vshrink) { + vshrink = hshrink; + } else { + hshrink = vshrink; + } + break; + case Canvas::IGNORE_ASPECT: + if (swap) { + std::swap(hshrink, vshrink); + } + break; + } + } else if (targetWidth > 0) { + // Fixed width + hshrink = static_cast(width) / targetWidth; + + if (canvas != Canvas::IGNORE_ASPECT) { + // Auto height + vshrink = hshrink; + } + } else if (targetHeight > 0) { + // Fixed height + vshrink = static_cast(height) / targetHeight; + + if (canvas != Canvas::IGNORE_ASPECT) { + // Auto width + hshrink = vshrink; + } + } + + // We should not enlarge (oversample) the output image, + // if withoutEnlargement is specified. 
+ if (withoutEnlargement) { + hshrink = std::max(1.0, hshrink); + vshrink = std::max(1.0, vshrink); + } + + // We don't want to shrink so much that we send an axis to 0 + hshrink = std::min(hshrink, static_cast(width)); + vshrink = std::min(vshrink, static_cast(height)); + + return std::make_pair(hshrink, vshrink); + } + } // namespace sharp diff --git a/src/common.h b/src/common.h index 642c5048..f94bb13c 100644 --- a/src/common.h +++ b/src/common.h @@ -135,6 +135,14 @@ namespace sharp { MISSING }; + enum class Canvas { + CROP, + EMBED, + MAX, + MIN, + IGNORE_ASPECT + }; + // How many tasks are in the queue? extern volatile int counterQueue; @@ -208,9 +216,8 @@ namespace sharp { /* Set animation properties if necessary. - Non-provided properties will be loaded from image. */ - VImage SetAnimationProperties(VImage image, int pageHeight, std::vector delay, int loop); + VImage SetAnimationProperties(VImage image, int nPages, int pageHeight, std::vector delay, int loop); /* Remove animation properties from image. @@ -232,6 +239,12 @@ namespace sharp { */ VImage SetDensity(VImage image, const double density); + /* + Multi-page images can have a page height. Fetch it, and sanity check it. + If page-height is not set, it defaults to the image height + */ + int GetPageHeight(VImage image); + /* Check the proposed format supports the current dimensions. */ @@ -325,6 +338,15 @@ namespace sharp { */ VImage EnsureAlpha(VImage image, double const value); + /* + Calculate the shrink factor, taking into account auto-rotate, the canvas + mode, and so on. The hshrink/vshrink are the amount to shrink the input + image axes by in order for the output axes (ie. after rotation) to match + the required thumbnail width/height and canvas mode. + */ + std::pair ResolveShrink(int width, int height, int targetWidth, int targetHeight, + Canvas canvas, bool swap, bool withoutEnlargement); + } // namespace sharp #endif // SRC_COMMON_H_ diff --git a/src/metadata.cc b/src/metadata.cc index 219ea589..9143b594 100644 --- a/src/metadata.cc +++ b/src/metadata.cc @@ -180,8 +180,6 @@ class MetadataWorker : public Napi::AsyncWorker { } if (baton->pageHeight > 0) { info.Set("pageHeight", baton->pageHeight); - } else if (baton->pages > 0) { - info.Set("pageHeight", baton->height); } if (baton->loop >= 0) { info.Set("loop", baton->loop); diff --git a/src/operations.cc b/src/operations.cc index b58325c9..3a3566e0 100644 --- a/src/operations.cc +++ b/src/operations.cc @@ -308,4 +308,98 @@ namespace sharp { return image; } + /* + * Split and crop each frame, reassemble, and update pageHeight. + */ + VImage CropMultiPage(VImage image, int left, int top, int width, int height, + int nPages, int *pageHeight) { + if (top == 0 && height == *pageHeight) { + // Fast path; no need to adjust the height of the multi-page image + return image.extract_area(left, 0, width, image.height()); + } else { + std::vector pages; + pages.reserve(nPages); + + // Split the image into cropped frames + for (int i = 0; i < nPages; i++) { + pages.push_back( + image.extract_area(left, *pageHeight * i + top, width, height)); + } + + // Reassemble the frames into a tall, thin image + VImage assembled = VImage::arrayjoin(pages, + VImage::option()->set("across", 1)); + + // Update the page height + *pageHeight = height; + + return assembled; + } + } + + /* + * Split into frames, embed each frame, reassemble, and update pageHeight. 
+ */ + VImage EmbedMultiPage(VImage image, int left, int top, int width, int height, + std::vector background, int nPages, int *pageHeight) { + if (top == 0 && height == *pageHeight) { + // Fast path; no need to adjust the height of the multi-page image + return image.embed(left, 0, width, image.height(), VImage::option() + ->set("extend", VIPS_EXTEND_BACKGROUND) + ->set("background", background)); + } else if (left == 0 && width == image.width()) { + // Fast path; no need to adjust the width of the multi-page image + std::vector pages; + pages.reserve(nPages); + + // Rearrange the tall image into a vertical grid + image = image.grid(*pageHeight, nPages, 1); + + // Do the embed on the wide image + image = image.embed(0, top, image.width(), height, VImage::option() + ->set("extend", VIPS_EXTEND_BACKGROUND) + ->set("background", background)); + + // Split the wide image into frames + for (int i = 0; i < nPages; i++) { + pages.push_back( + image.extract_area(width * i, 0, width, height)); + } + + // Reassemble the frames into a tall, thin image + VImage assembled = VImage::arrayjoin(pages, + VImage::option()->set("across", 1)); + + // Update the page height + *pageHeight = height; + + return assembled; + } else { + std::vector pages; + pages.reserve(nPages); + + // Split the image into frames + for (int i = 0; i < nPages; i++) { + pages.push_back( + image.extract_area(0, *pageHeight * i, image.width(), *pageHeight)); + } + + // Embed each frame in the target size + for (int i = 0; i < nPages; i++) { + pages[i] = pages[i].embed(left, top, width, height, VImage::option() + ->set("extend", VIPS_EXTEND_BACKGROUND) + ->set("background", background)); + } + + // Reassemble the frames into a tall, thin image + VImage assembled = VImage::arrayjoin(pages, + VImage::option()->set("across", 1)); + + // Update the page height + *pageHeight = height; + + return assembled; + } + } + } // namespace sharp diff --git a/src/operations.h b/src/operations.h index 13270437..6d1f1ca3 100644 --- a/src/operations.h +++ b/src/operations.h @@ -108,6 +108,18 @@ namespace sharp { */ VImage EnsureColourspace(VImage image, VipsInterpretation colourspace); + /* + * Split and crop each frame, reassemble, and update pageHeight. + */ + VImage CropMultiPage(VImage image, int left, int top, int width, int height, + int nPages, int *pageHeight); + + /* + * Split into frames, embed each frame, reassemble, and update pageHeight. + */ + VImage EmbedMultiPage(VImage image, int left, int top, int width, int height, + std::vector background, int nPages, int *pageHeight); + } // namespace sharp #endif // SRC_OPERATIONS_H_ diff --git a/src/pipeline.cc b/src/pipeline.cc index 74a8b148..a6a041a0 100644 --- a/src/pipeline.cc +++ b/src/pipeline.cc @@ -69,6 +69,15 @@ class PipelineWorker : public Napi::AsyncWorker { std::tie(image, inputImageType) = sharp::OpenInput(baton->input); image = sharp::EnsureColourspace(image, baton->colourspaceInput); + int nPages = baton->input->pages; + if (nPages == -1) { + // Resolve the number of pages if we need to render until the end of the document + nPages = image.get_typeof(VIPS_META_N_PAGES) != 0 ? 
image.get_int(VIPS_META_N_PAGES) : 1; + } + + // Get pre-resize page height + int pageHeight = sharp::GetPageHeight(image); + // Calculate angle of rotation VipsAngle rotation; if (baton->useExifOrientation) { @@ -104,194 +113,171 @@ class PipelineWorker : public Napi::AsyncWorker { // Pre extraction if (baton->topOffsetPre != -1) { - image = image.extract_area(baton->leftOffsetPre, baton->topOffsetPre, baton->widthPre, baton->heightPre); + image = nPages > 1 + ? sharp::CropMultiPage(image, + baton->leftOffsetPre, baton->topOffsetPre, baton->widthPre, baton->heightPre, nPages, &pageHeight) + : image.extract_area(baton->leftOffsetPre, baton->topOffsetPre, baton->widthPre, baton->heightPre); } // Get pre-resize image width and height int inputWidth = image.width(); int inputHeight = image.height(); - if (!baton->rotateBeforePreExtract && - (rotation == VIPS_ANGLE_D90 || rotation == VIPS_ANGLE_D270)) { - // Swap input output width and height when rotating by 90 or 270 degrees - std::swap(inputWidth, inputHeight); - } - // If withoutEnlargement is specified, - // Override target width and height if exceeds respective value from input file - if (baton->withoutEnlargement) { - if (baton->width > inputWidth) { - baton->width = inputWidth; - } - if (baton->height > inputHeight) { - baton->height = inputHeight; - } + // Is there just one page? Shrink to inputHeight instead + if (nPages == 1) { + pageHeight = inputHeight; } // Scaling calculations - double xfactor = 1.0; - double yfactor = 1.0; + double hshrink; + double vshrink; int targetResizeWidth = baton->width; int targetResizeHeight = baton->height; - if (baton->width > 0 && baton->height > 0) { - // Fixed width and height - xfactor = static_cast(inputWidth) / static_cast(baton->width); - yfactor = static_cast(inputHeight) / static_cast(baton->height); - switch (baton->canvas) { - case Canvas::CROP: - if (xfactor < yfactor) { - targetResizeHeight = static_cast(round(static_cast(inputHeight) / xfactor)); - yfactor = xfactor; - } else { - targetResizeWidth = static_cast(round(static_cast(inputWidth) / yfactor)); - xfactor = yfactor; - } - break; - case Canvas::EMBED: - if (xfactor > yfactor) { - targetResizeHeight = static_cast(round(static_cast(inputHeight) / xfactor)); - yfactor = xfactor; - } else { - targetResizeWidth = static_cast(round(static_cast(inputWidth) / yfactor)); - xfactor = yfactor; - } - break; - case Canvas::MAX: - if (xfactor > yfactor) { - targetResizeHeight = baton->height = static_cast(round(static_cast(inputHeight) / xfactor)); - yfactor = xfactor; - } else { - targetResizeWidth = baton->width = static_cast(round(static_cast(inputWidth) / yfactor)); - xfactor = yfactor; - } - break; - case Canvas::MIN: - if (xfactor < yfactor) { - targetResizeHeight = baton->height = static_cast(round(static_cast(inputHeight) / xfactor)); - yfactor = xfactor; - } else { - targetResizeWidth = baton->width = static_cast(round(static_cast(inputWidth) / yfactor)); - xfactor = yfactor; - } - break; - case Canvas::IGNORE_ASPECT: - if (!baton->rotateBeforePreExtract && - (rotation == VIPS_ANGLE_D90 || rotation == VIPS_ANGLE_D270)) { - std::swap(xfactor, yfactor); - } - break; - } - } else if (baton->width > 0) { - // Fixed width - xfactor = static_cast(inputWidth) / static_cast(baton->width); - if (baton->canvas == Canvas::IGNORE_ASPECT) { - targetResizeHeight = baton->height = inputHeight; - } else { - // Auto height - yfactor = xfactor; - targetResizeHeight = baton->height = static_cast(round(static_cast(inputHeight) / yfactor)); - } - } else 
if (baton->height > 0) { - // Fixed height - yfactor = static_cast(inputHeight) / static_cast(baton->height); - if (baton->canvas == Canvas::IGNORE_ASPECT) { - targetResizeWidth = baton->width = inputWidth; - } else { - // Auto width - xfactor = yfactor; - targetResizeWidth = baton->width = static_cast(round(static_cast(inputWidth) / xfactor)); - } - } else { - // Identity transform - baton->width = inputWidth; - baton->height = inputHeight; - } - // Calculate integral box shrink - int xshrink = std::max(1, static_cast(floor(xfactor))); - int yshrink = std::max(1, static_cast(floor(yfactor))); + // Swap input output width and height when rotating by 90 or 270 degrees + bool swap = !baton->rotateBeforePreExtract && (rotation == VIPS_ANGLE_D90 || rotation == VIPS_ANGLE_D270); - // Calculate residual float affine transformation - double xresidual = static_cast(xshrink) / xfactor; - double yresidual = static_cast(yshrink) / yfactor; + // Shrink to pageHeight, so we work for multi-page images + std::tie(hshrink, vshrink) = sharp::ResolveShrink( + inputWidth, pageHeight, targetResizeWidth, targetResizeHeight, + baton->canvas, swap, baton->withoutEnlargement); - // If integral x and y shrink are equal, try to use shrink-on-load for JPEG and WebP, - // but not when applying gamma correction, pre-resize extract, trim or input colourspace - int shrink_on_load = 1; + // The jpeg preload shrink. + int jpegShrinkOnLoad = 1; - int shrink_on_load_factor = 1; - // Leave at least a factor of two for the final resize step, when fastShrinkOnLoad: false - // for more consistent results and avoid occasional small image shifting - if (!baton->fastShrinkOnLoad) { - shrink_on_load_factor = 2; - } - if ( - xshrink == yshrink && xshrink >= 2 * shrink_on_load_factor && - (inputImageType == sharp::ImageType::JPEG || inputImageType == sharp::ImageType::WEBP) && + // WebP, PDF, SVG scale + double scale = 1.0; + + // Try to reload input using shrink-on-load for JPEG, WebP, SVG and PDF, when: + // - the width or height parameters are specified; + // - gamma correction doesn't need to be applied; + // - trimming or pre-resize extract isn't required; + // - input colourspace is not specified; + bool const shouldPreShrink = (targetResizeWidth > 0 || targetResizeHeight > 0) && baton->gamma == 0 && baton->topOffsetPre == -1 && baton->trimThreshold == 0.0 && - baton->colourspaceInput == VIPS_INTERPRETATION_LAST && - image.width() > 3 && image.height() > 3 && baton->input->pages == 1 - ) { - if (xshrink >= 8 * shrink_on_load_factor) { - xfactor = xfactor / 8; - yfactor = yfactor / 8; - shrink_on_load = 8; - } else if (xshrink >= 4 * shrink_on_load_factor) { - xfactor = xfactor / 4; - yfactor = yfactor / 4; - shrink_on_load = 4; - } else if (xshrink >= 2 * shrink_on_load_factor) { - xfactor = xfactor / 2; - yfactor = yfactor / 2; - shrink_on_load = 2; + baton->colourspaceInput == VIPS_INTERPRETATION_LAST; + + if (shouldPreShrink) { + // The common part of the shrink: the bit by which both axes must be shrunk + double shrink = std::min(hshrink, vshrink); + + if (inputImageType == sharp::ImageType::JPEG) { + // Leave at least a factor of two for the final resize step, when fastShrinkOnLoad: false + // for more consistent results and avoid occasional small image shifting + int factor = baton->fastShrinkOnLoad ? 
1 : 2; + if (shrink >= 8 * factor) { + jpegShrinkOnLoad = 8; + } else if (shrink >= 4 * factor) { + jpegShrinkOnLoad = 4; + } else if (shrink >= 2 * factor) { + jpegShrinkOnLoad = 2; + } + } else if (inputImageType == sharp::ImageType::WEBP || + inputImageType == sharp::ImageType::SVG || + inputImageType == sharp::ImageType::PDF) { + scale = 1.0 / shrink; } } - // Help ensure a final kernel-based reduction to prevent shrink aliasing - if (shrink_on_load > 1 && (xresidual == 1.0 || yresidual == 1.0)) { - shrink_on_load = shrink_on_load / 2; - xfactor = xfactor * 2; - yfactor = yfactor * 2; - } - if (shrink_on_load > 1) { - // Reload input using shrink-on-load + + // Reload input using shrink-on-load, it'll be an integer shrink + // factor for jpegload*, a double scale factor for webpload*, + // pdfload* and svgload* + if (jpegShrinkOnLoad > 1) { vips::VOption *option = VImage::option() ->set("access", baton->input->access) - ->set("shrink", shrink_on_load) + ->set("shrink", jpegShrinkOnLoad) ->set("fail", baton->input->failOnError); if (baton->input->buffer != nullptr) { + // Reload JPEG buffer VipsBlob *blob = vips_blob_new(nullptr, baton->input->buffer, baton->input->bufferLength); - if (inputImageType == sharp::ImageType::JPEG) { - // Reload JPEG buffer - image = VImage::jpegload_buffer(blob, option); - } else { - // Reload WebP buffer - image = VImage::webpload_buffer(blob, option); - } + image = VImage::jpegload_buffer(blob, option); vips_area_unref(reinterpret_cast(blob)); } else { - if (inputImageType == sharp::ImageType::JPEG) { - // Reload JPEG file - image = VImage::jpegload(const_cast(baton->input->file.data()), option); + // Reload JPEG file + image = VImage::jpegload(const_cast(baton->input->file.data()), option); + } + } else if (scale != 1.0) { + vips::VOption *option = VImage::option() + ->set("access", baton->input->access) + ->set("scale", scale) + ->set("fail", baton->input->failOnError); + if (inputImageType == sharp::ImageType::WEBP) { + option->set("n", baton->input->pages); + option->set("page", baton->input->page); + + if (baton->input->buffer != nullptr) { + // Reload WebP buffer + VipsBlob *blob = vips_blob_new(nullptr, baton->input->buffer, baton->input->bufferLength); + image = VImage::webpload_buffer(blob, option); + vips_area_unref(reinterpret_cast(blob)); } else { // Reload WebP file image = VImage::webpload(const_cast(baton->input->file.data()), option); } - } - // Recalculate integral shrink and double residual - int const shrunkOnLoadWidth = image.width(); - int const shrunkOnLoadHeight = image.height(); - if (!baton->rotateBeforePreExtract && - (rotation == VIPS_ANGLE_D90 || rotation == VIPS_ANGLE_D270)) { - // Swap when rotating by 90 or 270 degrees - xfactor = static_cast(shrunkOnLoadWidth) / static_cast(targetResizeHeight); - yfactor = static_cast(shrunkOnLoadHeight) / static_cast(targetResizeWidth); - } else { - xfactor = static_cast(shrunkOnLoadWidth) / static_cast(targetResizeWidth); - yfactor = static_cast(shrunkOnLoadHeight) / static_cast(targetResizeHeight); + } else if (inputImageType == sharp::ImageType::SVG) { + option->set("unlimited", baton->input->unlimited); + option->set("dpi", baton->input->density); + + if (baton->input->buffer != nullptr) { + // Reload SVG buffer + VipsBlob *blob = vips_blob_new(nullptr, baton->input->buffer, baton->input->bufferLength); + image = VImage::svgload_buffer(blob, option); + vips_area_unref(reinterpret_cast(blob)); + } else { + // Reload SVG file + image = 
VImage::svgload(const_cast(baton->input->file.data()), option); + } + + sharp::SetDensity(image, baton->input->density); + } else if (inputImageType == sharp::ImageType::PDF) { + option->set("n", baton->input->pages); + option->set("page", baton->input->page); + option->set("dpi", baton->input->density); + + if (baton->input->buffer != nullptr) { + // Reload PDF buffer + VipsBlob *blob = vips_blob_new(nullptr, baton->input->buffer, baton->input->bufferLength); + image = VImage::pdfload_buffer(blob, option); + vips_area_unref(reinterpret_cast(blob)); + } else { + // Reload PDF file + image = VImage::pdfload(const_cast(baton->input->file.data()), option); + } + + sharp::SetDensity(image, baton->input->density); } } - // Remove animation properties from single page images - if (baton->input->pages == 1) { - image = sharp::RemoveAnimationProperties(image); + + // Any pre-shrinking may already have been done + int thumbWidth = image.width(); + int thumbHeight = image.height(); + + // After pre-shrink, but before the main shrink stage + // Reuse the initial pageHeight if we didn't pre-shrink + int preshrunkPageHeight = shouldPreShrink ? sharp::GetPageHeight(image) : pageHeight; + + if (baton->fastShrinkOnLoad && jpegShrinkOnLoad > 1) { + // JPEG shrink-on-load rounds the output dimensions down, which + // may cause incorrect dimensions when fastShrinkOnLoad is enabled + // Just recalculate vshrink / hshrink on the main image instead of + // the pre-shrunk image when this is the case + hshrink = static_cast(thumbWidth) / (static_cast(inputWidth) / hshrink); + vshrink = static_cast(preshrunkPageHeight) / (static_cast(pageHeight) / vshrink); + } else { + // Shrink to preshrunkPageHeight, so we work for multi-page images + std::tie(hshrink, vshrink) = sharp::ResolveShrink( + thumbWidth, preshrunkPageHeight, targetResizeWidth, targetResizeHeight, + baton->canvas, swap, baton->withoutEnlargement); + } + + int targetHeight = static_cast(std::rint(static_cast(preshrunkPageHeight) / vshrink)); + int targetPageHeight = targetHeight; + + // In toilet-roll mode, we must adjust vshrink so that we exactly hit + // preshrunkPageHeight or we'll have pixels straddling pixel boundaries + if (thumbHeight > preshrunkPageHeight) { + targetHeight *= nPages; + vshrink = static_cast(thumbHeight) / targetHeight; } // Ensure we're using a device-independent colour space @@ -345,7 +331,7 @@ class PipelineWorker : public Napi::AsyncWorker { image = image.colourspace(VIPS_INTERPRETATION_B_W); } - bool const shouldResize = xfactor != 1.0 || yfactor != 1.0; + bool const shouldResize = hshrink != 1.0 || vshrink != 1.0; bool const shouldBlur = baton->blurSigma != 0.0; bool const shouldConv = baton->convKernelWidth * baton->convKernelHeight > 0; bool const shouldSharpen = baton->sharpenSigma != 0.0; @@ -379,21 +365,8 @@ class PipelineWorker : public Napi::AsyncWorker { ) { throw vips::VError("Unknown kernel"); } - // Ensure shortest edge is at least 1 pixel - if (image.width() / xfactor < 0.5) { - xfactor = 2 * image.width(); - if (baton->canvas != Canvas::EMBED) { - baton->width = 1; - } - } - if (image.height() / yfactor < 0.5) { - yfactor = 2 * image.height(); - if (baton->canvas != Canvas::EMBED) { - baton->height = 1; - } - } - image = image.resize(1.0 / xfactor, VImage::option() - ->set("vscale", 1.0 / yfactor) + image = image.resize(1.0 / hshrink, VImage::option() + ->set("vscale", 1.0 / vshrink) ->set("kernel", kernel)); } @@ -429,52 +402,67 @@ class PipelineWorker : public Napi::AsyncWorker { image = 
image.copy(VImage::option()->set("interpretation", baton->colourspace)); } + inputWidth = image.width(); + inputHeight = nPages > 1 ? targetPageHeight : image.height(); + + // Resolve dimensions + if (baton->width <= 0) { + baton->width = inputWidth; + } + if (baton->height <= 0) { + baton->height = inputHeight; + } + // Crop/embed - if (image.width() != baton->width || image.height() != baton->height) { - if (baton->canvas == Canvas::EMBED) { + if (inputWidth != baton->width || inputHeight != baton->height) { + if (baton->canvas == sharp::Canvas::EMBED) { std::vector background; std::tie(image, background) = sharp::ApplyAlpha(image, baton->resizeBackground, shouldPremultiplyAlpha); // Embed - // Calculate where to position the embeded image if gravity specified, else center. + // Calculate where to position the embedded image if gravity specified, else center. int left; int top; - left = static_cast(round((baton->width - image.width()) / 2)); - top = static_cast(round((baton->height - image.height()) / 2)); + left = static_cast(round((baton->width - inputWidth) / 2)); + top = static_cast(round((baton->height - inputHeight) / 2)); - int width = std::max(image.width(), baton->width); - int height = std::max(image.height(), baton->height); + int width = std::max(inputWidth, baton->width); + int height = std::max(inputHeight, baton->height); std::tie(left, top) = sharp::CalculateEmbedPosition( - image.width(), image.height(), baton->width, baton->height, baton->position); + inputWidth, inputHeight, baton->width, baton->height, baton->position); - image = image.embed(left, top, width, height, VImage::option() - ->set("extend", VIPS_EXTEND_BACKGROUND) - ->set("background", background)); + image = nPages > 1 + ? sharp::EmbedMultiPage(image, + left, top, width, height, background, nPages, &targetPageHeight) + : image.embed(left, top, width, height, VImage::option() + ->set("extend", VIPS_EXTEND_BACKGROUND) + ->set("background", background)); + } else if (baton->canvas == sharp::Canvas::CROP) { + if (baton->width > inputWidth) { + baton->width = inputWidth; + } + if (baton->height > inputHeight) { + baton->height = inputHeight; + } - } else if ( - baton->canvas != Canvas::IGNORE_ASPECT && - (image.width() > baton->width || image.height() > baton->height) - ) { - // Crop/max/min + // Crop if (baton->position < 9) { // Gravity-based crop int left; int top; std::tie(left, top) = sharp::CalculateCrop( - image.width(), image.height(), baton->width, baton->height, baton->position); - int width = std::min(image.width(), baton->width); - int height = std::min(image.height(), baton->height); - image = image.extract_area(left, top, width, height); - } else { + inputWidth, inputHeight, baton->width, baton->height, baton->position); + int width = std::min(inputWidth, baton->width); + int height = std::min(inputHeight, baton->height); + + image = nPages > 1 + ? 
sharp::CropMultiPage(image, + left, top, width, height, nPages, &targetPageHeight) + : image.extract_area(left, top, width, height); + } else if (nPages == 1) { // Skip smart crop for multi-page images // Attention-based or Entropy-based crop - if (baton->width > image.width()) { - baton->width = image.width(); - } - if (baton->height > image.height()) { - baton->height = image.height(); - } image = image.tilecache(VImage::option() ->set("access", VIPS_ACCESS_RANDOM) ->set("threaded", TRUE)); @@ -496,8 +484,17 @@ class PipelineWorker : public Napi::AsyncWorker { // Post extraction if (baton->topOffsetPost != -1) { - image = image.extract_area( - baton->leftOffsetPost, baton->topOffsetPost, baton->widthPost, baton->heightPost); + if (nPages > 1) { + image = sharp::CropMultiPage(image, + baton->leftOffsetPost, baton->topOffsetPost, baton->widthPost, baton->heightPost, + nPages, &targetPageHeight); + + // heightPost is used in the info object, so update to reflect the number of pages + baton->heightPost *= nPages; + } else { + image = image.extract_area( + baton->leftOffsetPost, baton->topOffsetPost, baton->widthPost, baton->heightPost); + } } // Affine transform @@ -519,10 +516,13 @@ class PipelineWorker : public Napi::AsyncWorker { // Embed baton->width = image.width() + baton->extendLeft + baton->extendRight; - baton->height = image.height() + baton->extendTop + baton->extendBottom; + baton->height = (nPages > 1 ? targetPageHeight : image.height()) + baton->extendTop + baton->extendBottom; - image = image.embed(baton->extendLeft, baton->extendTop, baton->width, baton->height, - VImage::option()->set("extend", VIPS_EXTEND_BACKGROUND)->set("background", background)); + image = nPages > 1 + ? sharp::EmbedMultiPage(image, + baton->extendLeft, baton->extendTop, baton->width, baton->height, background, nPages, &targetPageHeight) + : image.embed(baton->extendLeft, baton->extendTop, baton->width, baton->height, + VImage::option()->set("extend", VIPS_EXTEND_BACKGROUND)->set("background", background)); } // Median - must happen before blurring, due to the utility of blurring after thresholding if (shouldApplyMedian) { @@ -763,10 +763,7 @@ class PipelineWorker : public Napi::AsyncWorker { baton->height = image.height(); image = sharp::SetAnimationProperties( - image, - baton->pageHeight, - baton->delay, - baton->loop); + image, nPages, targetPageHeight, baton->delay, baton->loop); // Output sharp::SetTimeout(image, baton->timeoutSeconds); @@ -1317,15 +1314,15 @@ Napi::Value pipeline(const Napi::CallbackInfo& info) { // Canvas option std::string canvas = sharp::AttrAsStr(options, "canvas"); if (canvas == "crop") { - baton->canvas = Canvas::CROP; + baton->canvas = sharp::Canvas::CROP; } else if (canvas == "embed") { - baton->canvas = Canvas::EMBED; + baton->canvas = sharp::Canvas::EMBED; } else if (canvas == "max") { - baton->canvas = Canvas::MAX; + baton->canvas = sharp::Canvas::MAX; } else if (canvas == "min") { - baton->canvas = Canvas::MIN; + baton->canvas = sharp::Canvas::MIN; } else if (canvas == "ignore_aspect") { - baton->canvas = Canvas::IGNORE_ASPECT; + baton->canvas = sharp::Canvas::IGNORE_ASPECT; } // Tint chroma baton->tintA = sharp::AttrAsDouble(options, "tintA"); @@ -1520,10 +1517,7 @@ Napi::Value pipeline(const Napi::CallbackInfo& info) { vips_enum_from_nick(nullptr, VIPS_TYPE_BAND_FORMAT, sharp::AttrAsStr(options, "rawDepth").data())); - // Animated output - if (sharp::HasAttr(options, "pageHeight")) { - baton->pageHeight = sharp::AttrAsUint32(options, "pageHeight"); - } + // Animated 
output properties if (sharp::HasAttr(options, "loop")) { baton->loop = sharp::AttrAsUint32(options, "loop"); } diff --git a/src/pipeline.h b/src/pipeline.h index d461a64d..4f048ced 100644 --- a/src/pipeline.h +++ b/src/pipeline.h @@ -27,14 +27,6 @@ Napi::Value pipeline(const Napi::CallbackInfo& info); -enum class Canvas { - CROP, - EMBED, - MAX, - MIN, - IGNORE_ASPECT -}; - struct Composite { sharp::InputDescriptor *input; VipsBlendMode mode; @@ -75,7 +67,7 @@ struct PipelineBaton { int width; int height; int channels; - Canvas canvas; + sharp::Canvas canvas; int position; std::vector resizeBackground; bool hasCropOffset; @@ -200,7 +192,6 @@ struct PipelineBaton { double ensureAlpha; VipsInterpretation colourspaceInput; VipsInterpretation colourspace; - int pageHeight; std::vector delay; int loop; int tileSize; @@ -221,7 +212,7 @@ struct PipelineBaton { topOffsetPre(-1), topOffsetPost(-1), channels(0), - canvas(Canvas::CROP), + canvas(sharp::Canvas::CROP), position(0), resizeBackground{ 0.0, 0.0, 0.0, 255.0 }, hasCropOffset(false), @@ -334,7 +325,6 @@ struct PipelineBaton { ensureAlpha(-1.0), colourspaceInput(VIPS_INTERPRETATION_LAST), colourspace(VIPS_INTERPRETATION_LAST), - pageHeight(0), delay{-1}, loop(-1), tileSize(256), diff --git a/test/fixtures/expected/clahe-11-25-14.jpg b/test/fixtures/expected/clahe-11-25-14.jpg index 3ed0f51a..067b79d8 100644 Binary files a/test/fixtures/expected/clahe-11-25-14.jpg and b/test/fixtures/expected/clahe-11-25-14.jpg differ diff --git a/test/fixtures/expected/embed-animated-height.webp b/test/fixtures/expected/embed-animated-height.webp new file mode 100644 index 00000000..a546307d Binary files /dev/null and b/test/fixtures/expected/embed-animated-height.webp differ diff --git a/test/fixtures/expected/embed-animated-width.webp b/test/fixtures/expected/embed-animated-width.webp new file mode 100644 index 00000000..e11efa68 Binary files /dev/null and b/test/fixtures/expected/embed-animated-width.webp differ diff --git a/test/fixtures/expected/extend-equal-single.webp b/test/fixtures/expected/extend-equal-single.webp new file mode 100644 index 00000000..95fab397 Binary files /dev/null and b/test/fixtures/expected/extend-equal-single.webp differ diff --git a/test/fixtures/expected/extract-lch.jpg b/test/fixtures/expected/extract-lch.jpg index e922b78a..f5114035 100644 Binary files a/test/fixtures/expected/extract-lch.jpg and b/test/fixtures/expected/extract-lch.jpg differ diff --git a/test/fixtures/expected/gravity-center-height.webp b/test/fixtures/expected/gravity-center-height.webp new file mode 100644 index 00000000..70fbd1de Binary files /dev/null and b/test/fixtures/expected/gravity-center-height.webp differ diff --git a/test/fixtures/expected/gravity-center-width.webp b/test/fixtures/expected/gravity-center-width.webp new file mode 100644 index 00000000..e885bb41 Binary files /dev/null and b/test/fixtures/expected/gravity-center-width.webp differ diff --git a/test/fixtures/expected/hilutite.jpg b/test/fixtures/expected/hilutite.jpg index ed6d06a6..6ae6a71c 100644 Binary files a/test/fixtures/expected/hilutite.jpg and b/test/fixtures/expected/hilutite.jpg differ diff --git a/test/fixtures/expected/icc-cmyk.jpg b/test/fixtures/expected/icc-cmyk.jpg index 248070c6..66f1f599 100644 Binary files a/test/fixtures/expected/icc-cmyk.jpg and b/test/fixtures/expected/icc-cmyk.jpg differ diff --git a/test/fixtures/expected/resize-crop-extract.jpg b/test/fixtures/expected/resize-crop-extract.jpg index 166e5e96..e596b9a0 100644 Binary files 
a/test/fixtures/expected/resize-crop-extract.jpg and b/test/fixtures/expected/resize-crop-extract.jpg differ diff --git a/test/fixtures/expected/svg72.png b/test/fixtures/expected/svg72.png index a7833dcf..01d0586a 100644 Binary files a/test/fixtures/expected/svg72.png and b/test/fixtures/expected/svg72.png differ diff --git a/test/fixtures/expected/tint-sepia.jpg b/test/fixtures/expected/tint-sepia.jpg index b344a482..3c8af2c5 100644 Binary files a/test/fixtures/expected/tint-sepia.jpg and b/test/fixtures/expected/tint-sepia.jpg differ diff --git a/test/unit/avif.js b/test/unit/avif.js index 98c37702..b2def445 100644 --- a/test/unit/avif.js +++ b/test/unit/avif.js @@ -27,7 +27,7 @@ describe('AVIF', () => { format: 'jpeg', hasAlpha: false, hasProfile: false, - height: 13, + height: 14, isProgressive: false, space: 'srgb', width: 32 @@ -50,7 +50,6 @@ describe('AVIF', () => { hasProfile: false, height: 26, isProgressive: false, - pageHeight: 26, pagePrimary: 0, pages: 1, space: 'srgb', @@ -71,9 +70,8 @@ describe('AVIF', () => { format: 'heif', hasAlpha: false, hasProfile: false, - height: 12, + height: 14, isProgressive: false, - pageHeight: 12, pagePrimary: 0, pages: 1, space: 'srgb', @@ -97,7 +95,6 @@ describe('AVIF', () => { hasProfile: false, height: 300, isProgressive: false, - pageHeight: 300, pagePrimary: 0, pages: 1, space: 'srgb', diff --git a/test/unit/extend.js b/test/unit/extend.js index ea491e12..9e49ce51 100644 --- a/test/unit/extend.js +++ b/test/unit/extend.js @@ -6,16 +6,30 @@ const sharp = require('../../'); const fixtures = require('../fixtures'); describe('Extend', function () { - it('extend all sides equally via a single value', function (done) { - sharp(fixtures.inputJpg) - .resize(120) - .extend(10) - .toBuffer(function (err, data, info) { - if (err) throw err; - assert.strictEqual(140, info.width); - assert.strictEqual(118, info.height); - fixtures.assertSimilar(fixtures.expected('extend-equal-single.jpg'), data, done); - }); + describe('extend all sides equally via a single value', function () { + it('JPEG', function (done) { + sharp(fixtures.inputJpg) + .resize(120) + .extend(10) + .toBuffer(function (err, data, info) { + if (err) throw err; + assert.strictEqual(140, info.width); + assert.strictEqual(118, info.height); + fixtures.assertSimilar(fixtures.expected('extend-equal-single.jpg'), data, done); + }); + }); + + it('Animated WebP', function (done) { + sharp(fixtures.inputWebPAnimated, { pages: -1 }) + .resize(120) + .extend(10) + .toBuffer(function (err, data, info) { + if (err) throw err; + assert.strictEqual(140, info.width); + assert.strictEqual(140 * 9, info.height); + fixtures.assertSimilar(fixtures.expected('extend-equal-single.webp'), data, done); + }); + }); }); it('extend all sides equally with RGB', function (done) { diff --git a/test/unit/extract.js b/test/unit/extract.js index 05d67bf1..442638dc 100644 --- a/test/unit/extract.js +++ b/test/unit/extract.js @@ -39,10 +39,35 @@ describe('Partial image extraction', function () { }); }); + describe('Animated WebP', function () { + it('Before resize', function (done) { + sharp(fixtures.inputWebPAnimated, { pages: -1 }) + .extract({ left: 0, top: 30, width: 80, height: 20 }) + .resize(320, 80) + .toBuffer(function (err, data, info) { + if (err) throw err; + assert.strictEqual(320, info.width); + assert.strictEqual(80 * 9, info.height); + fixtures.assertSimilar(fixtures.expected('gravity-center-height.webp'), data, done); + }); + }); + + it('After resize', function (done) { + 
sharp(fixtures.inputWebPAnimated, { pages: -1 }) + .resize(320, 320) + .extract({ left: 0, top: 120, width: 320, height: 80 }) + .toBuffer(function (err, data, info) { + if (err) throw err; + assert.strictEqual(320, info.width); + assert.strictEqual(80 * 9, info.height); + fixtures.assertSimilar(fixtures.expected('gravity-center-height.webp'), data, done); + }); + }); + }); + it('TIFF', function (done) { sharp(fixtures.inputTiff) .extract({ left: 34, top: 63, width: 341, height: 529 }) - .jpeg() .toBuffer(function (err, data, info) { if (err) throw err; assert.strictEqual(341, info.width); diff --git a/test/unit/gif.js b/test/unit/gif.js index 0cade87b..d44b54f9 100644 --- a/test/unit/gif.js +++ b/test/unit/gif.js @@ -80,12 +80,6 @@ describe('GIF input', () => { assert.strictEqual(true, reduced.length < original.length); }); - it('invalid pageHeight throws', () => { - assert.throws(() => { - sharp().gif({ pageHeight: 0 }); - }); - }); - it('invalid loop throws', () => { assert.throws(() => { sharp().gif({ loop: -1 }); @@ -97,7 +91,7 @@ describe('GIF input', () => { it('invalid delay throws', () => { assert.throws(() => { - sharp().gif({ delay: [-1] }); + sharp().gif({ delay: -1 }); }); assert.throws(() => { sharp().gif({ delay: [65536] }); diff --git a/test/unit/metadata.js b/test/unit/metadata.js index 5fcb55f2..76a842a9 100644 --- a/test/unit/metadata.js +++ b/test/unit/metadata.js @@ -194,6 +194,29 @@ describe('Image metadata', function () { it('Animated WebP', () => sharp(fixtures.inputWebPAnimated) + .metadata() + .then(({ + format, width, height, space, channels, depth, + isProgressive, pages, loop, delay, hasProfile, + hasAlpha + }) => { + assert.strictEqual(format, 'webp'); + assert.strictEqual(width, 80); + assert.strictEqual(height, 80); + assert.strictEqual(space, 'srgb'); + assert.strictEqual(channels, 4); + assert.strictEqual(depth, 'uchar'); + assert.strictEqual(isProgressive, false); + assert.strictEqual(pages, 9); + assert.strictEqual(loop, 0); + assert.deepStrictEqual(delay, [120, 120, 90, 120, 120, 90, 120, 90, 30]); + assert.strictEqual(hasProfile, false); + assert.strictEqual(hasAlpha, true); + }) + ); + + it('Animated WebP with all pages', () => + sharp(fixtures.inputWebPAnimated, { pages: -1 }) .metadata() .then(({ format, width, height, space, channels, depth, @@ -202,7 +225,7 @@ describe('Image metadata', function () { }) => { assert.strictEqual(format, 'webp'); assert.strictEqual(width, 80); - assert.strictEqual(height, 80); + assert.strictEqual(height, 720); assert.strictEqual(space, 'srgb'); assert.strictEqual(channels, 4); assert.strictEqual(depth, 'uchar'); @@ -221,8 +244,8 @@ describe('Image metadata', function () { .metadata() .then(({ format, width, height, space, channels, depth, - isProgressive, pages, pageHeight, loop, delay, - hasProfile, hasAlpha + isProgressive, pages, loop, delay, hasProfile, + hasAlpha }) => { assert.strictEqual(format, 'webp'); assert.strictEqual(width, 370); @@ -232,7 +255,6 @@ describe('Image metadata', function () { assert.strictEqual(depth, 'uchar'); assert.strictEqual(isProgressive, false); assert.strictEqual(pages, 10); - assert.strictEqual(pageHeight, 285); assert.strictEqual(loop, 3); assert.deepStrictEqual(delay, [...Array(9).fill(3000), 15000]); assert.strictEqual(hasProfile, false); @@ -285,8 +307,8 @@ describe('Image metadata', function () { .metadata() .then(({ format, width, height, space, channels, depth, - isProgressive, pages, pageHeight, loop, delay, - background, hasProfile, hasAlpha + isProgressive, pages, loop, 
delay, background, + hasProfile, hasAlpha }) => { assert.strictEqual(format, 'gif'); assert.strictEqual(width, 80); @@ -296,7 +318,6 @@ describe('Image metadata', function () { assert.strictEqual(depth, 'uchar'); assert.strictEqual(isProgressive, false); assert.strictEqual(pages, 30); - assert.strictEqual(pageHeight, 80); assert.strictEqual(loop, 0); assert.deepStrictEqual(delay, Array(30).fill(30)); assert.deepStrictEqual(background, { r: 0, g: 0, b: 0 }); @@ -310,8 +331,8 @@ describe('Image metadata', function () { .metadata() .then(({ format, width, height, space, channels, depth, - isProgressive, pages, pageHeight, loop, delay, - hasProfile, hasAlpha + isProgressive, pages, loop, delay, hasProfile, + hasAlpha }) => { assert.strictEqual(format, 'gif'); assert.strictEqual(width, 370); @@ -321,7 +342,6 @@ describe('Image metadata', function () { assert.strictEqual(depth, 'uchar'); assert.strictEqual(isProgressive, false); assert.strictEqual(pages, 10); - assert.strictEqual(pageHeight, 285); assert.strictEqual(loop, 2); assert.deepStrictEqual(delay, [...Array(9).fill(3000), 15000]); assert.strictEqual(hasProfile, false); @@ -522,7 +542,7 @@ describe('Image metadata', function () { assert.strictEqual('Relative', profile.intent); assert.strictEqual('Printer', profile.deviceClass); }); - fixtures.assertSimilar(output, fixtures.path('expected/icc-cmyk.jpg'), { threshold: 0 }, done); + fixtures.assertSimilar(output, fixtures.expected('icc-cmyk.jpg'), { threshold: 0 }, done); }); }); @@ -533,7 +553,7 @@ describe('Image metadata', function () { .withMetadata({ icc: fixtures.path('hilutite.icm') }) .toFile(output, function (err, info) { if (err) throw err; - fixtures.assertMaxColourDistance(output, fixtures.path('expected/hilutite.jpg'), 9); + fixtures.assertMaxColourDistance(output, fixtures.expected('hilutite.jpg'), 9); done(); }); }); @@ -737,7 +757,6 @@ describe('Image metadata', function () { depth: 'uchar', isProgressive: false, pages: 1, - pageHeight: 858, pagePrimary: 0, compression: 'av1', hasProfile: false, diff --git a/test/unit/resize-contain.js b/test/unit/resize-contain.js index d443ed2a..7a0a2e92 100644 --- a/test/unit/resize-contain.js +++ b/test/unit/resize-contain.js @@ -148,6 +148,42 @@ describe('Resize fit=contain', function () { }); }); + describe('Animated WebP', function () { + it('Width only', function (done) { + sharp(fixtures.inputWebPAnimated, { pages: -1 }) + .resize(320, 240, { + fit: 'contain', + background: { r: 255, g: 0, b: 0 } + }) + .toBuffer(function (err, data, info) { + if (err) throw err; + assert.strictEqual(true, data.length > 0); + assert.strictEqual('webp', info.format); + assert.strictEqual(320, info.width); + assert.strictEqual(240 * 9, info.height); + assert.strictEqual(4, info.channels); + fixtures.assertSimilar(fixtures.expected('embed-animated-width.webp'), data, done); + }); + }); + + it('Height only', function (done) { + sharp(fixtures.inputWebPAnimated, { pages: -1 }) + .resize(240, 320, { + fit: 'contain', + background: { r: 255, g: 0, b: 0 } + }) + .toBuffer(function (err, data, info) { + if (err) throw err; + assert.strictEqual(true, data.length > 0); + assert.strictEqual('webp', info.format); + assert.strictEqual(240, info.width); + assert.strictEqual(320 * 9, info.height); + assert.strictEqual(4, info.channels); + fixtures.assertSimilar(fixtures.expected('embed-animated-height.webp'), data, done); + }); + }); + }); + it('Invalid position values should fail', function () { [-1, 8.1, 9, 1000000, false, 'vallejo'].forEach(function (position) { 
assert.throws(function () { diff --git a/test/unit/resize-cover.js b/test/unit/resize-cover.js index 4d780000..75d9a73a 100644 --- a/test/unit/resize-cover.js +++ b/test/unit/resize-cover.js @@ -269,6 +269,30 @@ describe('Resize fit=cover', function () { }); }); + describe('Animated WebP', function () { + it('Width only', function (done) { + sharp(fixtures.inputWebPAnimated, { pages: -1 }) + .resize(80, 320, { fit: sharp.fit.cover }) + .toBuffer(function (err, data, info) { + if (err) throw err; + assert.strictEqual(80, info.width); + assert.strictEqual(320 * 9, info.height); + fixtures.assertSimilar(fixtures.expected('gravity-center-width.webp'), data, done); + }); + }); + + it('Height only', function (done) { + sharp(fixtures.inputWebPAnimated, { pages: -1 }) + .resize(320, 80, { fit: sharp.fit.cover }) + .toBuffer(function (err, data, info) { + if (err) throw err; + assert.strictEqual(320, info.width); + assert.strictEqual(80 * 9, info.height); + fixtures.assertSimilar(fixtures.expected('gravity-center-height.webp'), data, done); + }); + }); + }); + describe('Entropy-based strategy', function () { it('JPEG', function (done) { sharp(fixtures.inputJpg) diff --git a/test/unit/svg.js b/test/unit/svg.js index fd436e27..ee832352 100644 --- a/test/unit/svg.js +++ b/test/unit/svg.js @@ -74,6 +74,27 @@ describe('SVG input', function () { }); }); + it('Convert SVG to PNG utilizing scale-on-load', function (done) { + const size = 1024; + sharp(fixtures.inputSvgSmallViewBox) + .resize(size) + .toFormat('png') + .toBuffer(function (err, data, info) { + if (err) throw err; + assert.strictEqual('png', info.format); + assert.strictEqual(size, info.width); + assert.strictEqual(size, info.height); + fixtures.assertSimilar(fixtures.expected('circle.png'), data, function (err) { + if (err) throw err; + sharp(data).metadata(function (err, info) { + if (err) throw err; + assert.strictEqual(72, info.density); + done(); + }); + }); + }); + }); + it('Convert SVG to PNG at 14.4DPI', function (done) { sharp(fixtures.inputSvg, { density: 14.4 }) .toFormat('png') diff --git a/test/unit/webp.js b/test/unit/webp.js index 00815681..3e122e8b 100644 --- a/test/unit/webp.js +++ b/test/unit/webp.js @@ -133,12 +133,6 @@ describe('WebP', function () { }); }); - it('invalid pageHeight throws', () => { - assert.throws(() => { - sharp().webp({ pageHeight: 0 }); - }); - }); - it('invalid loop throws', () => { assert.throws(() => { sharp().webp({ loop: -1 }); @@ -151,7 +145,7 @@ describe('WebP', function () { it('invalid delay throws', () => { assert.throws(() => { - sharp().webp({ delay: [-1] }); + sharp().webp({ delay: -1 }); }); assert.throws(() => { @@ -159,16 +153,13 @@ describe('WebP', function () { }); }); - it('should double the number of frames with default delay', async () => { - const original = await sharp(fixtures.inputWebPAnimated, { pages: -1 }).metadata(); + it('should repeat a single delay for all frames', async () => { const updated = await sharp(fixtures.inputWebPAnimated, { pages: -1 }) - .webp({ pageHeight: original.pageHeight / 2 }) + .webp({ delay: 100 }) .toBuffer() .then(data => sharp(data, { pages: -1 }).metadata()); - assert.strictEqual(updated.pages, original.pages * 2); - assert.strictEqual(updated.pageHeight, original.pageHeight / 2); - assert.deepStrictEqual(updated.delay, [...original.delay, ...Array(9).fill(120)]); + assert.deepStrictEqual(updated.delay, Array(updated.pages).fill(100)); }); it('should limit animation loop', async () => { @@ -216,22 +207,14 @@ describe('WebP', function () { }); 
}); - it('should remove animation properties when loading single page', async () => { - const data = await sharp(fixtures.inputGifAnimatedLoop3) + it('should resize animated image to page height', async () => { + const updated = await sharp(fixtures.inputWebPAnimated, { pages: -1 }) .resize({ height: 570 }) .webp({ effort: 0 }) - .toBuffer(); - const { size, ...metadata } = await sharp(data).metadata(); - assert.deepStrictEqual(metadata, { - format: 'webp', - width: 740, - height: 570, - space: 'srgb', - channels: 3, - depth: 'uchar', - isProgressive: false, - hasProfile: false, - hasAlpha: false - }); + .toBuffer() + .then(data => sharp(data, { pages: -1 }).metadata()); + + assert.strictEqual(updated.height, 570 * 9); + assert.strictEqual(updated.pageHeight, 570); }); });
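Note on the relaxed `delay` option above: `trySetAnimationOptions` now wraps a single integer into a one-element array, and `SetAnimationProperties` repeats that value across all frames. A minimal standalone sketch of the validation, assuming a hypothetical `normaliseDelay` helper (not part of sharp's API):

// Hypothetical standalone version of the relaxed delay validation in
// trySetAnimationOptions: accept a single integer or an array of integers.
const normaliseDelay = (delay) => {
  const inRange = (v) => Number.isInteger(v) && v >= 0 && v <= 65535;
  if (inRange(delay)) {
    // Single value: repeated for every frame at output time
    return [delay];
  }
  if (Array.isArray(delay) && delay.every(inRange)) {
    return delay;
  }
  throw new Error('Expected integer or an array of integers between 0 and 65535 for delay');
};

// Usage
console.log(normaliseDelay(100));        // [ 100 ]
console.log(normaliseDelay([120, 90]));  // [ 120, 90 ]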
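For readers skimming the C++ side, the new `sharp::ResolveShrink` helper replaces the per-canvas scaling branches that previously lived in `pipeline.cc`. A rough JavaScript transcription of its logic, purely illustrative (the function name and the string canvas values stand in for the C++ enum):

// Illustrative JS port of ResolveShrink from src/common.cc.
// canvas is one of 'crop', 'embed', 'max', 'min', 'ignore_aspect'.
function resolveShrink (width, height, targetWidth, targetHeight, canvas, swap, withoutEnlargement) {
  if (swap) {
    // Rotation by 90/270 degrees swaps the input axes
    [width, height] = [height, width];
  }
  let hshrink = 1;
  let vshrink = 1;
  if (targetWidth > 0 && targetHeight > 0) {
    // Fixed width and height
    hshrink = width / targetWidth;
    vshrink = height / targetHeight;
    if (canvas === 'crop' || canvas === 'min') {
      // Cover: shrink both axes by the smaller factor
      hshrink = vshrink = Math.min(hshrink, vshrink);
    } else if (canvas === 'embed' || canvas === 'max') {
      // Contain: shrink both axes by the larger factor
      hshrink = vshrink = Math.max(hshrink, vshrink);
    } else if (swap) {
      // ignore_aspect: keep independent factors, swapped back for rotation
      [hshrink, vshrink] = [vshrink, hshrink];
    }
  } else if (targetWidth > 0) {
    // Fixed width, auto height unless ignore_aspect
    hshrink = width / targetWidth;
    if (canvas !== 'ignore_aspect') vshrink = hshrink;
  } else if (targetHeight > 0) {
    // Fixed height, auto width unless ignore_aspect
    vshrink = height / targetHeight;
    if (canvas !== 'ignore_aspect') hshrink = vshrink;
  }
  if (withoutEnlargement) {
    // Never enlarge (oversample) the output image
    hshrink = Math.max(1, hshrink);
    vshrink = Math.max(1, vshrink);
  }
  // Never shrink an axis below one pixel
  return [Math.min(hshrink, width), Math.min(vshrink, height)];
}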
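The user-facing effect exercised by the new tests: animated input loaded with `pages: -1` is resized frame by frame, the intermediate buffer is a "toilet-roll" image whose height is the per-frame height multiplied by the page count, and a single `delay` applies to every frame. A hedged usage sketch ('animated.webp' is a placeholder input):

const sharp = require('sharp');

sharp('animated.webp', { pages: -1 })    // load all frames
  .resize(320, 240, { fit: 'contain', background: { r: 255, g: 0, b: 0 } })
  .webp({ delay: 100, loop: 0 })         // single delay repeated for every frame
  .toBuffer()
  .then(data => sharp(data, { pages: -1 }).metadata())
  .then(({ pages, pageHeight, height, delay }) => {
    // Each frame is 320x240; the toilet-roll buffer height is pageHeight * pages
    console.log({ pages, pageHeight, height, delay });
  });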