Compare commits

...

77 Commits

Author SHA1 Message Date
Lovell Fuller
1ff84b20b7 Release v0.29.3 2021-11-14 11:40:19 +00:00
Lovell Fuller
97655d2dfd Bump deps 2021-11-14 09:17:44 +00:00
Michael B. Klein
d10d7b02d4 Docs: remove duplicate entry for mbklein (#2971) 2021-11-11 19:10:44 +00:00
Lovell Fuller
2ffdae2914 Docs: changelog and credit for #2952 2021-11-08 19:43:49 +00:00
Michael B. Klein
342de36973 Impute TIFF xres/yres from withMetadata({density}) 2021-11-08 19:43:42 +00:00
Lovell Fuller
b33231d4bd Ensure correct dimensions when contain 1px image #2951 2021-11-07 16:35:30 +00:00
Lovell Fuller
319db21f29 Release v0.29.2 2021-10-21 09:15:21 +01:00
Lovell Fuller
d359331426 Remove animation props from single page images #2890 2021-10-18 20:27:10 +01:00
Lovell Fuller
7ae151362b Bump devDeps 2021-10-17 15:17:50 +01:00
Lovell Fuller
648a1e05da Throw error rather than exit for invalid binaries #2931 2021-10-17 15:14:40 +01:00
Lovell Fuller
b9f211fe34 Docs: changelog for #2918 2021-10-17 15:11:38 +01:00
Dmitri Pyatkov
e475d9e47f Improve error message on Windows for version conflict (#2918) 2021-10-17 14:10:28 +01:00
Lovell Fuller
f37ca8249a Bump deps 2021-09-22 11:41:22 +01:00
Lovell Fuller
1dd4be670d Add timeout function to limit processing time 2021-09-22 10:33:59 +01:00
Lovell Fuller
197d4cf835 Docs: changelog and credit for #2893 2021-09-22 10:31:12 +01:00
Lovell Fuller
83eed86b53 Docs: clarify prebuilt libc support on ARMv6/v7 2021-09-22 10:08:52 +01:00
Lovell Fuller
bbf612cb9e Replace use of deprecated util.inherits 2021-09-22 10:08:44 +01:00
Erlend
2679bb567b Allow use of 'tif' to select TIFF output (#2893) 2021-09-16 18:49:14 +01:00
Lovell Fuller
481e350f39 Ensure 'versions' is populated from vendored libvips 2021-09-07 11:21:00 +01:00
Lovell Fuller
50c7a08754 Release v0.29.1 2021-09-07 10:23:50 +01:00
Lovell Fuller
9a0bb60737 Bump deps 2021-09-07 10:21:51 +01:00
Lovell Fuller
deb5d81221 Docs: changelog entries for #2878 #2879 2021-09-06 16:30:31 +01:00
Espen Hovlandsdal
916b04dbac Allow using speed 9 for AVIF/HEIC encoding (#2879) 2021-09-06 16:23:02 +01:00
Espen Hovlandsdal
52307fad5d Resolve paths before comparing input/output destination (#2878)
This fixes an issue where if you try to write to the same destination as the
input file but you are not using absolute (or the same relative path) for both
the input and output, sharp/vips might produce errors such as:

someFile.jpg: unable to open for write
unix error: No such file or directory
2021-09-06 16:21:43 +01:00
Lovell Fuller
afb21135c2 Docs: add changelog entry for #2868 2021-09-05 09:35:46 +01:00
Zaruike
b7fbffb3f7 Add support for libvips compiled with OpenJPEG 2021-09-05 09:32:02 +01:00
Lovell Fuller
5d98bcd8d8 Remove unsupported animation props from AVIF #2870 2021-09-05 08:46:15 +01:00
Lovell Fuller
e044788f63 Docs: changelog and credit for #2846 2021-08-30 20:31:10 +01:00
Tenpi
4a9267ce12 Add lightness option to modulate operation 2021-08-30 20:22:41 +01:00
Lovell Fuller
104464c2e0 Ensure images with P3 profiles retain full gamut #2862 2021-08-30 17:15:17 +01:00
Lovell Fuller
60adc110f5 Ensure background is premultiplied when compositing #2858 2021-08-29 16:40:40 +01:00
Paul Straw
2031d7d112 Ensure compatibility with ImageMagick 7 (#2865) 2021-08-28 20:17:44 +01:00
Lovell Fuller
3402656ec5 Set PNG bitdepth based on number of colours #2855
Removes use of deprecated libvips API
2021-08-26 22:05:29 +01:00
Lovell Fuller
4e84f743e4 Docs: toFile expects directory structure to exist 2021-08-20 09:22:22 +01:00
Lovell Fuller
17e50de5f0 Docs: serve docute from same hostname
Cheapo corporate web proxies ignore CSP and rewrite HTML
2021-08-19 18:58:17 +01:00
Lovell Fuller
978a788f40 CI: ensure Linux ARM64 prebuild token is passed into container 2021-08-17 14:49:57 +01:00
Lovell Fuller
6e91d55971 CI: ensure Linux ARM64 prebuild token is passed into container 2021-08-17 14:33:29 +01:00
Lovell Fuller
d4ce0a1e36 Update prebuild include regex 2021-08-17 11:23:24 +01:00
Lovell Fuller
148608b377 Release v0.29.0 2021-08-17 11:16:04 +01:00
Lovell Fuller
f725f4acb7 Update performance test results 2021-08-17 10:23:45 +01:00
Lovell Fuller
d07a549438 Tests: add squoosh-cli and squoosh-lib to performance benchmarks 2021-08-16 20:33:33 +01:00
Lovell Fuller
551441cedd Bench: bump deps 2021-08-16 19:36:42 +01:00
Lovell Fuller
46c14e939b Tests: add a few new leak suppressions 2021-08-16 19:01:39 +01:00
Lovell Fuller
6084647795 Upgrade to libvips v8.11.3 2021-08-15 19:45:08 +01:00
Lovell Fuller
e0a598ae62 Bump deps 2021-08-15 19:28:49 +01:00
Lovell Fuller
28833eb04a Upgrade to libvips v8.11.3-alpha1 2021-08-15 08:35:27 +01:00
Lovell Fuller
b24c9c86d1 Docs: changelog and credit for #2762 2021-08-03 15:28:50 +01:00
Mart
b7add480c7 Add support for bit depth with raw input and output (#2762)
* Determine input raw pixel depth from the given typed array
* Allow pixel depth to be set on raw output
2021-08-03 14:52:54 +01:00
Lovell Fuller
eabb671b10 Docs: minimum Node.js version requirement of 12.13.0 2021-08-03 10:25:04 +01:00
Lovell Fuller
513ed02b76 Docs: changelog entry for #2808 2021-08-02 21:37:54 +01:00
Espen Hovlandsdal
b7ddbe71f7 Add support for negating only non-alpha channels
Fixes #1035
2021-08-02 21:19:56 +01:00
Lovell Fuller
21d1a7ca62 CI: Add darwin-arm64 via MacStadium-based runner 2021-07-23 10:58:00 +01:00
Lovell Fuller
4c2d28a7ad Bump dep: color 2021-07-19 15:30:12 +01:00
Lovell Fuller
2afec9e3ed Docs: rebuild to include commit 6979042 2021-07-19 15:30:01 +01:00
reiv
69790421b7 Docs: add stats usage note and example (#2803) (#2804)
Clarify that stats are derived from the original input image and that operations need to be buffered in order to obtain stats from the resulting image.
2021-07-19 15:25:36 +01:00
Lovell Fuller
3f08f6a359 Add default background metadata for PNG and GIF images 2021-07-19 14:55:22 +01:00
Lovell Fuller
719c2db8da Bump devDeps 2021-07-14 19:33:24 +01:00
Lovell Fuller
a9aa55c32d Ensure pipelineColourspace is applied to all inputs #2704 2021-07-14 19:22:31 +01:00
Lovell Fuller
3f6d9d6ee9 Upgrade to libvips v8.11.2-alpha1 2021-07-14 13:45:47 +01:00
Lovell Fuller
b32568159f Docs: changelog and credit for #2704 2021-07-14 13:45:47 +01:00
Daiz
bb48d0d857 Add pipelineColourspace operator 2021-07-14 13:45:47 +01:00
Lovell Fuller
536412515f Allow installation directory to contain spaces
This previously worked, but regressed in v0.26.0.
2021-07-14 13:45:47 +01:00
Lovell Fuller
fcc6eaadd3 Tests: reduce WebP CPU time, ~2s faster 2021-07-14 13:45:47 +01:00
Lovell Fuller
7dc78e8796 Bump deps 2021-07-14 13:45:47 +01:00
Lovell Fuller
c65de3fe6d Default to single-channel output from extractChannel #2658 2021-07-14 13:45:47 +01:00
Lovell Fuller
d000f57773 Add compression property to HEIF image metadata #2504 2021-07-14 13:45:47 +01:00
Lovell Fuller
75cddbdb6d Default AVIF encoding to 4:4:4 chroma subsampling #2562 2021-07-14 13:45:47 +01:00
Lovell Fuller
e418d91511 Test: correct coverage syntax 2021-07-14 13:45:47 +01:00
Lovell Fuller
6c2e6c5432 Install: multiple platform-arch binaries in same tree 2021-07-14 13:45:47 +01:00
Lovell Fuller
8c6d9fdc62 Install: always use Brotli-compressed tarballs 2021-07-14 13:45:47 +01:00
Lovell Fuller
468e95427e CI: migrate ARM64 builds from TravisCI to CircleCI #2665 2021-07-14 13:45:47 +01:00
Lovell Fuller
6d7a5ace6b Drop support for Node.js 10, upgrade to Node-API v5 2021-07-14 13:45:47 +01:00
Lovell Fuller
cbaec198a5 Upgrade to libvips v8.11.0-rc1 2021-07-14 13:45:47 +01:00
Lovell Fuller
7467fa8b50 Docs: serve markdown files via Firebase instead of jsdelivr
Might make a few corporate web proxies happier
2021-06-21 17:10:44 +01:00
Lovell Fuller
61640fb5c7 CI: use gcc 10 on Linux x64 2021-06-03 18:52:52 +01:00
Lovell Fuller
19cb4b62b0 Docs: add security-related response headers 2021-06-02 09:12:02 +01:00
Lovell Fuller
81ee8bc30f Docs: make keyword search case insensitive 2021-06-02 09:11:08 +01:00
80 changed files with 2058 additions and 545 deletions

.circleci/config.yml (new file, +77 lines)

@@ -0,0 +1,77 @@
version: 2.1
workflows:
build:
jobs:
- linux-arm64-glibc-node-12:
filters:
tags:
only: /^v.*/
- linux-arm64-musl-node-12:
filters:
tags:
only: /^v.*/
- linux-arm64-glibc-node-16:
filters:
tags:
only: /^v.*/
- linux-arm64-musl-node-16:
filters:
tags:
only: /^v.*/
jobs:
linux-arm64-glibc-node-12:
resource_class: arm.medium
machine:
image: ubuntu-2004:202101-01
steps:
- checkout
- run: |
sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp arm64v8/debian:bullseye
sudo docker exec sharp sh -c "apt-get update && apt-get install -y build-essential git python3 curl"
sudo docker exec sharp sh -c "curl -s https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -"
sudo docker exec sharp sh -c "echo 'deb https://deb.nodesource.com/node_12.x sid main' >/etc/apt/sources.list.d/nodesource.list"
sudo docker exec sharp sh -c "apt-get update && apt-get install -y nodejs"
- run: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
- run: sudo docker exec sharp sh -c "npm test"
- run: "[[ -n $CIRCLE_TAG ]] && sudo docker exec --env prebuild_upload sharp sh -c \"npx prebuild --runtime napi --target 5 --upload=$prebuild_upload\" || true"
linux-arm64-glibc-node-16:
resource_class: arm.medium
machine:
image: ubuntu-2004:202101-01
steps:
- checkout
- run: |
sudo chown 0.0 ${PWD}
sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp arm64v8/debian:bullseye
sudo docker exec sharp sh -c "apt-get update && apt-get install -y build-essential git python3 curl"
sudo docker exec sharp sh -c "curl -s https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -"
sudo docker exec sharp sh -c "echo 'deb https://deb.nodesource.com/node_16.x sid main' >/etc/apt/sources.list.d/nodesource.list"
sudo docker exec sharp sh -c "apt-get update && apt-get install -y nodejs"
- run: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
- run: sudo docker exec sharp sh -c "npm test"
linux-arm64-musl-node-12:
resource_class: arm.medium
machine:
image: ubuntu-2004:202101-01
steps:
- checkout
- run: |
sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp node:12-alpine3.11
sudo docker exec sharp sh -c "apk add build-base git python3 --update-cache"
- run: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
- run: sudo docker exec sharp sh -c "npm test"
- run: "[[ -n $CIRCLE_TAG ]] && sudo docker exec --env prebuild_upload sharp sh -c \"npx prebuild --runtime napi --target 5 --upload=$prebuild_upload\" || true"
linux-arm64-musl-node-16:
resource_class: arm.medium
machine:
image: ubuntu-2004:202101-01
steps:
- checkout
- run: |
sudo chown 0.0 ${PWD}
sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp node:16-alpine3.11
sudo docker exec sharp sh -c "apk add build-base git python3 --update-cache"
- run: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
- run: sudo docker exec sharp sh -c "npm test"

.github/workflows/ci-darwin-arm64v8.yml (vendored, new file, +27 lines)

@@ -0,0 +1,27 @@
name: CI (MacStadium)
on:
- push
- pull_request
jobs:
CI:
runs-on: macos-m1
defaults:
run:
shell: /usr/bin/arch -arch arm64e /bin/bash -l {0}
steps:
- name: Dependencies
uses: actions/setup-node@v2
with:
node-version: 16
architecture: arm64
- name: Checkout
uses: actions/checkout@v2
- name: Install
run: npm install --build-from-source --unsafe-perm
- name: Test
run: npm test
- name: Prebuild
if: startsWith(github.ref, 'refs/tags/')
env:
prebuild_upload: ${{ secrets.GITHUB_TOKEN }}
run: npx prebuild --runtime napi --target 5


@@ -1,3 +1,4 @@
name: CI (GitHub)
on:
- push
- pull_request
@@ -11,43 +12,34 @@ jobs:
include:
- os: ubuntu-20.04
container: centos:7
nodejs_version: 10
nodejs_version: 12
coverage: true
prebuild: true
- os: ubuntu-20.04
container: centos:7
nodejs_version: 12
- os: ubuntu-20.04
container: centos:7
nodejs_version: 14
- os: ubuntu-20.04
container: centos:7
nodejs_version: 16
- os: ubuntu-20.04
container: node:10-alpine3.11
prebuild: true
- os: ubuntu-20.04
container: node:12-alpine3.11
prebuild: true
- os: ubuntu-20.04
container: node:14-alpine3.11
- os: ubuntu-20.04
container: node:14-alpine3.13
- os: ubuntu-20.04
container: node:16-alpine3.11
- os: macos-10.15
nodejs_version: 10
prebuild: true
- os: macos-10.15
nodejs_version: 12
prebuild: true
- os: macos-10.15
nodejs_version: 14
- os: macos-10.15
nodejs_version: 16
- os: windows-2019
nodejs_version: 10
prebuild: true
- os: windows-2019
nodejs_version: 12
prebuild: true
- os: windows-2019
nodejs_version: 14
- os: windows-2019
@@ -57,7 +49,9 @@ jobs:
if: contains(matrix.container, 'centos')
run: |
curl -sL https://rpm.nodesource.com/setup_${{ matrix.nodejs_version }}.x | bash -
yum install -y gcc-c++ make git python3 nodejs
yum install -y centos-release-scl
yum install -y devtoolset-10-gcc-c++ make git python3 nodejs
echo "/opt/rh/devtoolset-10/root/usr/bin" >> $GITHUB_PATH
- name: Dependencies (Linux musl)
if: contains(matrix.container, 'alpine')
run: apk add build-base git python3 --update-cache
@@ -84,4 +78,4 @@ jobs:
if: matrix.prebuild && startsWith(github.ref, 'refs/tags/')
env:
prebuild_upload: ${{ secrets.GITHUB_TOKEN }}
run: npx prebuild --runtime napi --target 3
run: npx prebuild --runtime napi --target 5


@@ -1,4 +1,4 @@
{
"include-regex": "(sharp\\.node|libvips-cpp\\.dll)",
"include-regex": "(sharp-.+\\.node|libvips-cpp\\.dll)",
"strip": true
}


@@ -1,108 +0,0 @@
jobs:
include:
- name: "Linux ARM64v8 (Debian 11, glibc 2.29) - Node.js 10"
arch: arm64
os: linux
dist: bionic
language: shell
before_install:
- sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp arm64v8/debian:bullseye
- sudo docker exec sharp sh -c "apt-get update && apt-get install -y build-essential git python3 curl"
- sudo docker exec sharp sh -c "curl -s https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -"
- sudo docker exec sharp sh -c "echo 'deb https://deb.nodesource.com/node_10.x sid main' >/etc/apt/sources.list.d/nodesource.list"
- sudo docker exec sharp sh -c "apt-get update && apt-get install -y nodejs=10.*"
install: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
script: sudo docker exec sharp sh -c "npm test"
after_success: "[[ -n $TRAVIS_TAG ]] && sudo docker exec --env prebuild_upload sharp sh -c \"npx prebuild --runtime napi --target 3\""
- name: "Linux ARM64v8 (Debian 11, glibc 2.29) - Node.js 12"
arch: arm64
os: linux
dist: bionic
language: shell
before_install:
- sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp arm64v8/debian:bullseye
- sudo docker exec sharp sh -c "apt-get update && apt-get install -y build-essential git python3 curl"
- sudo docker exec sharp sh -c "curl -s https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -"
- sudo docker exec sharp sh -c "echo 'deb https://deb.nodesource.com/node_12.x sid main' >/etc/apt/sources.list.d/nodesource.list"
- sudo docker exec sharp sh -c "apt-get update && apt-get install -y nodejs"
install: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
script: sudo docker exec sharp sh -c "npm test"
- name: "Linux ARM64v8 (Debian 11, glibc 2.29) - Node.js 14"
arch: arm64
os: linux
dist: bionic
language: shell
before_install:
- sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp arm64v8/debian:bullseye
- sudo docker exec sharp sh -c "apt-get update && apt-get install -y build-essential git python3 curl"
- sudo docker exec sharp sh -c "curl -s https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -"
- sudo docker exec sharp sh -c "echo 'deb https://deb.nodesource.com/node_14.x sid main' >/etc/apt/sources.list.d/nodesource.list"
- sudo docker exec sharp sh -c "apt-get update && apt-get install -y nodejs"
install: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
script: sudo docker exec sharp sh -c "npm test"
- name: "Linux ARM64v8 (Debian 11, glibc 2.29) - Node.js 16"
arch: arm64
os: linux
dist: bionic
language: shell
before_install:
- sudo chown 0.0 ${PWD}
- sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp arm64v8/debian:bullseye
- sudo docker exec sharp sh -c "apt-get update && apt-get install -y build-essential git python3 curl"
- sudo docker exec sharp sh -c "curl -s https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -"
- sudo docker exec sharp sh -c "echo 'deb https://deb.nodesource.com/node_16.x sid main' >/etc/apt/sources.list.d/nodesource.list"
- sudo docker exec sharp sh -c "apt-get update && apt-get install -y nodejs"
install: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
script: sudo docker exec sharp sh -c "npm test"
- name: "Linux ARM64v8 (Alpine 3.11, musl 1.1.24) - Node.js 10"
arch: arm64
os: linux
dist: focal
language: shell
before_install:
- sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp node:10-alpine3.11
- sudo docker exec sharp sh -c "apk add build-base git python3 --update-cache"
install: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
script: sudo docker exec sharp sh -c "npm test"
after_success: "[[ -n $TRAVIS_TAG ]] && sudo docker exec --env prebuild_upload sharp sh -c \"npx prebuild --runtime napi --target 3\""
- name: "Linux ARM64v8 (Alpine 3.11, musl 1.1.24) - Node.js 12"
arch: arm64
os: linux
dist: focal
language: shell
before_install:
- sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp node:12-alpine3.11
- sudo docker exec sharp sh -c "apk add build-base git python3 --update-cache"
install: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
script: sudo docker exec sharp sh -c "npm test"
- name: "Linux ARM64v8 (Alpine 3.11, musl 1.1.24) - Node.js 14"
arch: arm64
os: linux
dist: focal
language: shell
before_install:
- sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp node:14-alpine3.11
- sudo docker exec sharp sh -c "apk add build-base git python3 --update-cache"
install: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
script: sudo docker exec sharp sh -c "npm test"
- name: "Linux ARM64v8 (Alpine 3.11, musl 1.1.24) - Node.js 16"
arch: arm64
os: linux
dist: focal
language: shell
before_install:
- sudo chown 0.0 ${PWD}
- sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp node:16-alpine3.11
- sudo docker exec sharp sh -c "apk add build-base git python3 --update-cache"
install: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
script: sudo docker exec sharp sh -c "npm test"
cache:
npm: false


@@ -16,7 +16,7 @@ Lanczos resampling ensures quality is not sacrificed for speed.
As well as image resizing, operations such as
rotation, extraction, compositing and gamma correction are available.
Most modern macOS, Windows and Linux systems running Node.js v10+
Most modern macOS, Windows and Linux systems running Node.js >= 12.13.0
do not require any additional install or runtime dependencies.
## Documentation
@@ -99,7 +99,7 @@ A [guide for contributors](https://github.com/lovell/sharp/blob/master/.github/C
covers reporting bugs, requesting features and submitting code changes.
[![Test Coverage](https://coveralls.io/repos/lovell/sharp/badge.svg?branch=master)](https://coveralls.io/r/lovell/sharp?branch=master)
[![N-API v3](https://img.shields.io/badge/N--API-v3-green.svg)](https://nodejs.org/dist/latest/docs/api/n-api.html#n_api_n_api_version_matrix)
[![Node-API v5](https://img.shields.io/badge/Node--API-v5-green.svg)](https://nodejs.org/dist/latest/docs/api/n-api.html#n_api_n_api_version_matrix)
## Licensing


@@ -4,9 +4,8 @@ build: off
platform: x86
environment:
matrix:
- nodejs_version: "10"
prebuild: true
- nodejs_version: "12"
prebuild: true
- nodejs_version: "14"
- nodejs_version: "16"
install:
@@ -15,4 +14,4 @@ install:
test_script:
- npm test
on_success:
- if [%prebuild%] == [true] if [%APPVEYOR_REPO_TAG%] == [true] npx prebuild --runtime napi --target 3
- if [%prebuild%] == [true] if [%APPVEYOR_REPO_TAG%] == [true] npx prebuild --runtime napi --target 5


@@ -1,7 +1,8 @@
{
'variables': {
'vips_version': '<!(node -p "require(\'./lib/libvips\').minimumLibvipsVersion")',
'sharp_vendor_dir': '<(module_root_dir)/vendor/<(vips_version)'
'platform_and_arch': '<!(node -p "require(\'./lib/platform\')()")',
'sharp_vendor_dir': './vendor/<(vips_version)/<(platform_and_arch)'
},
'targets': [{
'target_name': 'libvips-cpp',
@@ -37,6 +38,7 @@
'msvs_settings': {
'VCCLCompilerTool': {
'ExceptionHandling': 1,
'Optimization': 1,
'WholeProgramOptimization': 'true'
},
'VCLibrarianTool': {
@@ -65,9 +67,9 @@
}]
]
}, {
'target_name': 'sharp',
'target_name': 'sharp-<(platform_and_arch)',
'defines': [
'NAPI_VERSION=3'
'NAPI_VERSION=5'
],
'dependencies': [
'<!(node -p "require(\'node-addon-api\').gyp")',
@@ -138,7 +140,7 @@
}],
['OS == "mac"', {
'link_settings': {
'library_dirs': ['<(sharp_vendor_dir)/lib'],
'library_dirs': ['../<(sharp_vendor_dir)/lib'],
'libraries': [
'libvips-cpp.42.dylib'
]
@@ -146,7 +148,7 @@
'xcode_settings': {
'OTHER_LDFLAGS': [
# Ensure runtime linking is relative to sharp.node
'-Wl,-rpath,\'@loader_path/../../vendor/<(vips_version)/lib\''
'-Wl,-rpath,\'@loader_path/../../<(sharp_vendor_dir)/lib\''
]
}
}],
@@ -155,13 +157,13 @@
'_GLIBCXX_USE_CXX11_ABI=1'
],
'link_settings': {
'library_dirs': ['<(sharp_vendor_dir)/lib'],
'library_dirs': ['../<(sharp_vendor_dir)/lib'],
'libraries': [
'-l:libvips-cpp.so.42'
],
'ldflags': [
# Ensure runtime linking is relative to sharp.node
'-Wl,-s -Wl,--disable-new-dtags -Wl,-rpath=\'$$ORIGIN/../../vendor/<(vips_version)/lib\''
'-Wl,-s -Wl,--disable-new-dtags -Wl,-rpath=\'$$ORIGIN/../../<(sharp_vendor_dir)/lib\''
]
}
}]
@@ -172,7 +174,7 @@
'-std=c++0x',
'-fexceptions',
'-Wall',
'-O3'
'-Os'
],
'xcode_settings': {
'CLANG_CXX_LANGUAGE_STANDARD': 'c++11',
@@ -182,7 +184,7 @@
'OTHER_CPLUSPLUSFLAGS': [
'-fexceptions',
'-Wall',
'-O3'
'-Oz'
]
},
'configurations': {
@@ -202,6 +204,7 @@
'msvs_settings': {
'VCCLCompilerTool': {
'ExceptionHandling': 1,
'Optimization': 1,
'WholeProgramOptimization': 'true'
},
'VCLibrarianTool': {


@@ -16,7 +16,7 @@ Lanczos resampling ensures quality is not sacrificed for speed.
As well as image resizing, operations such as
rotation, extraction, compositing and gamma correction are available.
Most modern macOS, Windows and Linux systems running Node.js v10+
Most modern macOS, Windows and Linux systems running Node.js >= 12.13.0
do not require any additional install or runtime dependencies.
### Formats


@@ -64,13 +64,18 @@ Extract a single channel from a multi-channel image.
### Examples
```javascript
sharp(input)
// green.jpg is a greyscale image containing the green channel of the input
await sharp(input)
.extractChannel('green')
.toColourspace('b-w')
.toFile('green.jpg', function(err, info) {
// info.channels === 1
// green.jpg is a greyscale image containing the green channel of the input
});
.toFile('green.jpg');
```
```javascript
// red1 is the red value of the first pixel, red2 the second pixel etc.
const [red1, red2, ...] = await sharp(input)
.extractChannel(0)
.raw()
.toBuffer();
```
* Throws **[Error][3]** Invalid channel


@@ -40,6 +40,51 @@ Alternative spelling of `greyscale`.
Returns **Sharp**
## pipelineColourspace
Set the pipeline colourspace.
The input image will be converted to the provided colourspace at the start of the pipeline.
All operations will use this colourspace before converting to the output colourspace, as defined by [toColourspace][6].
This feature is experimental and has not yet been fully-tested with all operations.
### Parameters
* `colourspace` **[string][1]?** pipeline colourspace e.g. `rgb16`, `scrgb`, `lab`, `grey16` [...][7]
### Examples
```javascript
// Run pipeline in 16 bits per channel RGB while converting final result to 8 bits per channel sRGB.
await sharp(input)
.pipelineColourspace('rgb16')
.toColourspace('srgb')
.toFile('16bpc-pipeline-to-8bpc-output.png')
```
* Throws **[Error][4]** Invalid parameters
Returns **Sharp**
**Meta**
* **since**: 0.29.0
## pipelineColorspace
Alternative spelling of `pipelineColourspace`.
### Parameters
* `colorspace` **[string][1]?** pipeline colorspace.
<!---->
* Throws **[Error][4]** Invalid parameters
Returns **Sharp**
## toColourspace
Set the output colourspace.
@@ -47,7 +92,7 @@ By default output image will be web-friendly sRGB, with additional channels inte
### Parameters
* `colourspace` **[string][1]?** output colourspace e.g. `srgb`, `rgb`, `cmyk`, `lab`, `b-w` [...][6]
* `colourspace` **[string][1]?** output colourspace e.g. `srgb`, `rgb`, `cmyk`, `lab`, `b-w` [...][8]
### Examples
@@ -86,4 +131,8 @@ Returns **Sharp**
[5]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean
[6]: https://github.com/libvips/libvips/blob/master/libvips/iofuncs/enumtypes.c#L568
[6]: #tocolourspace
[7]: https://github.com/libvips/libvips/blob/41cff4e9d0838498487a00623462204eb10ee5b8/libvips/iofuncs/enumtypes.c#L774
[8]: https://github.com/libvips/libvips/blob/3c0bfdf74ce1dc37a6429bed47fa76f16e2cd70a/libvips/iofuncs/enumtypes.c#L777-L794


@@ -13,43 +13,44 @@ Implements the [stream.Duplex][1] class.
### Parameters
* `input` **([Buffer][2] | [Uint8Array][3] | [Uint8ClampedArray][4] | [string][5])?** if present, can be
a Buffer / Uint8Array / Uint8ClampedArray containing JPEG, PNG, WebP, AVIF, GIF, SVG, TIFF or raw pixel image data, or
* `input` **([Buffer][2] | [Uint8Array][3] | [Uint8ClampedArray][4] | [Int8Array][5] | [Uint16Array][6] | [Int16Array][7] | [Uint32Array][8] | [Int32Array][9] | [Float32Array][10] | [Float64Array][11] | [string][12])?** if present, can be
a Buffer / Uint8Array / Uint8ClampedArray containing JPEG, PNG, WebP, AVIF, GIF, SVG or TIFF image data, or
a TypedArray containing raw pixel image data, or
a String containing the filesystem path to a JPEG, PNG, WebP, AVIF, GIF, SVG or TIFF image file.
JPEG, PNG, WebP, AVIF, GIF, SVG, TIFF or raw pixel image data can be streamed into the object when not present.
* `options` **[Object][6]?** if present, is an Object with optional attributes.
* `options` **[Object][13]?** if present, is an Object with optional attributes.
* `options.failOnError` **[boolean][7]** by default halt processing and raise an error when loading invalid images.
* `options.failOnError` **[boolean][14]** by default halt processing and raise an error when loading invalid images.
Set this flag to `false` if you'd rather apply a "best effort" to decode images, even if the data is corrupt or invalid. (optional, default `true`)
* `options.limitInputPixels` **([number][8] | [boolean][7])** Do not process input images where the number of pixels
* `options.limitInputPixels` **([number][15] | [boolean][14])** Do not process input images where the number of pixels
(width x height) exceeds this limit. Assumes image dimensions contained in the input metadata can be trusted.
An integral Number of pixels, zero or false to remove limit, true to use default limit of 268402689 (0x3FFF x 0x3FFF). (optional, default `268402689`)
* `options.sequentialRead` **[boolean][7]** Set this to `true` to use sequential rather than random access where possible.
* `options.sequentialRead` **[boolean][14]** Set this to `true` to use sequential rather than random access where possible.
This can reduce memory usage and might improve performance on some systems. (optional, default `false`)
* `options.density` **[number][8]** number representing the DPI for vector images in the range 1 to 100000. (optional, default `72`)
* `options.pages` **[number][8]** number of pages to extract for multi-page input (GIF, WebP, AVIF, TIFF, PDF), use -1 for all pages. (optional, default `1`)
* `options.page` **[number][8]** page number to start extracting from for multi-page input (GIF, WebP, AVIF, TIFF, PDF), zero based. (optional, default `0`)
* `options.subifd` **[number][8]** subIFD (Sub Image File Directory) to extract for OME-TIFF, defaults to main image. (optional, default `-1`)
* `options.level` **[number][8]** level to extract from a multi-level input (OpenSlide), zero based. (optional, default `0`)
* `options.animated` **[boolean][7]** Set to `true` to read all frames/pages of an animated image (equivalent of setting `pages` to `-1`). (optional, default `false`)
* `options.raw` **[Object][6]?** describes raw pixel input image data. See `raw()` for pixel ordering.
* `options.density` **[number][15]** number representing the DPI for vector images in the range 1 to 100000. (optional, default `72`)
* `options.pages` **[number][15]** number of pages to extract for multi-page input (GIF, WebP, AVIF, TIFF, PDF), use -1 for all pages. (optional, default `1`)
* `options.page` **[number][15]** page number to start extracting from for multi-page input (GIF, WebP, AVIF, TIFF, PDF), zero based. (optional, default `0`)
* `options.subifd` **[number][15]** subIFD (Sub Image File Directory) to extract for OME-TIFF, defaults to main image. (optional, default `-1`)
* `options.level` **[number][15]** level to extract from a multi-level input (OpenSlide), zero based. (optional, default `0`)
* `options.animated` **[boolean][14]** Set to `true` to read all frames/pages of an animated image (equivalent of setting `pages` to `-1`). (optional, default `false`)
* `options.raw` **[Object][13]?** describes raw pixel input image data. See `raw()` for pixel ordering.
* `options.raw.width` **[number][8]?** integral number of pixels wide.
* `options.raw.height` **[number][8]?** integral number of pixels high.
* `options.raw.channels` **[number][8]?** integral number of channels, between 1 and 4.
* `options.raw.premultiplied` **[boolean][7]?** specifies that the raw input has already been premultiplied, set to `true`
* `options.raw.width` **[number][15]?** integral number of pixels wide.
* `options.raw.height` **[number][15]?** integral number of pixels high.
* `options.raw.channels` **[number][15]?** integral number of channels, between 1 and 4.
* `options.raw.premultiplied` **[boolean][14]?** specifies that the raw input has already been premultiplied, set to `true`
to avoid sharp premultiplying the image. (optional, default `false`)
* `options.create` **[Object][6]?** describes a new image to be created.
* `options.create` **[Object][13]?** describes a new image to be created.
* `options.create.width` **[number][8]?** integral number of pixels wide.
* `options.create.height` **[number][8]?** integral number of pixels high.
* `options.create.channels` **[number][8]?** integral number of channels, either 3 (RGB) or 4 (RGBA).
* `options.create.background` **([string][5] | [Object][6])?** parsed by the [color][9] module to extract values for red, green, blue and alpha.
* `options.create.noise` **[Object][6]?** describes a noise to be created.
* `options.create.width` **[number][15]?** integral number of pixels wide.
* `options.create.height` **[number][15]?** integral number of pixels high.
* `options.create.channels` **[number][15]?** integral number of channels, either 3 (RGB) or 4 (RGBA).
* `options.create.background` **([string][12] | [Object][13])?** parsed by the [color][16] module to extract values for red, green, blue and alpha.
* `options.create.noise` **[Object][13]?** describes a noise to be created.
* `options.create.noise.type` **[string][5]?** type of generated noise, currently only `gaussian` is supported.
* `options.create.noise.mean` **[number][8]?** mean of pixels in generated noise.
* `options.create.noise.sigma` **[number][8]?** standard deviation of pixels in generated noise.
* `options.create.noise.type` **[string][12]?** type of generated noise, currently only `gaussian` is supported.
* `options.create.noise.mean` **[number][15]?** mean of pixels in generated noise.
* `options.create.noise.sigma` **[number][15]?** standard deviation of pixels in generated noise.
### Examples
@@ -126,9 +127,9 @@ await sharp({
}).toFile('noise.png');
```
* Throws **[Error][10]** Invalid parameters
* Throws **[Error][17]** Invalid parameters
Returns **[Sharp][11]**
Returns **[Sharp][18]**
## clone
@@ -196,7 +197,7 @@ Promise.all(promises)
});
```
Returns **[Sharp][11]**
Returns **[Sharp][18]**
[1]: http://nodejs.org/api/stream.html#stream_class_stream_duplex
@@ -206,16 +207,30 @@ Returns **[Sharp][11]**
[4]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Uint8ClampedArray
[5]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String
[5]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Int8Array
[6]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object
[6]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Uint16Array
[7]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean
[7]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Int16Array
[8]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number
[8]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Uint32Array
[9]: https://www.npmjs.org/package/color
[9]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Int32Array
[10]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error
[10]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Float32Array
[11]: #sharp
[11]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Float64Array
[12]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String
[13]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object
[14]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean
[15]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number
[16]: https://www.npmjs.org/package/color
[17]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error
[18]: #sharp


@@ -22,6 +22,8 @@ A `Promise` is returned when `callback` is not provided.
* `pagePrimary`: Number of the primary page in a HEIF image
* `levels`: Details of each level in a multi-level image provided as an array of objects, requires libvips compiled with support for OpenSlide
* `subifds`: Number of Sub Image File Directories in an OME-TIFF image
* `background`: Default background colour, if present, for PNG (bKGD) and GIF images, either an RGB Object or a single greyscale value
* `compression`: The encoder used to compress an HEIF file, `av1` (AVIF) or `hevc` (HEIC)
* `hasProfile`: Boolean indicating the presence of an embedded ICC profile
* `hasAlpha`: Boolean indicating the presence of an alpha transparency channel
* `orientation`: Number value of the EXIF Orientation header, if present
@@ -75,6 +77,9 @@ A `Promise` is returned when `callback` is not provided.
* `sharpness`: Estimation of greyscale sharpness based on the standard deviation of a Laplacian convolution, discarding alpha channel if any (experimental)
* `dominant`: Object containing most dominant sRGB colour based on a 4096-bin 3D histogram (experimental)
**Note**: Statistics are derived from the original input image. Any operations performed on the image must first be
written to a buffer in order to run `stats` on the result (see third example).
### Parameters
* `callback` **[Function][4]?** called with the arguments `(err, stats)`
@@ -95,6 +100,14 @@ const { entropy, sharpness, dominant } = await sharp(input).stats();
const { r, g, b } = dominant;
```
```javascript
const image = sharp(input);
// store intermediate result
const part = await image.extract(region).toBuffer();
// create new instance to obtain statistics of extracted region
const stats = await sharp(part).stats();
```
Returns **[Promise][5]<[Object][6]>**
[1]: https://libvips.github.io/libvips/API/current/VipsImage.html#VipsInterpretation


@@ -221,7 +221,9 @@ Produce the "negative" of the image.
### Parameters
* `negate` **[Boolean][6]** (optional, default `true`)
* `options` **[Object][2]?**
* `options.alpha` **[Boolean][6]** Whether or not to negate any alpha channel (optional, default `true`)
Returns **Sharp**
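A minimal sketch of the new option, assuming `input` is an image with an alpha channel:
```javascript
// Invert the colour channels while leaving any alpha channel untouched
const inverted = await sharp(input)
  .negate({ alpha: false })
  .toBuffer();
```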
@@ -398,7 +400,9 @@ Returns **Sharp**
## modulate
Transforms the image using brightness, saturation and hue rotation.
Transforms the image using brightness, saturation, hue rotation, and lightness.
Brightness and lightness both operate on luminance, with the difference being that
brightness is multiplicative whereas lightness is additive.
### Parameters
@@ -407,13 +411,14 @@ Transforms the image using brightness, saturation and hue rotation.
* `options.brightness` **[number][1]?** Brightness multiplier
* `options.saturation` **[number][1]?** Saturation multiplier
* `options.hue` **[number][1]?** Degrees for hue rotation
* `options.lightness` **[number][1]?** Lightness addend
### Examples
```javascript
sharp(input)
.modulate({
brightness: 2 // increase lightness by a factor of 2
brightness: 2 // increase brightness by a factor of 2
});
sharp(input)
@@ -421,6 +426,11 @@ sharp(input)
hue: 180 // hue-rotate by 180 degrees
});
sharp(input)
.modulate({
lightness: 50 // increase lightness by +50
});
// decrease brightness and saturation while also hue-rotating by 90 degrees
sharp(input)
.modulate({


@@ -11,6 +11,8 @@ Note that raw pixel data is only supported for buffer output.
By default all metadata will be removed, which includes EXIF-based orientation.
See [withMetadata][1] for control over this.
The caller is responsible for ensuring directory structures and permissions exist.
A `Promise` is returned when `callback` is not provided.
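A brief sketch of creating the destination directory before writing; the paths used here are illustrative:
```javascript
const fs = require('fs');

// toFile will not create missing directories, so ensure they exist first
await fs.promises.mkdir('output/thumbnails', { recursive: true });
await sharp('input.jpg')
  .resize(200)
  .toFile('output/thumbnails/thumb.jpg');
```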
### Parameters
@@ -328,10 +330,57 @@ The prebuilt binaries do not include this - see
Returns **Sharp**
## jp2
Use these JP2 options for output image.
Requires libvips compiled with support for OpenJPEG.
The prebuilt binaries do not include this - see
[installing a custom libvips][11].
### Parameters
* `options` **[Object][6]?** output options
* `options.quality` **[number][9]** quality, integer 1-100 (optional, default `80`)
* `options.lossless` **[boolean][7]** use lossless compression mode (optional, default `false`)
* `options.tileWidth` **[number][9]** horizontal tile size (optional, default `512`)
* `options.tileHeight` **[number][9]** vertical tile size (optional, default `512`)
* `options.chromaSubsampling` **[string][2]** set to '4:2:0' to use chroma subsampling (optional, default `'4:4:4'`)
### Examples
```javascript
// Convert any input to lossless JP2 output
const data = await sharp(input)
.jp2({ lossless: true })
.toBuffer();
```
```javascript
// Convert any input to very high quality JP2 output
const data = await sharp(input)
.jp2({
quality: 100,
chromaSubsampling: '4:4:4'
})
.toBuffer();
```
* Throws **[Error][4]** Invalid options
Returns **Sharp**
**Meta**
* **since**: 0.29.1
## tiff
Use these TIFF options for output image.
The `density` can be set in pixels/inch via [withMetadata][1] instead of providing `xres` and `yres` in pixels/mm.
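For example, a sketch of producing a 300 DPI TIFF via `withMetadata` rather than calculating `xres`/`yres` by hand; the filenames are illustrative:
```javascript
// density is given in pixels/inch; the TIFF xres/yres values are imputed from it
await sharp('input.png')
  .withMetadata({ density: 300 })
  .tiff()
  .toFile('output-300dpi.tiff');
```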
### Parameters
* `options` **[Object][6]?** output options
@@ -372,14 +421,16 @@ Use these AVIF options for output image.
Whilst it is possible to create AVIF images smaller than 16x16 pixels,
most web browsers do not display these properly.
AVIF image sequences are not supported.
### Parameters
* `options` **[Object][6]?** output options
* `options.quality` **[number][9]** quality, integer 1-100 (optional, default `50`)
* `options.lossless` **[boolean][7]** use lossless compression (optional, default `false`)
* `options.speed` **[number][9]** CPU effort vs file size, 0 (slowest/smallest) to 8 (fastest/largest) (optional, default `5`)
* `options.chromaSubsampling` **[string][2]** set to '4:4:4' to prevent chroma subsampling otherwise defaults to '4:2:0' chroma subsampling, requires libvips v8.11.0 (optional, default `'4:2:0'`)
* `options.speed` **[number][9]** CPU effort vs file size, 0 (slowest/smallest) to 9 (fastest/largest) (optional, default `5`)
* `options.chromaSubsampling` **[string][2]** set to '4:2:0' to use chroma subsampling (optional, default `'4:4:4'`)
<!---->
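A sketch of the updated AVIF defaults, explicitly opting back in to chroma subsampling and requesting the new fastest speed; the values shown are illustrative:
```javascript
// 4:4:4 is now the default; request 4:2:0 subsampling and speed 9 explicitly
const avif = await sharp(input)
  .avif({ quality: 50, speed: 9, chromaSubsampling: '4:2:0' })
  .toBuffer();
```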
@@ -405,8 +456,8 @@ globally-installed libvips compiled with support for libheif, libde265 and x265.
* `options.quality` **[number][9]** quality, integer 1-100 (optional, default `50`)
* `options.compression` **[string][2]** compression format: av1, hevc (optional, default `'av1'`)
* `options.lossless` **[boolean][7]** use lossless compression (optional, default `false`)
* `options.speed` **[number][9]** CPU effort vs file size, 0 (slowest/smallest) to 8 (fastest/largest) (optional, default `5`)
* `options.chromaSubsampling` **[string][2]** set to '4:4:4' to prevent chroma subsampling otherwise defaults to '4:2:0' chroma subsampling, requires libvips v8.11.0 (optional, default `'4:2:0'`)
* `options.speed` **[number][9]** CPU effort vs file size, 0 (slowest/smallest) to 9 (fastest/largest) (optional, default `5`)
* `options.chromaSubsampling` **[string][2]** set to '4:2:0' to use chroma subsampling (optional, default `'4:4:4'`)
<!---->
@@ -420,30 +471,36 @@ Returns **Sharp**
## raw
Force output to be raw, uncompressed, 8-bit unsigned integer (uint8) pixel data.
Force output to be raw, uncompressed pixel data.
Pixel ordering is left-to-right, top-to-bottom, without padding.
Channel ordering will be RGB or RGBA for non-greyscale colourspaces.
### Parameters
* `options` **[Object][6]?** output options
* `options.depth` **[string][2]** bit depth, one of: char, uchar (default), short, ushort, int, uint, float, complex, double, dpcomplex (optional, default `'uchar'`)
### Examples
```javascript
// Extract raw RGB pixel data from JPEG input
// Extract raw, unsigned 8-bit RGB pixel data from JPEG input
const { data, info } = await sharp('input.jpg')
.raw()
.toBuffer({ resolveWithObject: true });
```
```javascript
// Extract alpha channel as raw pixel data from PNG input
// Extract alpha channel as raw, unsigned 16-bit pixel data from PNG input
const data = await sharp('input.png')
.ensureAlpha()
.extractChannel(3)
.toColourspace('b-w')
.raw()
.raw({ depth: 'ushort' })
.toBuffer();
```
Returns **Sharp**
* Throws **[Error][4]** Invalid options
## tile
@@ -487,6 +544,26 @@ sharp('input.tiff')
Returns **Sharp**
## timeout
Set a timeout for processing, in seconds.
Use a value of zero to continue processing indefinitely, the default behaviour.
The clock starts when libvips opens an input image for processing.
Time spent waiting for a libuv thread to become available is not included.
### Parameters
* `options` **[Object][6]**
* `options.seconds` **[number][9]** Number of seconds after which processing will be stopped
Returns **Sharp**
**Meta**
* **since**: 0.29.2
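A minimal sketch of the new function, assuming an input large enough for the limit to matter:
```javascript
// Stop processing if it takes longer than 3 seconds
try {
  await sharp('large-input.jpg')
    .timeout({ seconds: 3 })
    .resize(3000)
    .toFile('output.jpg');
} catch (err) {
  // processing was stopped once the time limit was exceeded
  console.error(err.message);
}
```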
[1]: #withmetadata
[2]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String


@@ -1,5 +1,99 @@
# Changelog
## v0.29 - *circle*
Requires libvips v8.11.3
### v0.29.3 - 14th November 2021
* Ensure correct dimensions when a contained image is resized to 1px.
[#2951](https://github.com/lovell/sharp/issues/2951)
* Impute TIFF `xres`/`yres` from `density` provided to `withMetadata`.
[#2952](https://github.com/lovell/sharp/pull/2952)
[@mbklein](https://github.com/mbklein)
### v0.29.2 - 21st October 2021
* Add `timeout` function to limit processing time.
* Ensure `sharp.versions` is populated from vendored libvips.
* Remove animation properties from single page images.
[#2890](https://github.com/lovell/sharp/issues/2890)
* Allow use of 'tif' to select TIFF output.
[#2893](https://github.com/lovell/sharp/pull/2893)
[@erf](https://github.com/erf)
* Improve error message on Windows for version conflict.
[#2918](https://github.com/lovell/sharp/pull/2918)
[@dkrnl](https://github.com/dkrnl)
* Throw error rather than exit when invalid binaries detected.
[#2931](https://github.com/lovell/sharp/issues/2931)
### v0.29.1 - 7th September 2021
* Add `lightness` option to `modulate` operation.
[#2846](https://github.com/lovell/sharp/pull/2846)
* Ensure correct PNG bitdepth is set based on number of colours.
[#2855](https://github.com/lovell/sharp/issues/2855)
* Ensure background is always premultiplied when compositing.
[#2858](https://github.com/lovell/sharp/issues/2858)
* Ensure images with P3 profiles retain full gamut.
[#2862](https://github.com/lovell/sharp/issues/2862)
* Add support for libvips compiled with OpenJPEG.
[#2868](https://github.com/lovell/sharp/pull/2868)
* Remove unsupported animation properties from AVIF output.
[#2870](https://github.com/lovell/sharp/issues/2870)
* Resolve paths before comparing input/output filenames.
[#2878](https://github.com/lovell/sharp/pull/2878)
[@rexxars](https://github.com/rexxars)
* Allow use of speed 9 (fastest) for HEIF encoding.
[#2879](https://github.com/lovell/sharp/pull/2879)
[@rexxars](https://github.com/rexxars)
### v0.29.0 - 17th August 2021
* Drop support for Node.js 10, now requires Node.js >= 12.13.0.
* Add `background` property to PNG and GIF image metadata.
* Add `compression` property to HEIF image metadata.
[#2504](https://github.com/lovell/sharp/issues/2504)
* AVIF encoding now defaults to `4:4:4` chroma subsampling.
[#2562](https://github.com/lovell/sharp/issues/2562)
* Allow multiple platform-arch binaries in same `node_modules` installation tree.
[#2575](https://github.com/lovell/sharp/issues/2575)
* Default to single-channel `b-w` space when `extractChannel` is used.
[#2658](https://github.com/lovell/sharp/issues/2658)
* Allow installation directory to contain spaces (regression in v0.26.0).
[#2777](https://github.com/lovell/sharp/issues/2777)
* Add `pipelineColourspace` operator to set the processing space.
[#2704](https://github.com/lovell/sharp/pull/2704)
[@Daiz](https://github.com/Daiz)
* Allow bit depth to be set when using raw input and output.
[#2762](https://github.com/lovell/sharp/pull/2762)
[@mart-jansink](https://github.com/mart-jansink)
* Allow `negate` to act only on non-alpha channels.
[#2808](https://github.com/lovell/sharp/pull/2808)
[@rexxars](https://github.com/rexxars)
## v0.28 - *bijou*
Requires libvips v8.10.6

docs/docute.min.js (vendored, new file, +1 line)

File diff suppressed because one or more lines are too long


@@ -6,7 +6,6 @@
".*",
"build.js",
"firebase.json",
"*.md",
"image/**",
"search-index/**"
],
@@ -14,10 +13,9 @@
{
"source": "**",
"headers": [
{
"key": "Cache-Control",
"value": "max-age=86400"
}
{ "key": "Cache-Control", "value": "max-age=86400" },
{ "key": "X-Content-Type-Options", "value": "nosniff" },
{ "key": "X-Frame-Options", "value": "SAMEORIGIN" }
]
}
],


@@ -212,3 +212,18 @@ GitHub: https://github.com/florian-busch
Name: Matthieu Salettes
GitHub: https://github.com/msalettes
Name: Taneli Vatanen
GitHub: https://github.com/Daiz
Name: Mart Jansink
GitHub: https://github.com/mart-jansink
Name: Tenpi
GitHub: https://github.com/Tenpi
Name: Zaruike
GitHub: https://github.com/Zaruike
Name: Erlend F
GitHub: https://github.com/erf

File diff suppressed because one or more lines are too long


@@ -10,20 +10,20 @@ yarn add sharp
## Prerequisites
* Node.js v10+
* Node.js >= 12.13.0
## Prebuilt binaries
Ready-compiled sharp and libvips binaries are provided for use with
Node.js v10+ on the most common platforms:
Ready-compiled sharp and libvips binaries are provided for use on the most common platforms:
* macOS x64 (>= 10.13)
* macOS ARM64
* Linux x64 (glibc >= 2.17, musl >= 1.1.24)
* Linux ARM64 (glibc >= 2.29, musl >= 1.1.24)
* Windows x64
* Windows x86
An ~7.5MB tarball containing libvips and its most commonly used dependencies
An ~7MB tarball containing libvips and its most commonly used dependencies
is downloaded via HTTPS and stored within `node_modules/sharp/vendor` during `npm install`.
This provides support for the
@@ -31,9 +31,8 @@ JPEG, PNG, WebP, AVIF, TIFF, GIF (input) and SVG (input) image formats.
The following platforms have prebuilt libvips but not sharp:
* macOS ARM64
* Linux ARMv6
* Linux ARMv7 (glibc >= 2.28)
* Linux ARMv6 (glibc >= 2.28)
* Windows ARM64
The following platforms require compilation of both libvips and sharp from source:
@@ -41,6 +40,8 @@ The following platforms require compilation of both libvips and sharp from sourc
* Linux x86
* Linux x64 (glibc <= 2.16, includes RHEL/CentOS 6)
* Linux ARM64 (glibc <= 2.28)
* Linux ARMv7 (glibc <= 2.27, musl)
* Linux ARMv6 (glibc <= 2.27, musl)
* Linux PowerPC
* FreeBSD
* OpenBSD
@@ -61,15 +62,9 @@ Check the output of running `npm install --verbose sharp` for useful error messa
## Apple M1
Prebuilt libvips binaries are provided for macOS on ARM64 (since sharp v0.28.0).
Prebuilt sharp and libvips binaries are provided for macOS on ARM64 from sharp v0.29.0.
During `npm install` sharp will be built locally,
which requires Xcode and Python - see
[building from source](#building-from-source).
When this new ARM64 CPU is made freely available
to open source projects via a CI service
then prebuilt sharp binaries can also be provided.
Prebuilt libvips binaries were provided for macOS on ARM64 from sharp v0.28.0.
## Cross-platform
@@ -138,14 +133,10 @@ To install the prebuilt libvips binaries from a custom URL,
set the `sharp_libvips_binary_host` npm config option
or the `npm_config_sharp_libvips_binary_host` environment variable.
The version subpath and file name are appended to these. There should be tarballs available
that are compressed with both gzip and Brotli, as the format downloaded will vary depending
on whether the user's version of Node supports Brotli decompression (Node.js v10.16.0+)
The version subpath and file name are appended to these.
For example, if `sharp_libvips_binary_host` is set to `https://hostname/path`
and the libvips version is `1.2.3` then the resultant URL will be
`https://hostname/path/v1.2.3/libvips-1.2.3-platform-arch.tar.br` or
`https://hostname/path/v1.2.3/libvips-1.2.3-platform-arch.tar.gz`.
`https://hostname/path/v1.2.3/libvips-1.2.3-platform-arch.tar.br`.
See the Chinese mirror below for a further example.
@@ -210,32 +201,18 @@ to `false` when using the `yarn` package manager.
## AWS Lambda
The binaries in the `node_modules` directory of the
The `node_modules` directory of the
[deployment package](https://docs.aws.amazon.com/lambda/latest/dg/nodejs-package.html)
must be for the Linux x64 platform.
must include binaries for the Linux x64 platform.
When building your deployment package on machines other than Linux x64 (glibc),
run the following commands:
run the following additional command after `npm install`:
macOS:
```sh
rm -rf node_modules/sharp
npm install
SHARP_IGNORE_GLOBAL_LIBVIPS=1 npm install --arch=x64 --platform=linux sharp
```
Windows:
```sh
rmdir /s /q node_modules/sharp
npm install --arch=x64 --platform=linux sharp
```
Alternatively a Docker container closely matching the Lambda runtime can be used:
```sh
rm -rf node_modules/sharp
docker run -v "$PWD":/var/task lambci/lambda:build-nodejs12.x npm install sharp
```
To get the best performance select the largest memory available.
A 1536 MB function provides ~12x more CPU time than a 128 MB function.


@@ -5,9 +5,11 @@ A test to benchmark the performance of this module relative to alternatives.
## The contenders
* [jimp](https://www.npmjs.com/package/jimp) v0.16.1 - Image processing in pure JavaScript. Provides bicubic interpolation.
* [mapnik](https://www.npmjs.org/package/mapnik) v4.5.6 - Whilst primarily a map renderer, Mapnik contains bitmap image utilities.
* [mapnik](https://www.npmjs.org/package/mapnik) v4.5.8 - Whilst primarily a map renderer, Mapnik contains bitmap image utilities.
* [imagemagick](https://www.npmjs.com/package/imagemagick) v0.1.3 - Supports filesystem only and "*has been unmaintained for a long time*".
* [gm](https://www.npmjs.com/package/gm) v1.23.1 - Fully featured wrapper around GraphicsMagick's `gm` command line utility.
* [@squoosh/lib](https://www.npmjs.com/package/@squoosh/lib) v0.4.0 - Image libraries transpiled to WebAssembly, includes GPLv3 code.
* [@squoosh/cli](https://www.npmjs.com/package/@squoosh/cli) v0.7.2 - Command line wrapper around `@squoosh/lib`, avoids GPLv3 by spawning process.
* sharp v0.28.0 / libvips v8.10.6 - Caching within libvips disabled to ensure a fair comparison.
## The task
@@ -19,21 +21,23 @@ then compress to JPEG at a "quality" setting of 80.
## Test environment
* AWS EC2 eu-west-1 [c5ad.xlarge](https://aws.amazon.com/ec2/instance-types/c5/) (4x AMD EPYC 7R32)
* Ubuntu 20.10 (ami-03f10415e8b0bfb86)
* Node.js v14.16.0
* Ubuntu 21.04 (ami-0d7626a9c2ceab1ac)
* Node.js 16.6.2
## Results
| Module | Input | Output | Ops/sec | Speed-up |
| :----------------- | :----- | :----- | ------: | -------: |
| jimp | buffer | buffer | 0.78 | 1.0 |
| mapnik | buffer | buffer | 3.39 | 4.3 |
| gm | buffer | buffer | 7.84 | 10.1 |
| gm | file | file | 9.24 | 11.8 |
| imagemagick | file | file | 9.37 | 12.0 |
| sharp | stream | stream | 26.84 | 34.4 |
| sharp | file | file | 29.76 | 38.2 |
| sharp | buffer | buffer | 31.60 | 40.5 |
| jimp | buffer | buffer | 0.83 | 1.0 |
| squoosh-cli | file | file | 1.09 | 1.3 |
| squoosh-lib | buffer | buffer | 1.83 | 2.2 |
| mapnik | buffer | buffer | 3.41 | 4.1 |
| gm | buffer | buffer | 8.34 | 10.0 |
| imagemagick | file | file | 8.67 | 10.4 |
| gm | file | file | 8.82 | 10.6 |
| sharp | stream | stream | 29.44 | 35.5 |
| sharp | file | file | 29.64 | 35.7 |
| sharp | buffer | buffer | 31.09 | 37.5 |
Greater libvips performance can be expected with caching enabled (default)
and using 8+ core machines, especially those with larger L1/L2 CPU caches.

File diff suppressed because one or more lines are too long


@@ -4,19 +4,19 @@ const fs = require('fs');
const path = require('path');
const libvips = require('../lib/libvips');
const platform = require('../lib/platform');
const minimumLibvipsVersion = libvips.minimumLibvipsVersion;
const platform = process.env.npm_config_platform || process.platform;
if (platform === 'win32') {
const buildDir = path.join(__dirname, '..', 'build');
const buildReleaseDir = path.join(buildDir, 'Release');
const platformAndArch = platform();
if (platformAndArch.startsWith('win32')) {
const buildReleaseDir = path.join(__dirname, '..', 'build', 'Release');
libvips.log(`Creating ${buildReleaseDir}`);
try {
libvips.mkdirSync(buildDir);
libvips.mkdirSync(buildReleaseDir);
} catch (err) {}
const vendorLibDir = path.join(__dirname, '..', 'vendor', minimumLibvipsVersion, 'lib');
const vendorLibDir = path.join(__dirname, '..', 'vendor', minimumLibvipsVersion, platformAndArch, 'lib');
libvips.log(`Copying DLLs from ${vendorLibDir} to ${buildReleaseDir}`);
try {
fs


@@ -24,6 +24,7 @@ const minimumGlibcVersionByArch = {
const hasSharpPrebuild = [
'darwin-x64',
'darwin-arm64',
'linux-arm64',
'linux-x64',
'linuxmusl-x64',
@@ -35,7 +36,6 @@ const hasSharpPrebuild = [
const { minimumLibvipsVersion, minimumLibvipsVersionLabelled } = libvips;
const distHost = process.env.npm_config_sharp_libvips_binary_host || 'https://github.com/lovell/sharp-libvips/releases/download';
const distBaseUrl = process.env.npm_config_sharp_dist_base_url || process.env.SHARP_DIST_BASE_URL || `${distHost}/v${minimumLibvipsVersionLabelled}/`;
const supportsBrotli = ('BrotliDecompress' in zlib);
const installationForced = !!(process.env.npm_config_sharp_install_force || process.env.SHARP_INSTALL_FORCE);
const fail = function (err) {
@@ -56,9 +56,7 @@ const handleError = function (err) {
};
const extractTarball = function (tarPath, platformAndArch) {
const vendorPath = path.join(__dirname, '..', 'vendor');
libvips.mkdirSync(vendorPath);
const versionedVendorPath = path.join(vendorPath, minimumLibvipsVersion);
const versionedVendorPath = path.join(__dirname, '..', 'vendor', minimumLibvipsVersion, platformAndArch);
libvips.mkdirSync(versionedVendorPath);
const ignoreVendorInclude = hasSharpPrebuild.includes(platformAndArch) && !process.env.npm_config_build_from_source;
@@ -68,7 +66,7 @@ const extractTarball = function (tarPath, platformAndArch) {
stream.pipeline(
fs.createReadStream(tarPath),
supportsBrotli ? new zlib.BrotliDecompress() : new zlib.Gunzip(),
new zlib.BrotliDecompress(),
tarFs.extract(versionedVendorPath, { ignore }),
function (err) {
if (err) {
@@ -121,10 +119,8 @@ try {
handleError(new Error(`Expected Node.js version ${supportedNodeVersion} but found ${process.versions.node}`));
}
const extension = supportsBrotli ? 'br' : 'gz';
// Download to per-process temporary file
const tarFilename = ['libvips', minimumLibvipsVersion, platformAndArch].join('-') + '.tar.' + extension;
const tarFilename = ['libvips', minimumLibvipsVersion, platformAndArch].join('-') + '.tar.br';
const tarPathCache = path.join(libvips.cachePath(), tarFilename);
if (fs.existsSync(tarPathCache)) {
libvips.log(`Using cached ${tarPathCache}`);

View File

@@ -72,13 +72,17 @@ function ensureAlpha (alpha) {
* Extract a single channel from a multi-channel image.
*
* @example
* sharp(input)
* // green.jpg is a greyscale image containing the green channel of the input
* await sharp(input)
* .extractChannel('green')
* .toColourspace('b-w')
* .toFile('green.jpg', function(err, info) {
* // info.channels === 1
* // green.jpg is a greyscale image containing the green channel of the input
* });
* .toFile('green.jpg');
*
* @example
* // red1 is the red value of the first pixel, red2 the second pixel etc.
* const [red1, red2, ...] = await sharp(input)
* .extractChannel(0)
* .raw()
* .toBuffer();
*
* @param {number|string} channel - zero-indexed channel/band number to extract, or `red`, `green`, `blue` or `alpha`.
* @returns {Sharp}
@@ -94,7 +98,7 @@ function extractChannel (channel) {
} else {
throw is.invalidParameterError('channel', 'integer or one of: red, green, blue, alpha', channel);
}
return this;
return this.toColourspace('b-w');
}
/**

View File

@@ -54,6 +54,45 @@ function grayscale (grayscale) {
return this.greyscale(grayscale);
}
/**
* Set the pipeline colourspace.
*
* The input image will be converted to the provided colourspace at the start of the pipeline.
* All operations will use this colourspace before converting to the output colourspace, as defined by {@link toColourspace}.
*
* This feature is experimental and has not yet been fully tested with all operations.
*
* @since 0.29.0
*
* @example
* // Run pipeline in 16 bits per channel RGB while converting final result to 8 bits per channel sRGB.
* await sharp(input)
* .pipelineColourspace('rgb16')
* .toColourspace('srgb')
* .toFile('16bpc-pipeline-to-8bpc-output.png')
*
* @param {string} [colourspace] - pipeline colourspace e.g. `rgb16`, `scrgb`, `lab`, `grey16` [...](https://github.com/libvips/libvips/blob/41cff4e9d0838498487a00623462204eb10ee5b8/libvips/iofuncs/enumtypes.c#L774)
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function pipelineColourspace (colourspace) {
if (!is.string(colourspace)) {
throw is.invalidParameterError('colourspace', 'string', colourspace);
}
this.options.colourspaceInput = colourspace;
return this;
}
/**
* Alternative spelling of `pipelineColourspace`.
* @param {string} [colorspace] - pipeline colorspace.
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function pipelineColorspace (colorspace) {
return this.pipelineColourspace(colorspace);
}
/**
* Set the output colourspace.
* By default output image will be web-friendly sRGB, with additional channels interpreted as alpha channels.
@@ -64,7 +103,7 @@ function grayscale (grayscale) {
* .toColourspace('rgb16')
* .toFile('16-bpp.png')
*
* @param {string} [colourspace] - output colourspace e.g. `srgb`, `rgb`, `cmyk`, `lab`, `b-w` [...](https://github.com/libvips/libvips/blob/master/libvips/iofuncs/enumtypes.c#L568)
* @param {string} [colourspace] - output colourspace e.g. `srgb`, `rgb`, `cmyk`, `lab`, `b-w` [...](https://github.com/libvips/libvips/blob/3c0bfdf74ce1dc37a6429bed47fa76f16e2cd70a/libvips/iofuncs/enumtypes.c#L777-L794)
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
@@ -119,6 +158,8 @@ module.exports = function (Sharp) {
tint,
greyscale,
grayscale,
pipelineColourspace,
pipelineColorspace,
toColourspace,
toColorspace,
// Private

View File

@@ -5,32 +5,7 @@ const stream = require('stream');
const is = require('./is');
require('./libvips').hasVendoredLibvips();
/* istanbul ignore next */
try {
require('../build/Release/sharp.node');
} catch (err) {
// Bail early if bindings aren't available
const help = ['', 'Something went wrong installing the "sharp" module', '', err.message, ''];
if (/NODE_MODULE_VERSION/.test(err.message)) {
help.push('- Ensure the version of Node.js used at install time matches that used at runtime');
} else if (/invalid ELF header/.test(err.message)) {
help.push(`- Ensure "${process.platform}" is used at install time as well as runtime`);
} else if (/dylib/.test(err.message) && /Incompatible library version/.test(err.message)) {
help.push('- Run "brew update && brew upgrade vips"');
} else {
help.push(
'- Remove the "node_modules/sharp" directory then run',
' "npm install --ignore-scripts=false --verbose sharp" and look for errors'
);
}
help.push(
'- Consult the installation documentation at https://sharp.pixelplumbing.com/install',
'- Search for this error at https://github.com/lovell/sharp/issues', ''
);
const error = help.join('\n');
throw new Error(error);
}
require('./sharp');
// Use NODE_DEBUG=sharp to enable libvips warnings
const debuglog = util.debuglog('sharp');
@@ -117,8 +92,9 @@ const debuglog = util.debuglog('sharp');
* }
* }).toFile('noise.png');
*
* @param {(Buffer|Uint8Array|Uint8ClampedArray|string)} [input] - if present, can be
* a Buffer / Uint8Array / Uint8ClampedArray containing JPEG, PNG, WebP, AVIF, GIF, SVG, TIFF or raw pixel image data, or
* @param {(Buffer|Uint8Array|Uint8ClampedArray|Int8Array|Uint16Array|Int16Array|Uint32Array|Int32Array|Float32Array|Float64Array|string)} [input] - if present, can be
* a Buffer / Uint8Array / Uint8ClampedArray containing JPEG, PNG, WebP, AVIF, GIF, SVG or TIFF image data, or
* a TypedArray containing raw pixel image data, or
* a String containing the filesystem path to a JPEG, PNG, WebP, AVIF, GIF, SVG or TIFF image file.
* JPEG, PNG, WebP, AVIF, GIF, SVG, TIFF or raw pixel image data can be streamed into the object when not present.
* @param {Object} [options] - if present, is an Object with optional attributes.
@@ -204,6 +180,7 @@ const Sharp = function (input, options) {
flatten: false,
flattenBackground: [0, 0, 0],
negate: false,
negateAlpha: true,
medianSize: 0,
blurSigma: 0,
sharpenSigma: 0,
@@ -222,6 +199,7 @@ const Sharp = function (input, options) {
brightness: 1,
saturation: 1,
hue: 0,
lightness: 0,
booleanBufferIn: null,
booleanFileIn: '',
joinChannelIn: [],
@@ -229,6 +207,7 @@ const Sharp = function (input, options) {
removeAlpha: false,
ensureAlpha: -1,
colourspace: 'srgb',
colourspaceInput: 'last',
composite: [],
// output
fileOut: '',
@@ -254,8 +233,13 @@ const Sharp = function (input, options) {
pngAdaptiveFiltering: false,
pngPalette: false,
pngQuality: 100,
pngColours: 256,
pngBitdepth: 8,
pngDither: 1,
jp2Quality: 80,
jp2TileHeight: 512,
jp2TileWidth: 512,
jp2Lossless: false,
jp2ChromaSubsampling: '4:4:4',
webpQuality: 80,
webpAlphaQuality: 100,
webpLossless: false,
@@ -276,7 +260,8 @@ const Sharp = function (input, options) {
heifLossless: false,
heifCompression: 'av1',
heifSpeed: 5,
heifChromaSubsampling: '4:2:0',
heifChromaSubsampling: '4:4:4',
rawDepth: 'uchar',
tileSize: 256,
tileOverlap: 0,
tileContainer: 'fs',
@@ -288,6 +273,7 @@ const Sharp = function (input, options) {
tileBackground: [255, 255, 255, 255],
tileCentre: false,
tileId: 'https://example.com/iiif',
timeoutSeconds: 0,
linearA: 1,
linearB: 0,
// Function to notify of libvips warnings
@@ -303,7 +289,8 @@ const Sharp = function (input, options) {
this.options.input = this._createInputDescriptor(input, options, { allowStream: true });
return this;
};
util.inherits(Sharp, stream.Duplex);
Object.setPrototypeOf(Sharp.prototype, stream.Duplex.prototype);
Object.setPrototypeOf(Sharp, stream.Duplex);
/**
* Take a "snapshot" of the Sharp instance, returning a new instance.

View File

@@ -2,7 +2,7 @@
const color = require('color');
const is = require('./is');
const sharp = require('../build/Release/sharp.node');
const sharp = require('./sharp');
/**
* Extract input options, if any, from an object.
@@ -34,8 +34,7 @@ function _createInputDescriptor (input, inputOptions, containerOptions) {
throw Error('Input Buffer is empty');
}
inputDescriptor.buffer = input;
} else if (is.uint8Array(input)) {
// Uint8Array or Uint8ClampedArray
} else if (is.typedArray(input)) {
if (input.length === 0) {
throw Error('Input Bit Array is empty');
}
@@ -104,6 +103,37 @@ function _createInputDescriptor (input, inputOptions, containerOptions) {
inputDescriptor.rawHeight = inputOptions.raw.height;
inputDescriptor.rawChannels = inputOptions.raw.channels;
inputDescriptor.rawPremultiplied = !!inputOptions.raw.premultiplied;
switch (input.constructor) {
case Uint8Array:
case Uint8ClampedArray:
inputDescriptor.rawDepth = 'uchar';
break;
case Int8Array:
inputDescriptor.rawDepth = 'char';
break;
case Uint16Array:
inputDescriptor.rawDepth = 'ushort';
break;
case Int16Array:
inputDescriptor.rawDepth = 'short';
break;
case Uint32Array:
inputDescriptor.rawDepth = 'uint';
break;
case Int32Array:
inputDescriptor.rawDepth = 'int';
break;
case Float32Array:
inputDescriptor.rawDepth = 'float';
break;
case Float64Array:
inputDescriptor.rawDepth = 'double';
break;
default:
inputDescriptor.rawDepth = 'uchar';
break;
}
} else {
throw new Error('Expected width, height and channels for raw pixel input');
}
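The raw depth is inferred from the TypedArray constructor as shown in the switch above. A minimal sketch of supplying unsigned 16-bit raw pixel data; the file name and pixel values are illustrative.

```js
const sharp = require('sharp');

// A hypothetical 2x2 single-channel image as unsigned 16-bit raw pixel data;
// a Uint16Array maps to a 'ushort' raw depth via the switch above.
const pixels = new Uint16Array([0, 16384, 32768, 65535]);

sharp(pixels, { raw: { width: 2, height: 2, channels: 1 } })
  .png()
  .toFile('raw-ushort.png')
  .catch(console.error);
```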
@@ -271,6 +301,8 @@ function _isStreamInput () {
* - `pagePrimary`: Number of the primary page in a HEIF image
* - `levels`: Details of each level in a multi-level image provided as an array of objects, requires libvips compiled with support for OpenSlide
* - `subifds`: Number of Sub Image File Directories in an OME-TIFF image
* - `background`: Default background colour, if present, for PNG (bKGD) and GIF images, either an RGB Object or a single greyscale value
* - `compression`: The encoder used to compress an HEIF file, `av1` (AVIF) or `hevc` (HEIC)
* - `hasProfile`: Boolean indicating the presence of an embedded ICC profile
* - `hasAlpha`: Boolean indicating the presence of an alpha transparency channel
* - `orientation`: Number value of the EXIF Orientation header, if present
@@ -356,6 +388,9 @@ function metadata (callback) {
* - `sharpness`: Estimation of greyscale sharpness based on the standard deviation of a Laplacian convolution, discarding alpha channel if any (experimental)
* - `dominant`: Object containing most dominant sRGB colour based on a 4096-bin 3D histogram (experimental)
*
* **Note**: Statistics are derived from the original input image. Any operations performed on the image must first be
* written to a buffer in order to run `stats` on the result (see third example).
*
* @example
* const image = sharp(inputJpg);
* image
@@ -368,6 +403,13 @@ function metadata (callback) {
* const { entropy, sharpness, dominant } = await sharp(input).stats();
* const { r, g, b } = dominant;
*
* @example
* const image = sharp(input);
* // store intermediate result
* const part = await image.extract(region).toBuffer();
* // create new instance to obtain statistics of extracted region
* const stats = await sharp(part).stats();
*
* @param {Function} [callback] - called with the arguments `(err, stats)`
* @returns {Promise<Object>}
*/

View File

@@ -49,12 +49,26 @@ const buffer = function (val) {
};
/**
* Is this value a Uint8Array or Uint8ClampedArray object?
* Is this value a typed array object, e.g. Uint8Array or Uint8ClampedArray?
* @private
*/
const uint8Array = function (val) {
// allow both since Uint8ClampedArray simply clamps the values between 0-255
return val instanceof Uint8Array || val instanceof Uint8ClampedArray;
const typedArray = function (val) {
if (defined(val)) {
switch (val.constructor) {
case Uint8Array:
case Uint8ClampedArray:
case Int8Array:
case Uint16Array:
case Int16Array:
case Uint32Array:
case Int32Array:
case Float32Array:
case Float64Array:
return true;
}
}
return false;
};
/**
@@ -119,7 +133,7 @@ module.exports = {
fn: fn,
bool: bool,
buffer: buffer,
uint8Array: uint8Array,
typedArray: typedArray,
string: string,
number: number,
integer: integer,

View File

@@ -21,9 +21,9 @@ const spawnSyncOptions = {
const mkdirSync = function (dirPath) {
try {
fs.mkdirSync(dirPath);
fs.mkdirSync(dirPath, { recursive: true });
} catch (err) {
/* istanbul ignore if */
/* istanbul ignore next */
if (err.code !== 'EEXIST') {
throw err;
}
@@ -67,23 +67,8 @@ const globalLibvipsVersion = function () {
};
const hasVendoredLibvips = function () {
const currentPlatformId = platform();
const vendorPath = path.join(__dirname, '..', 'vendor', minimumLibvipsVersion);
let vendorPlatformId;
try {
vendorPlatformId = require(path.join(vendorPath, 'platform.json'));
} catch (err) {}
/* istanbul ignore else */
if (vendorPlatformId) {
/* istanbul ignore else */
if (currentPlatformId === vendorPlatformId) {
return true;
} else {
throw new Error(`'${vendorPlatformId}' binaries cannot be used on the '${currentPlatformId}' platform. Please remove the 'node_modules/sharp' directory and run 'npm install' on the '${currentPlatformId}' platform.`);
}
} else {
return false;
}
const vendorPath = path.join(__dirname, '..', 'vendor', minimumLibvipsVersion, platform());
return fs.existsSync(vendorPath);
};
const pkgConfigPath = function () {

View File

@@ -325,11 +325,19 @@ function gamma (gamma, gammaOut) {
/**
* Produce the "negative" of the image.
* @param {Boolean} [negate=true]
* @param {Object} [options]
* @param {Boolean} [options.alpha=true] Whether or not to negate any alpha channel
* @returns {Sharp}
*/
function negate (negate) {
this.options.negate = is.bool(negate) ? negate : true;
function negate (options) {
this.options.negate = is.bool(options) ? options : true;
if (is.plainObject(options) && 'alpha' in options) {
if (!is.bool(options.alpha)) {
throw is.invalidParameterError('alpha', 'should be boolean value', options.alpha);
} else {
this.options.negateAlpha = options.alpha;
}
}
return this;
}
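A minimal sketch of the new option, inverting the colour channels while leaving any alpha channel untouched; file names are illustrative.

```js
const sharp = require('sharp');

sharp('input.png')
  .negate({ alpha: false }) // keep the alpha channel as-is
  .toFile('negated.png')
  .catch(console.error);
```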
@@ -562,14 +570,16 @@ function recomb (inputMatrix) {
}
/**
* Transforms the image using brightness, saturation and hue rotation.
* Transforms the image using brightness, saturation, hue rotation, and lightness.
* Brightness and lightness both operate on luminance, with the difference being that
* brightness is multiplicative whereas lightness is additive.
*
* @since 0.22.1
*
* @example
* sharp(input)
* .modulate({
* brightness: 2 // increase lightness by a factor of 2
* brightness: 2 // increase brightness by a factor of 2
* });
*
* sharp(input)
@@ -577,6 +587,11 @@ function recomb (inputMatrix) {
* hue: 180 // hue-rotate by 180 degrees
* });
*
* sharp(input)
* .modulate({
* lightness: 50 // increase lightness by +50
* });
*
* // decrease brightness and saturation while also hue-rotating by 90 degrees
* sharp(input)
* .modulate({
@@ -589,6 +604,7 @@ function recomb (inputMatrix) {
* @param {number} [options.brightness] Brightness multiplier
* @param {number} [options.saturation] Saturation multiplier
* @param {number} [options.hue] Degrees for hue rotation
* @param {number} [options.lightness] Lightness addend
* @returns {Sharp}
*/
function modulate (options) {
@@ -616,6 +632,13 @@ function modulate (options) {
throw is.invalidParameterError('hue', 'number', options.hue);
}
}
if ('lightness' in options) {
if (is.number(options.lightness)) {
this.options.lightness = options.lightness;
} else {
throw is.invalidParameterError('lightness', 'number', options.lightness);
}
}
return this;
}

View File

@@ -1,7 +1,8 @@
'use strict';
const path = require('path');
const is = require('./is');
const sharp = require('../build/Release/sharp.node');
const sharp = require('./sharp');
const formats = new Map([
['heic', 'heif'],
@@ -12,11 +13,17 @@ const formats = new Map([
['png', 'png'],
['raw', 'raw'],
['tiff', 'tiff'],
['tif', 'tiff'],
['webp', 'webp'],
['gif', 'gif']
['gif', 'gif'],
['jp2', 'jp2'],
['jpx', 'jp2'],
['j2k', 'jp2'],
['j2c', 'jp2']
]);
const errMagickSave = new Error('GIF output requires libvips with support for ImageMagick');
const errJp2Save = new Error('JP2 output requires libvips with support for OpenJPEG');
/**
* Write output image data to a file.
@@ -28,6 +35,8 @@ const errMagickSave = new Error('GIF output requires libvips with support for Im
* By default all metadata will be removed, which includes EXIF-based orientation.
* See {@link withMetadata} for control over this.
*
* The caller is responsible for ensuring directory structures and permissions exist.
*
* A `Promise` is returned when `callback` is not provided.
*
* @example
@@ -52,7 +61,7 @@ function toFile (fileOut, callback) {
let err;
if (!is.string(fileOut)) {
err = new Error('Missing output file path');
} else if (this.options.input.file === fileOut) {
} else if (is.string(this.options.input.file) && path.resolve(this.options.input.file) === path.resolve(fileOut)) {
err = new Error('Cannot use same file for input and output');
} else if (this.options.formatOut === 'input' && fileOut.toLowerCase().endsWith('.gif') && !this.constructor.format.magick.output.file) {
err = errMagickSave;
@@ -403,7 +412,7 @@ function png (options) {
const colours = options.colours || options.colors;
if (is.defined(colours)) {
if (is.integer(colours) && is.inRange(colours, 2, 256)) {
this.options.pngColours = colours;
this.options.pngBitdepth = 1 << 31 - Math.clz32(Math.ceil(Math.log2(colours)));
} else {
throw is.invalidParameterError('colours', 'integer between 2 and 256', colours);
}
@@ -509,6 +518,84 @@ function gif (options) {
return this._updateFormatOut('gif', options);
}
/**
* Use these JP2 options for output image.
*
* Requires libvips compiled with support for OpenJPEG.
* The prebuilt binaries do not include this - see
* {@link https://sharp.pixelplumbing.com/install#custom-libvips installing a custom libvips}.
*
* @example
* // Convert any input to lossless JP2 output
* const data = await sharp(input)
* .jp2({ lossless: true })
* .toBuffer();
*
* @example
* // Convert any input to very high quality JP2 output
* const data = await sharp(input)
* .jp2({
* quality: 100,
* chromaSubsampling: '4:4:4'
* })
* .toBuffer();
*
* @since 0.29.1
*
* @param {Object} [options] - output options
* @param {number} [options.quality=80] - quality, integer 1-100
* @param {boolean} [options.lossless=false] - use lossless compression mode
* @param {number} [options.tileWidth=512] - horizontal tile size
* @param {number} [options.tileHeight=512] - vertical tile size
* @param {string} [options.chromaSubsampling='4:4:4'] - set to '4:2:0' to use chroma subsampling
* @returns {Sharp}
* @throws {Error} Invalid options
*/
/* istanbul ignore next */
function jp2 (options) {
if (!this.constructor.format.jp2k.output.buffer) {
throw errJp2Save;
}
if (is.object(options)) {
if (is.defined(options.quality)) {
if (is.integer(options.quality) && is.inRange(options.quality, 1, 100)) {
this.options.jp2Quality = options.quality;
} else {
throw is.invalidParameterError('quality', 'integer between 1 and 100', options.quality);
}
}
if (is.defined(options.lossless)) {
if (is.bool(options.lossless)) {
this.options.jp2Lossless = options.lossless;
} else {
throw is.invalidParameterError('lossless', 'boolean', options.lossless);
}
}
if (is.defined(options.tileWidth)) {
if (is.integer(options.tileWidth) && is.inRange(options.tileWidth, 1, 32768)) {
this.options.jp2TileWidth = options.tileWidth;
} else {
throw is.invalidParameterError('tileWidth', 'integer between 1 and 32768', options.tileWidth);
}
}
if (is.defined(options.tileHeight)) {
if (is.integer(options.tileHeight) && is.inRange(options.tileHeight, 1, 32768)) {
this.options.jp2TileHeight = options.tileHeight;
} else {
throw is.invalidParameterError('tileHeight', 'integer between 1 and 32768', options.tileHeight);
}
}
if (is.defined(options.chromaSubsampling)) {
if (is.string(options.chromaSubsampling) && is.inArray(options.chromaSubsampling, ['4:2:0', '4:4:4'])) {
this.options.heifChromaSubsampling = options.chromaSubsampling;
} else {
throw is.invalidParameterError('chromaSubsampling', 'one of: 4:2:0, 4:4:4', options.chromaSubsampling);
}
}
}
return this._updateFormatOut('jp2', options);
}
/**
* Set animation options if available.
* @private
@@ -550,6 +637,8 @@ function trySetAnimationOptions (source, target) {
/**
* Use these TIFF options for output image.
*
* The `density` can be set in pixels/inch via {@link withMetadata} instead of providing `xres` and `yres` in pixels/mm.
*
* @example
* // Convert SVG input to LZW-compressed, 1 bit per pixel TIFF output
* sharp('input.svg')
@@ -654,13 +743,15 @@ function tiff (options) {
* Whilst it is possible to create AVIF images smaller than 16x16 pixels,
* most web browsers do not display these properly.
*
* AVIF image sequences are not supported.
*
* @since 0.27.0
*
* @param {Object} [options] - output options
* @param {number} [options.quality=50] - quality, integer 1-100
* @param {boolean} [options.lossless=false] - use lossless compression
* @param {number} [options.speed=5] - CPU effort vs file size, 0 (slowest/smallest) to 8 (fastest/largest)
* @param {string} [options.chromaSubsampling='4:2:0'] - set to '4:4:4' to prevent chroma subsampling otherwise defaults to '4:2:0' chroma subsampling, requires libvips v8.11.0
* @param {number} [options.speed=5] - CPU effort vs file size, 0 (slowest/smallest) to 9 (fastest/largest)
* @param {string} [options.chromaSubsampling='4:4:4'] - set to '4:2:0' to use chroma subsampling
* @returns {Sharp}
* @throws {Error} Invalid options
*/
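A minimal sketch of AVIF output using the newly-allowed speed value of 9; the input path and option values are illustrative.

```js
const sharp = require('sharp');

sharp('input.jpg')
  .avif({ quality: 50, speed: 9, chromaSubsampling: '4:2:0' })
  .toBuffer()
  .then((data) => console.log(`AVIF output: ${data.length} bytes`))
  .catch(console.error);
```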
@@ -680,8 +771,8 @@ function avif (options) {
* @param {number} [options.quality=50] - quality, integer 1-100
* @param {string} [options.compression='av1'] - compression format: av1, hevc
* @param {boolean} [options.lossless=false] - use lossless compression
* @param {number} [options.speed=5] - CPU effort vs file size, 0 (slowest/smallest) to 8 (fastest/largest)
* @param {string} [options.chromaSubsampling='4:2:0'] - set to '4:4:4' to prevent chroma subsampling otherwise defaults to '4:2:0' chroma subsampling, requires libvips v8.11.0
* @param {number} [options.speed=5] - CPU effort vs file size, 0 (slowest/smallest) to 9 (fastest/largest)
* @param {string} [options.chromaSubsampling='4:4:4'] - set to '4:2:0' to use chroma subsampling
* @returns {Sharp}
* @throws {Error} Invalid options
*/
@@ -709,10 +800,10 @@ function heif (options) {
}
}
if (is.defined(options.speed)) {
if (is.integer(options.speed) && is.inRange(options.speed, 0, 8)) {
if (is.integer(options.speed) && is.inRange(options.speed, 0, 9)) {
this.options.heifSpeed = options.speed;
} else {
throw is.invalidParameterError('speed', 'integer between 0 and 8', options.speed);
throw is.invalidParameterError('speed', 'integer between 0 and 9', options.speed);
}
}
if (is.defined(options.chromaSubsampling)) {
@@ -727,28 +818,41 @@ function heif (options) {
}
/**
* Force output to be raw, uncompressed, 8-bit unsigned integer (uint8) pixel data.
* Force output to be raw, uncompressed pixel data.
* Pixel ordering is left-to-right, top-to-bottom, without padding.
* Channel ordering will be RGB or RGBA for non-greyscale colourspaces.
*
* @example
* // Extract raw RGB pixel data from JPEG input
* // Extract raw, unsigned 8-bit RGB pixel data from JPEG input
* const { data, info } = await sharp('input.jpg')
* .raw()
* .toBuffer({ resolveWithObject: true });
*
* @example
* // Extract alpha channel as raw pixel data from PNG input
* // Extract alpha channel as raw, unsigned 16-bit pixel data from PNG input
* const data = await sharp('input.png')
* .ensureAlpha()
* .extractChannel(3)
* .toColourspace('b-w')
* .raw()
* .raw({ depth: 'ushort' })
* .toBuffer();
*
* @returns {Sharp}
* @param {Object} [options] - output options
* @param {string} [options.depth='uchar'] - bit depth, one of: char, uchar (default), short, ushort, int, uint, float, complex, double, dpcomplex
* @throws {Error} Invalid options
*/
function raw () {
function raw (options) {
if (is.object(options)) {
if (is.defined(options.depth)) {
if (is.string(options.depth) && is.inArray(options.depth,
['char', 'uchar', 'short', 'ushort', 'int', 'uint', 'float', 'complex', 'double', 'dpcomplex']
)) {
this.options.rawDepth = options.depth;
} else {
throw is.invalidParameterError('depth', 'one of: char, uchar, short, ushort, int, uint, float, complex, double, dpcomplex', options.depth);
}
}
}
return this._updateFormatOut('raw');
}
@@ -873,6 +977,31 @@ function tile (options) {
return this._updateFormatOut('dz');
}
/**
* Set a timeout for processing, in seconds.
* Use a value of zero to continue processing indefinitely, the default behaviour.
*
* The clock starts when libvips opens an input image for processing.
* Time spent waiting for a libuv thread to become available is not included.
*
* @since 0.29.2
*
* @param {Object} options
* @param {number} options.seconds - Number of seconds after which processing will be stopped
* @returns {Sharp}
*/
function timeout (options) {
if (!is.plainObject(options)) {
throw is.invalidParameterError('options', 'object', options);
}
if (is.integer(options.seconds) && is.inRange(options.seconds, 0, 3600)) {
this.options.timeoutSeconds = options.seconds;
} else {
throw is.invalidParameterError('seconds', 'integer between 0 and 3600', options.seconds);
}
return this;
}
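A minimal sketch of limiting processing time; the 5-second limit and file names are illustrative.

```js
const sharp = require('sharp');

sharp('large-input.tiff')
  .resize(1024)
  .timeout({ seconds: 5 })
  .toFile('resized.jpg')
  .catch((err) => {
    // A timeout surfaces as a libvips error, e.g. "timeout: 42% complete"
    console.error(err);
  });
```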
/**
* Update the output format unless options.force is false,
* in which case revert to input format.
@@ -1018,6 +1147,7 @@ module.exports = function (Sharp) {
withMetadata,
toFormat,
jpeg,
jp2,
png,
webp,
tiff,
@@ -1026,6 +1156,7 @@ module.exports = function (Sharp) {
gif,
raw,
tile,
timeout,
// Private
_updateFormatOut,
_setBooleanOption,

lib/sharp.js Normal file
View File

@@ -0,0 +1,31 @@
'use strict';
const platformAndArch = require('./platform')();
/* istanbul ignore next */
try {
module.exports = require(`../build/Release/sharp-${platformAndArch}.node`);
} catch (err) {
// Bail early if bindings aren't available
const help = ['', 'Something went wrong installing the "sharp" module', '', err.message, '', 'Possible solutions:'];
if (/dylib/.test(err.message) && /Incompatible library version/.test(err.message)) {
help.push('- Update Homebrew: "brew update && brew upgrade vips"');
} else {
help.push(
'- Install with the --verbose flag and look for errors: "npm install --ignore-scripts=false --verbose sharp"',
`- Install for the current runtime: "npm install --platform=${process.platform} --arch=${process.arch} sharp"`
);
}
help.push(
'- Consult the installation documentation: https://sharp.pixelplumbing.com/install'
);
// Check loaded
if (process.platform === 'win32') {
const loadedModule = Object.keys(require.cache).find((i) => /[\\/]build[\\/]Release[\\/]sharp(.*)\.node$/.test(i));
if (loadedModule) {
const [, loadedPackage] = loadedModule.match(/node_modules[\\/]([^\\/]+)[\\/]/);
help.push(`- Ensure the version of sharp aligns with the ${loadedPackage} package: "npm ls sharp"`);
}
}
throw new Error(help.join('\n'));
}

View File

@@ -4,7 +4,8 @@ const events = require('events');
const detectLibc = require('detect-libc');
const is = require('./is');
const sharp = require('../build/Release/sharp.node');
const platformAndArch = require('./platform')();
const sharp = require('./sharp');
/**
* An Object containing nested boolean values representing the available input and output formats/methods.
@@ -45,7 +46,7 @@ let versions = {
vips: sharp.libvipsVersion()
};
try {
versions = require(`../vendor/${versions.vips}/versions.json`);
versions = require(`../vendor/${versions.vips}/${platformAndArch}/versions.json`);
} catch (err) {}
/**

View File

@@ -1,7 +1,7 @@
{
"name": "sharp",
"description": "High performance Node.js image processing, the fastest module to resize JPEG, PNG, WebP, AVIF and TIFF images",
"version": "0.28.3",
"version": "0.29.3",
"author": "Lovell Fuller <npm@lovell.info>",
"homepage": "https://github.com/lovell/sharp",
"contributors": [
@@ -77,7 +77,9 @@
"alza54 <alza54@thiocod.in>",
"Jacob Smith <jacob@frende.me>",
"Michael Nutt <michael@nutt.im>",
"Brad Parham <baparham@gmail.com>"
"Brad Parham <baparham@gmail.com>",
"Taneli Vatanen <taneli.vatanen@gmail.com>",
"Joris Dugué <zaruike10@gmail.com>"
],
"scripts": {
"install": "(node install/libvips && node install/dll-copy && prebuild-install) || (node install/can-compile && node-gyp rebuild && node install/dll-copy)",
@@ -111,6 +113,7 @@
"tiff",
"gif",
"svg",
"jp2",
"dzi",
"image",
"resize",
@@ -121,45 +124,45 @@
"vips"
],
"dependencies": {
"color": "^3.1.3",
"color": "^4.0.1",
"detect-libc": "^1.0.3",
"node-addon-api": "^3.2.0",
"prebuild-install": "^6.1.2",
"node-addon-api": "^4.2.0",
"prebuild-install": "^7.0.0",
"semver": "^7.3.5",
"simple-get": "^3.1.0",
"simple-get": "^4.0.0",
"tar-fs": "^2.1.1",
"tunnel-agent": "^0.6.0"
},
"devDependencies": {
"async": "^3.2.0",
"async": "^3.2.2",
"cc": "^3.0.1",
"decompress-zip": "^0.3.3",
"documentation": "^13.2.5",
"exif-reader": "^1.0.3",
"icc": "^2.0.0",
"license-checker": "^25.0.1",
"mocha": "^8.4.0",
"mock-fs": "^4.14.0",
"mocha": "^9.1.3",
"mock-fs": "^5.1.2",
"nyc": "^15.1.0",
"prebuild": "^10.0.1",
"prebuild": "^11.0.0",
"rimraf": "^3.0.2",
"semistandard": "^16.0.0"
"semistandard": "^16.0.1"
},
"license": "Apache-2.0",
"config": {
"libvips": "8.10.6",
"libvips": "8.11.3",
"runtime": "napi",
"target": 3
"target": 5
},
"engines": {
"node": ">=10"
"node": ">=12.13.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
},
"binary": {
"napi_versions": [
3
5
]
},
"semistandard": {

View File

@@ -92,6 +92,9 @@ namespace sharp {
}
// Raw pixel input
if (HasAttr(input, "rawChannels")) {
descriptor->rawDepth = static_cast<VipsBandFormat>(
vips_enum_from_nick(nullptr, VIPS_TYPE_BAND_FORMAT,
AttrAsStr(input, "rawDepth").data()));
descriptor->rawChannels = AttrAsUint32(input, "rawChannels");
descriptor->rawWidth = AttrAsUint32(input, "rawWidth");
descriptor->rawHeight = AttrAsUint32(input, "rawHeight");
@@ -154,6 +157,10 @@ namespace sharp {
bool IsGif(std::string const &str) {
return EndsWith(str, ".gif") || EndsWith(str, ".GIF");
}
bool IsJp2(std::string const &str) {
return EndsWith(str, ".jp2") || EndsWith(str, ".jpx") || EndsWith(str, ".j2k") || EndsWith(str, ".j2c")
|| EndsWith(str, ".JP2") || EndsWith(str, ".JPX") || EndsWith(str, ".J2K") || EndsWith(str, ".J2C");
}
bool IsTiff(std::string const &str) {
return EndsWith(str, ".tif") || EndsWith(str, ".tiff") || EndsWith(str, ".TIF") || EndsWith(str, ".TIFF");
}
@@ -187,6 +194,7 @@ namespace sharp {
case ImageType::WEBP: id = "webp"; break;
case ImageType::TIFF: id = "tiff"; break;
case ImageType::GIF: id = "gif"; break;
case ImageType::JP2: id = "jp2"; break;
case ImageType::SVG: id = "svg"; break;
case ImageType::HEIF: id = "heif"; break;
case ImageType::PDF: id = "pdf"; break;
@@ -223,6 +231,8 @@ namespace sharp {
{ "VipsForeignLoadGifBuffer", ImageType::GIF },
{ "VipsForeignLoadNsgifFile", ImageType::GIF },
{ "VipsForeignLoadNsgifBuffer", ImageType::GIF },
{ "VipsForeignLoadJp2kBuffer", ImageType::JP2 },
{ "VipsForeignLoadJp2kFile", ImageType::JP2 },
{ "VipsForeignLoadSvgFile", ImageType::SVG },
{ "VipsForeignLoadSvgBuffer", ImageType::SVG },
{ "VipsForeignLoadHeifFile", ImageType::HEIF },
@@ -231,6 +241,8 @@ namespace sharp {
{ "VipsForeignLoadPdfBuffer", ImageType::PDF },
{ "VipsForeignLoadMagickFile", ImageType::MAGICK },
{ "VipsForeignLoadMagickBuffer", ImageType::MAGICK },
{ "VipsForeignLoadMagick7File", ImageType::MAGICK },
{ "VipsForeignLoadMagick7Buffer", ImageType::MAGICK },
{ "VipsForeignLoadOpenslide", ImageType::OPENSLIDE },
{ "VipsForeignLoadPpmFile", ImageType::PPM },
{ "VipsForeignLoadFits", ImageType::FITS },
@@ -282,6 +294,7 @@ namespace sharp {
imageType == ImageType::WEBP ||
imageType == ImageType::MAGICK ||
imageType == ImageType::GIF ||
imageType == ImageType::JP2 ||
imageType == ImageType::TIFF ||
imageType == ImageType::HEIF ||
imageType == ImageType::PDF;
@@ -297,7 +310,7 @@ namespace sharp {
if (descriptor->rawChannels > 0) {
// Raw, uncompressed pixel data
image = VImage::new_from_memory(descriptor->buffer, descriptor->bufferLength,
descriptor->rawWidth, descriptor->rawHeight, descriptor->rawChannels, VIPS_FORMAT_UCHAR);
descriptor->rawWidth, descriptor->rawHeight, descriptor->rawChannels, descriptor->rawDepth);
if (descriptor->rawChannels < 3) {
image.get_image()->Type = VIPS_INTERPRETATION_B_W;
} else {
@@ -505,6 +518,17 @@ namespace sharp {
return copy;
}
/*
Remove animation properties from image.
*/
VImage RemoveAnimationProperties(VImage image) {
VImage copy = image.copy();
copy.remove(VIPS_META_PAGE_HEIGHT);
copy.remove("delay");
copy.remove("loop");
return copy;
}
/*
Does this image have a non-default density?
*/
@@ -586,6 +610,33 @@ namespace sharp {
return warning;
}
/*
Attach an event listener for progress updates, used to detect timeout
*/
void SetTimeout(VImage image, int const seconds) {
if (seconds > 0) {
VipsImage *im = image.get_image();
if (im->progress_signal == NULL) {
int *timeout = VIPS_NEW(im, int);
*timeout = seconds;
g_signal_connect(im, "eval", G_CALLBACK(VipsProgressCallBack), timeout);
vips_image_set_progress(im, TRUE);
}
}
}
/*
Event listener for progress updates, used to detect timeout
*/
void VipsProgressCallBack(VipsImage *im, VipsProgress *progress, int *timeout) {
// printf("VipsProgressCallBack progress=%d run=%d timeout=%d\n", progress->percent, progress->run, *timeout);
if (*timeout > 0 && progress->run >= *timeout) {
vips_image_set_kill(im, TRUE);
vips_error("timeout", "%d%% complete", progress->percent);
*timeout = 0;
}
}
/*
Calculate the (left, top) coordinates of the output image
within the input image, applying the given gravity during an embed.
@@ -754,23 +805,27 @@ namespace sharp {
/*
Convert RGBA value to another colourspace
*/
std::vector<double> GetRgbaAsColourspace(std::vector<double> const rgba, VipsInterpretation const interpretation) {
std::vector<double> GetRgbaAsColourspace(std::vector<double> const rgba,
VipsInterpretation const interpretation, bool premultiply) {
int const bands = static_cast<int>(rgba.size());
if (bands < 3 || interpretation == VIPS_INTERPRETATION_sRGB || interpretation == VIPS_INTERPRETATION_RGB) {
if (bands < 3) {
return rgba;
} else {
VImage pixel = VImage::new_matrix(1, 1);
pixel.set("bands", bands);
pixel = pixel.new_from_image(rgba);
pixel = pixel.colourspace(interpretation, VImage::option()->set("source_space", VIPS_INTERPRETATION_sRGB));
return pixel(0, 0);
}
VImage pixel = VImage::new_matrix(1, 1);
pixel.set("bands", bands);
pixel = pixel
.new_from_image(rgba)
.colourspace(interpretation, VImage::option()->set("source_space", VIPS_INTERPRETATION_sRGB));
if (premultiply) {
pixel = pixel.premultiply();
}
return pixel(0, 0);
}
/*
Apply the alpha channel to a given colour
*/
std::tuple<VImage, std::vector<double>> ApplyAlpha(VImage image, std::vector<double> colour) {
std::tuple<VImage, std::vector<double>> ApplyAlpha(VImage image, std::vector<double> colour, bool premultiply) {
// Scale up 8-bit values to match 16-bit input image
double const multiplier = sharp::Is16Bit(image.interpretation()) ? 256.0 : 1.0;
// Create alphaColour colour
@@ -794,7 +849,7 @@ namespace sharp {
alphaColour.push_back(colour[3] * multiplier);
}
// Ensure alphaColour colour uses correct colourspace
alphaColour = sharp::GetRgbaAsColourspace(alphaColour, image.interpretation());
alphaColour = sharp::GetRgbaAsColourspace(alphaColour, image.interpretation(), premultiply);
// Add non-transparent alpha channel, if required
if (colour[3] < 255.0 && !HasAlpha(image)) {
image = image.bandjoin(
@@ -824,4 +879,5 @@ namespace sharp {
}
return image;
}
} // namespace sharp

View File

@@ -25,9 +25,9 @@
// Verify platform and compiler compatibility
#if (VIPS_MAJOR_VERSION < 8) || \
(VIPS_MAJOR_VERSION == 8 && VIPS_MINOR_VERSION < 10) || \
(VIPS_MAJOR_VERSION == 8 && VIPS_MINOR_VERSION == 10 && VIPS_MICRO_VERSION < 6)
#error "libvips version 8.10.6+ is required - please see https://sharp.pixelplumbing.com/install"
(VIPS_MAJOR_VERSION == 8 && VIPS_MINOR_VERSION < 11) || \
(VIPS_MAJOR_VERSION == 8 && VIPS_MINOR_VERSION == 11 && VIPS_MICRO_VERSION < 3)
#error "libvips version 8.11.3+ is required - please see https://sharp.pixelplumbing.com/install"
#endif
#if ((!defined(__clang__)) && defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 6)))
@@ -54,6 +54,7 @@ namespace sharp {
size_t bufferLength;
bool isBuffer;
double density;
VipsBandFormat rawDepth;
int rawChannels;
int rawWidth;
int rawHeight;
@@ -78,6 +79,7 @@ namespace sharp {
bufferLength(0),
isBuffer(FALSE),
density(72.0),
rawDepth(VIPS_FORMAT_UCHAR),
rawChannels(0),
rawWidth(0),
rawHeight(0),
@@ -114,6 +116,7 @@ namespace sharp {
JPEG,
PNG,
WEBP,
JP2,
TIFF,
GIF,
SVG,
@@ -140,6 +143,7 @@ namespace sharp {
bool IsJpeg(std::string const &str);
bool IsPng(std::string const &str);
bool IsWebp(std::string const &str);
bool IsJp2(std::string const &str);
bool IsGif(std::string const &str);
bool IsTiff(std::string const &str);
bool IsHeic(std::string const &str);
@@ -206,6 +210,11 @@ namespace sharp {
*/
VImage SetAnimationProperties(VImage image, int pageHeight, std::vector<int> delay, int loop);
/*
Remove animation properties from image.
*/
VImage RemoveAnimationProperties(VImage image);
/*
Does this image have a non-default density?
*/
@@ -241,6 +250,16 @@ namespace sharp {
*/
std::string VipsWarningPop();
/*
Attach an event listener for progress updates, used to detect timeout
*/
void SetTimeout(VImage image, int const timeoutSeconds);
/*
Event listener for progress updates, used to detect timeout
*/
void VipsProgressCallBack(VipsImage *image, VipsProgress *progress, int *timeoutSeconds);
/*
Calculate the (left, top) coordinates of the output image
within the input image, applying the given gravity during an embed.
@@ -286,12 +305,13 @@ namespace sharp {
/*
Convert RGBA value to another colourspace
*/
std::vector<double> GetRgbaAsColourspace(std::vector<double> const rgba, VipsInterpretation const interpretation);
std::vector<double> GetRgbaAsColourspace(std::vector<double> const rgba,
VipsInterpretation const interpretation, bool premultiply);
/*
Apply the alpha channel to a given colour
*/
std::tuple<VImage, std::vector<double>> ApplyAlpha(VImage image, std::vector<double> colour);
std::tuple<VImage, std::vector<double>> ApplyAlpha(VImage image, std::vector<double> colour, bool premultiply);
/*
Removes alpha channel, if any.

View File

@@ -110,19 +110,6 @@ VSource::new_from_options( const char *options )
return( out );
}
VOption *
VOption::set( const char *name, const VSource value )
{
Pair *pair = new Pair( name );
pair->input = true;
g_value_init( &pair->value, VIPS_TYPE_SOURCE );
g_value_set_object( &pair->value, value.get_source() );
options.push_back( pair );
return( this );
}
VTarget
VTarget::new_to_descriptor( int descriptor )
{
@@ -162,17 +149,4 @@ VTarget::new_to_memory()
return( out );
}
VOption *
VOption::set( const char *name, const VTarget value )
{
Pair *pair = new Pair( name );
pair->input = true;
g_value_init( &pair->value, VIPS_TYPE_TARGET );
g_value_set_object( &pair->value, value.get_target() );
options.push_back( pair );
return( this );
}
VIPS_NAMESPACE_END

View File

@@ -51,6 +51,12 @@
VIPS_NAMESPACE_START
/**
* \namespace vips
*
* General docs for the vips namespace.
*/
std::vector<double>
to_vectorv( int n, ... )
{
@@ -140,6 +146,20 @@ VOption::set( const char *name, int value )
return( this );
}
// input guint64
VOption *
VOption::set( const char *name, guint64 value )
{
Pair *pair = new Pair( name );
pair->input = true;
g_value_init( &pair->value, G_TYPE_UINT64 );
g_value_set_uint64( &pair->value, value );
options.push_back( pair );
return( this );
}
// input double
VOption *
VOption::set( const char *name, double value )
@@ -167,39 +187,17 @@ VOption::set( const char *name, const char *value )
return( this );
}
// input image
// input vips object (image, source, target, etc. etc.)
VOption *
VOption::set( const char *name, const VImage value )
VOption::set( const char *name, const VObject value )
{
Pair *pair = new Pair( name );
VipsObject *object = value.get_object();
GType type = G_OBJECT_TYPE( object );
pair->input = true;
g_value_init( &pair->value, VIPS_TYPE_IMAGE );
g_value_set_object( &pair->value, value.get_image() );
options.push_back( pair );
return( this );
}
// input double array
VOption *
VOption::set( const char *name, std::vector<double> value )
{
Pair *pair = new Pair( name );
double *array;
unsigned int i;
pair->input = true;
g_value_init( &pair->value, VIPS_TYPE_ARRAY_DOUBLE );
vips_value_set_array_double( &pair->value, NULL,
static_cast< int >( value.size() ) );
array = vips_value_get_array_double( &pair->value, NULL );
for( i = 0; i < value.size(); i++ )
array[i] = value[i];
g_value_init( &pair->value, type );
g_value_set_object( &pair->value, object );
options.push_back( pair );
return( this );
@@ -229,6 +227,30 @@ VOption::set( const char *name, std::vector<int> value )
return( this );
}
// input double array
VOption *
VOption::set( const char *name, std::vector<double> value )
{
Pair *pair = new Pair( name );
double *array;
unsigned int i;
pair->input = true;
g_value_init( &pair->value, VIPS_TYPE_ARRAY_DOUBLE );
vips_value_set_array_double( &pair->value, NULL,
static_cast< int >( value.size() ) );
array = vips_value_get_array_double( &pair->value, NULL );
for( i = 0; i < value.size(); i++ )
array[i] = value[i];
options.push_back( pair );
return( this );
}
// input image array
VOption *
VOption::set( const char *name, std::vector<VImage> value )
@@ -619,6 +641,22 @@ VImage::new_from_source( VSource source, const char *option_string,
return( out );
}
VImage
VImage::new_from_memory_steal( void *data, size_t size,
int width, int height, int bands, VipsBandFormat format )
{
VipsImage *image;
if( !(image = vips_image_new_from_memory( data, size,
width, height, bands, format )) )
throw( VError() );
g_signal_connect( image, "postclose",
G_CALLBACK( vips_image_free_buffer ), data);
return( VImage( image ) );
}
VImage
VImage::new_matrix( int width, int height )
{

View File

@@ -60,17 +60,4 @@ VInterpolate::new_from_name( const char *name, VOption *options )
return( out );
}
VOption *
VOption::set( const char *name, const VInterpolate value )
{
Pair *pair = new Pair( name );
pair->input = true;
g_value_init( &pair->value, VIPS_TYPE_INTERPOLATE );
g_value_set_object( &pair->value, value.get_interpolate() );
options.push_back( pair );
return( this );
}
VIPS_NAMESPACE_END

View File

@@ -1,5 +1,5 @@
// bodies for vips operations
// Sun 5 Jul 22:36:37 BST 2020
// Wed May 12 11:30:00 AM CEST 2021
// this file is generated automatically, do not edit!
VImage VImage::CMC2LCh( VOption *options ) const
@@ -1065,6 +1065,18 @@ VImage VImage::fitsload( const char *filename, VOption *options )
return( out );
}
VImage VImage::fitsload_source( VSource source, VOption *options )
{
VImage out;
call( "fitsload_source",
(options ? options : VImage::option())->
set( "out", &out )->
set( "source", source ) );
return( out );
}
void VImage::fitssave( const char *filename, VOption *options ) const
{
call( "fitssave",
@@ -1656,6 +1668,70 @@ VImage VImage::join( VImage in2, VipsDirection direction, VOption *options ) con
return( out );
}
VImage VImage::jp2kload( const char *filename, VOption *options )
{
VImage out;
call( "jp2kload",
(options ? options : VImage::option())->
set( "out", &out )->
set( "filename", filename ) );
return( out );
}
VImage VImage::jp2kload_buffer( VipsBlob *buffer, VOption *options )
{
VImage out;
call( "jp2kload_buffer",
(options ? options : VImage::option())->
set( "out", &out )->
set( "buffer", buffer ) );
return( out );
}
VImage VImage::jp2kload_source( VSource source, VOption *options )
{
VImage out;
call( "jp2kload_source",
(options ? options : VImage::option())->
set( "out", &out )->
set( "source", source ) );
return( out );
}
void VImage::jp2ksave( const char *filename, VOption *options ) const
{
call( "jp2ksave",
(options ? options : VImage::option())->
set( "in", *this )->
set( "filename", filename ) );
}
VipsBlob *VImage::jp2ksave_buffer( VOption *options ) const
{
VipsBlob *buffer;
call( "jp2ksave_buffer",
(options ? options : VImage::option())->
set( "in", *this )->
set( "buffer", &buffer ) );
return( buffer );
}
void VImage::jp2ksave_target( VTarget target, VOption *options ) const
{
call( "jp2ksave_target",
(options ? options : VImage::option())->
set( "in", *this )->
set( "target", target ) );
}
VImage VImage::jpegload( const char *filename, VOption *options )
{
VImage out;
@@ -1727,6 +1803,70 @@ void VImage::jpegsave_target( VTarget target, VOption *options ) const
set( "target", target ) );
}
VImage VImage::jxlload( const char *filename, VOption *options )
{
VImage out;
call( "jxlload",
(options ? options : VImage::option())->
set( "out", &out )->
set( "filename", filename ) );
return( out );
}
VImage VImage::jxlload_buffer( VipsBlob *buffer, VOption *options )
{
VImage out;
call( "jxlload_buffer",
(options ? options : VImage::option())->
set( "out", &out )->
set( "buffer", buffer ) );
return( out );
}
VImage VImage::jxlload_source( VSource source, VOption *options )
{
VImage out;
call( "jxlload_source",
(options ? options : VImage::option())->
set( "out", &out )->
set( "source", source ) );
return( out );
}
void VImage::jxlsave( const char *filename, VOption *options ) const
{
call( "jxlsave",
(options ? options : VImage::option())->
set( "in", *this )->
set( "filename", filename ) );
}
VipsBlob *VImage::jxlsave_buffer( VOption *options ) const
{
VipsBlob *buffer;
call( "jxlsave_buffer",
(options ? options : VImage::option())->
set( "in", *this )->
set( "buffer", &buffer ) );
return( buffer );
}
void VImage::jxlsave_target( VTarget target, VOption *options ) const
{
call( "jxlsave_target",
(options ? options : VImage::option())->
set( "in", *this )->
set( "target", target ) );
}
VImage VImage::labelregions( VOption *options ) const
{
VImage mask;
@@ -2284,6 +2424,18 @@ VImage VImage::niftiload( const char *filename, VOption *options )
return( out );
}
VImage VImage::niftiload_source( VSource source, VOption *options )
{
VImage out;
call( "niftiload_source",
(options ? options : VImage::option())->
set( "out", &out )->
set( "source", source ) );
return( out );
}
void VImage::niftisave( const char *filename, VOption *options ) const
{
call( "niftisave",
@@ -2316,6 +2468,18 @@ VImage VImage::openslideload( const char *filename, VOption *options )
return( out );
}
VImage VImage::openslideload_source( VSource source, VOption *options )
{
VImage out;
call( "openslideload_source",
(options ? options : VImage::option())->
set( "out", &out )->
set( "source", source ) );
return( out );
}
VImage VImage::pdfload( const char *filename, VOption *options )
{
VImage out;
@@ -3388,6 +3552,18 @@ VImage VImage::vipsload( const char *filename, VOption *options )
return( out );
}
VImage VImage::vipsload_source( VSource source, VOption *options )
{
VImage out;
call( "vipsload_source",
(options ? options : VImage::option())->
set( "out", &out )->
set( "source", source ) );
return( out );
}
void VImage::vipssave( const char *filename, VOption *options ) const
{
call( "vipssave",
@@ -3396,6 +3572,14 @@ void VImage::vipssave( const char *filename, VOption *options ) const
set( "filename", filename ) );
}
void VImage::vipssave_target( VTarget target, VOption *options ) const
{
call( "vipssave_target",
(options ? options : VImage::option())->
set( "in", *this )->
set( "target", target ) );
}
VImage VImage::webpload( const char *filename, VOption *options )
{
VImage out;

View File

@@ -90,6 +90,9 @@ class MetadataWorker : public Napi::AsyncWorker {
baton->subifds = image.get_int(VIPS_META_N_SUBIFDS);
}
baton->hasProfile = sharp::HasProfile(image);
if (image.get_typeof("background") == VIPS_TYPE_ARRAY_DOUBLE) {
baton->background = image.get_array_double("background");
}
// Derived attributes
baton->hasAlpha = sharp::HasAlpha(image);
baton->orientation = sharp::ExifOrientation(image);
@@ -209,6 +212,17 @@ class MetadataWorker : public Napi::AsyncWorker {
if (baton->subifds > 0) {
info.Set("subifds", baton->subifds);
}
if (!baton->background.empty()) {
if (baton->background.size() == 3) {
Napi::Object background = Napi::Object::New(env);
background.Set("r", baton->background[0]);
background.Set("g", baton->background[1]);
background.Set("b", baton->background[2]);
info.Set("background", background);
} else {
info.Set("background", baton->background[0]);
}
}
info.Set("hasProfile", baton->hasProfile);
info.Set("hasAlpha", baton->hasAlpha);
if (baton->orientation > 0) {

View File

@@ -42,6 +42,7 @@ struct MetadataBaton {
std::string compression;
std::vector<std::pair<int, int>> levels;
int subifds;
std::vector<double> background;
bool hasProfile;
bool hasAlpha;
int orientation;

View File

@@ -112,6 +112,19 @@ namespace sharp {
}
}
/**
* Produce the "negative" of the image.
*/
VImage Negate(VImage image, bool const negateAlpha) {
if (HasAlpha(image) && !negateAlpha) {
// Separate alpha channel
VImage alpha = image[image.bands() - 1];
return RemoveAlpha(image).invert().bandjoin(alpha);
} else {
return image.invert();
}
}
/*
* Gaussian blur. Use sigma of -1.0 for fast blur.
*/
@@ -169,7 +182,8 @@ namespace sharp {
0.0, 0.0, 0.0, 1.0));
}
VImage Modulate(VImage image, double const brightness, double const saturation, int const hue) {
VImage Modulate(VImage image, double const brightness, double const saturation,
int const hue, double const lightness) {
if (HasAlpha(image)) {
// Separate alpha channel
VImage alpha = image[image.bands() - 1];
@@ -177,7 +191,7 @@ namespace sharp {
.colourspace(VIPS_INTERPRETATION_LCH)
.linear(
{ brightness, saturation, 1},
{ 0.0, 0.0, static_cast<double>(hue) }
{ lightness, 0.0, static_cast<double>(hue) }
)
.colourspace(VIPS_INTERPRETATION_sRGB)
.bandjoin(alpha);
@@ -186,7 +200,7 @@ namespace sharp {
.colourspace(VIPS_INTERPRETATION_LCH)
.linear(
{ brightness, saturation, 1 },
{ 0.0, 0.0, static_cast<double>(hue) }
{ lightness, 0.0, static_cast<double>(hue) }
)
.colourspace(VIPS_INTERPRETATION_sRGB);
}
@@ -282,4 +296,16 @@ namespace sharp {
return image.linear(a, b);
}
}
/*
* Ensure the image is in a given colourspace
*/
VImage EnsureColourspace(VImage image, VipsInterpretation colourspace) {
if (colourspace != VIPS_INTERPRETATION_LAST && image.interpretation() != colourspace) {
image = image.colourspace(colourspace,
VImage::option()->set("source_space", image.interpretation()));
}
return image;
}
} // namespace sharp

View File

@@ -45,6 +45,11 @@ namespace sharp {
*/
VImage Gamma(VImage image, double const exponent);
/*
* Produce the "negative" of the image.
*/
VImage Negate(VImage image, bool const negateAlpha);
/*
* Gaussian blur. Use sigma of -1.0 for fast blur.
*/
@@ -93,9 +98,15 @@ namespace sharp {
VImage Recomb(VImage image, std::unique_ptr<double[]> const &matrix);
/*
* Modulate brightness, saturation and hue
* Modulate brightness, saturation, hue and lightness
*/
VImage Modulate(VImage image, double const brightness, double const saturation, int const hue);
VImage Modulate(VImage image, double const brightness, double const saturation,
int const hue, double const lightness);
/*
* Ensure the image is in a given colourspace
*/
VImage EnsureColourspace(VImage image, VipsInterpretation colourspace);
} // namespace sharp

View File

@@ -67,6 +67,7 @@ class PipelineWorker : public Napi::AsyncWorker {
vips::VImage image;
sharp::ImageType inputImageType;
std::tie(image, inputImageType) = sharp::OpenInput(baton->input);
image = sharp::EnsureColourspace(image, baton->colourspaceInput);
// Calculate angle of rotation
VipsAngle rotation;
@@ -89,7 +90,7 @@ class PipelineWorker : public Napi::AsyncWorker {
}
if (baton->rotationAngle != 0.0) {
std::vector<double> background;
std::tie(image, background) = sharp::ApplyAlpha(image, baton->rotationBackground);
std::tie(image, background) = sharp::ApplyAlpha(image, baton->rotationBackground, FALSE);
image = image.rotate(baton->rotationAngle, VImage::option()->set("background", background));
}
}
@@ -214,7 +215,7 @@ class PipelineWorker : public Napi::AsyncWorker {
double yresidual = static_cast<double>(yshrink) / yfactor;
// If integral x and y shrink are equal, try to use shrink-on-load for JPEG and WebP,
// but not when applying gamma correction, pre-resize extract or trim
// but not when applying gamma correction, pre-resize extract, trim or input colourspace
int shrink_on_load = 1;
int shrink_on_load_factor = 1;
@@ -227,6 +228,7 @@ class PipelineWorker : public Napi::AsyncWorker {
xshrink == yshrink && xshrink >= 2 * shrink_on_load_factor &&
(inputImageType == sharp::ImageType::JPEG || inputImageType == sharp::ImageType::WEBP) &&
baton->gamma == 0 && baton->topOffsetPre == -1 && baton->trimThreshold == 0.0 &&
baton->colourspaceInput == VIPS_INTERPRETATION_LAST &&
image.width() > 3 && image.height() > 3 && baton->input->pages == 1
) {
if (xshrink >= 8 * shrink_on_load_factor) {
@@ -287,16 +289,21 @@ class PipelineWorker : public Napi::AsyncWorker {
yfactor = static_cast<double>(shrunkOnLoadHeight) / static_cast<double>(targetResizeHeight);
}
}
// Remove animation properties from single page images
if (baton->input->pages == 1) {
image = sharp::RemoveAnimationProperties(image);
}
// Ensure we're using a device-independent colour space
char const *processingProfile = image.interpretation() == VIPS_INTERPRETATION_RGB16 ? "p3" : "srgb";
if (
sharp::HasProfile(image) &&
image.interpretation() != VIPS_INTERPRETATION_LABS &&
image.interpretation() != VIPS_INTERPRETATION_GREY16
) {
// Convert to sRGB using embedded profile
// Convert to sRGB/P3 using embedded profile
try {
image = image.icc_transform("srgb", VImage::option()
image = image.icc_transform(processingProfile, VImage::option()
->set("embedded", TRUE)
->set("depth", image.interpretation() == VIPS_INTERPRETATION_RGB16 ? 16 : 8)
->set("intent", VIPS_INTENT_PERCEPTUAL));
@@ -304,7 +311,7 @@ class PipelineWorker : public Napi::AsyncWorker {
// Ignore failure of embedded profile
}
} else if (image.interpretation() == VIPS_INTERPRETATION_CMYK) {
image = image.icc_transform("srgb", VImage::option()
image = image.icc_transform(processingProfile, VImage::option()
->set("input_profile", "cmyk")
->set("intent", VIPS_INTENT_PERCEPTUAL));
}
@@ -325,7 +332,7 @@ class PipelineWorker : public Napi::AsyncWorker {
// Negate the colours in the image
if (baton->negate) {
image = image.invert();
image = sharp::Negate(image, baton->negateAlpha);
}
// Gamma encoding (darken)
@@ -344,7 +351,8 @@ class PipelineWorker : public Napi::AsyncWorker {
bool const shouldSharpen = baton->sharpenSigma != 0.0;
bool const shouldApplyMedian = baton->medianSize > 0;
bool const shouldComposite = !baton->composite.empty();
bool const shouldModulate = baton->brightness != 1.0 || baton->saturation != 1.0 || baton->hue != 0.0;
bool const shouldModulate = baton->brightness != 1.0 || baton->saturation != 1.0 ||
baton->hue != 0.0 || baton->lightness != 0.0;
bool const shouldApplyClahe = baton->claheWidth != 0 && baton->claheHeight != 0;
if (shouldComposite && !sharp::HasAlpha(image)) {
@@ -374,11 +382,15 @@ class PipelineWorker : public Napi::AsyncWorker {
// Ensure shortest edge is at least 1 pixel
if (image.width() / xfactor < 0.5) {
xfactor = 2 * image.width();
baton->width = 1;
if (baton->canvas != Canvas::EMBED) {
baton->width = 1;
}
}
if (image.height() / yfactor < 0.5) {
yfactor = 2 * image.height();
baton->height = 1;
if (baton->canvas != Canvas::EMBED) {
baton->height = 1;
}
}
image = image.resize(1.0 / xfactor, VImage::option()
->set("vscale", 1.0 / yfactor)
@@ -411,6 +423,7 @@ class PipelineWorker : public Napi::AsyncWorker {
for (unsigned int i = 0; i < baton->joinChannelIn.size(); i++) {
std::tie(joinImage, joinImageType) = sharp::OpenInput(baton->joinChannelIn[i]);
joinImage = sharp::EnsureColourspace(joinImage, baton->colourspaceInput);
image = image.bandjoin(joinImage);
}
image = image.copy(VImage::option()->set("interpretation", baton->colourspace));
@@ -420,7 +433,7 @@ class PipelineWorker : public Napi::AsyncWorker {
if (image.width() != baton->width || image.height() != baton->height) {
if (baton->canvas == Canvas::EMBED) {
std::vector<double> background;
std::tie(image, background) = sharp::ApplyAlpha(image, baton->resizeBackground);
std::tie(image, background) = sharp::ApplyAlpha(image, baton->resizeBackground, shouldPremultiplyAlpha);
// Embed
@@ -477,7 +490,7 @@ class PipelineWorker : public Napi::AsyncWorker {
// Rotate post-extract non-90 angle
if (!baton->rotateBeforePreExtract && baton->rotationAngle != 0.0) {
std::vector<double> background;
std::tie(image, background) = sharp::ApplyAlpha(image, baton->rotationBackground);
std::tie(image, background) = sharp::ApplyAlpha(image, baton->rotationBackground, shouldPremultiplyAlpha);
image = image.rotate(baton->rotationAngle, VImage::option()->set("background", background));
}
@@ -490,7 +503,7 @@ class PipelineWorker : public Napi::AsyncWorker {
// Affine transform
if (baton->affineMatrix.size() > 0) {
std::vector<double> background;
std::tie(image, background) = sharp::ApplyAlpha(image, baton->affineBackground);
std::tie(image, background) = sharp::ApplyAlpha(image, baton->affineBackground, shouldPremultiplyAlpha);
image = image.affine(baton->affineMatrix, VImage::option()->set("background", background)
->set("idx", baton->affineIdx)
->set("idy", baton->affineIdy)
@@ -502,7 +515,7 @@ class PipelineWorker : public Napi::AsyncWorker {
// Extend edges
if (baton->extendTop > 0 || baton->extendBottom > 0 || baton->extendLeft > 0 || baton->extendRight > 0) {
std::vector<double> background;
std::tie(image, background) = sharp::ApplyAlpha(image, baton->extendBackground);
std::tie(image, background) = sharp::ApplyAlpha(image, baton->extendBackground, shouldPremultiplyAlpha);
// Embed
baton->width = image.width() + baton->extendLeft + baton->extendRight;
@@ -539,7 +552,7 @@ class PipelineWorker : public Napi::AsyncWorker {
}
if (shouldModulate) {
image = sharp::Modulate(image, baton->brightness, baton->saturation, baton->hue);
image = sharp::Modulate(image, baton->brightness, baton->saturation, baton->hue, baton->lightness);
}
// Sharpen
@@ -552,7 +565,8 @@ class PipelineWorker : public Napi::AsyncWorker {
for (Composite *composite : baton->composite) {
VImage compositeImage;
sharp::ImageType compositeImageType = sharp::ImageType::UNKNOWN;
std::tie(compositeImage, compositeImageType) = OpenInput(composite->input);
std::tie(compositeImage, compositeImageType) = sharp::OpenInput(composite->input);
compositeImage = sharp::EnsureColourspace(compositeImage, baton->colourspaceInput);
// Verify within current dimensions
if (compositeImage.width() > image.width() || compositeImage.height() > image.height()) {
throw vips::VError("Image to composite must have same dimensions or smaller");
@@ -661,6 +675,7 @@ class PipelineWorker : public Napi::AsyncWorker {
VImage booleanImage;
sharp::ImageType booleanImageType = sharp::ImageType::UNKNOWN;
std::tie(booleanImage, booleanImageType) = sharp::OpenInput(baton->boolean);
booleanImage = sharp::EnsureColourspace(booleanImage, baton->colourspaceInput);
image = sharp::Boolean(image, booleanImage, baton->booleanOp);
}
@@ -710,9 +725,10 @@ class PipelineWorker : public Napi::AsyncWorker {
// Convert colourspace, pass the current known interpretation so libvips doesn't have to guess
image = image.colourspace(baton->colourspace, VImage::option()->set("source_space", image.interpretation()));
// Transform colours from embedded profile to output profile
if (baton->withMetadata && sharp::HasProfile(image)) {
image = image.icc_transform(vips_enum_nick(VIPS_TYPE_INTERPRETATION, baton->colourspace),
VImage::option()->set("embedded", TRUE));
if (baton->withMetadata && sharp::HasProfile(image) && baton->withMetadataIcc.empty()) {
image = image.icc_transform("srgb", VImage::option()
->set("embedded", TRUE)
->set("intent", VIPS_INTENT_PERCEPTUAL));
}
}
@@ -721,7 +737,8 @@ class PipelineWorker : public Napi::AsyncWorker {
image = image.icc_transform(
const_cast<char*>(baton->withMetadataIcc.data()),
VImage::option()
->set("input_profile", "srgb")
->set("input_profile", processingProfile)
->set("embedded", TRUE)
->set("intent", VIPS_INTENT_PERCEPTUAL));
}
// Override EXIF Orientation tag
@@ -755,6 +772,7 @@ class PipelineWorker : public Napi::AsyncWorker {
baton->loop);
// Output
sharp::SetTimeout(image, baton->timeoutSeconds);
if (baton->fileOut.empty()) {
// Buffer output
if (baton->formatOut == "jpeg" || (baton->formatOut == "input" && inputImageType == sharp::ImageType::JPEG)) {
@@ -765,8 +783,8 @@ class PipelineWorker : public Napi::AsyncWorker {
->set("Q", baton->jpegQuality)
->set("interlace", baton->jpegProgressive)
->set("subsample_mode", baton->jpegChromaSubsampling == "4:4:4"
? VIPS_FOREIGN_JPEG_SUBSAMPLE_OFF
: VIPS_FOREIGN_JPEG_SUBSAMPLE_ON)
? VIPS_FOREIGN_SUBSAMPLE_OFF
: VIPS_FOREIGN_SUBSAMPLE_ON)
->set("trellis_quant", baton->jpegTrellisQuantisation)
->set("quant_table", baton->jpegQuantisationTable)
->set("overshoot_deringing", baton->jpegOvershootDeringing)
@@ -782,6 +800,22 @@ class PipelineWorker : public Napi::AsyncWorker {
} else {
baton->channels = std::min(baton->channels, 3);
}
} else if (baton->formatOut == "jp2" || (baton->formatOut == "input"
&& inputImageType == sharp::ImageType::JP2)) {
// Write JP2 to Buffer
sharp::AssertImageTypeDimensions(image, sharp::ImageType::JP2);
VipsArea *area = reinterpret_cast<VipsArea*>(image.jp2ksave_buffer(VImage::option()
->set("Q", baton->jp2Quality)
->set("lossless", baton->jp2Lossless)
->set("subsample_mode", baton->jp2ChromaSubsampling == "4:4:4"
? VIPS_FOREIGN_SUBSAMPLE_OFF : VIPS_FOREIGN_SUBSAMPLE_ON)
->set("tile_height", baton->jp2TileHeight)
->set("tile_width", baton->jp2TileWidth)));
baton->bufferOut = static_cast<char*>(area->data);
baton->bufferOutLength = area->length;
area->free_fn = nullptr;
vips_area_unref(area);
baton->formatOut = "jp2";
} else if (baton->formatOut == "png" || (baton->formatOut == "input" &&
(inputImageType == sharp::ImageType::PNG || (inputImageType == sharp::ImageType::GIF && !supportsGifOutput) ||
inputImageType == sharp::ImageType::SVG))) {
@@ -794,7 +828,7 @@ class PipelineWorker : public Napi::AsyncWorker {
->set("filter", baton->pngAdaptiveFiltering ? VIPS_FOREIGN_PNG_FILTER_ALL : VIPS_FOREIGN_PNG_FILTER_NONE)
->set("palette", baton->pngPalette)
->set("Q", baton->pngQuality)
->set("colours", baton->pngColours)
->set("bitdepth", baton->pngBitdepth)
->set("dither", baton->pngDither)));
baton->bufferOut = static_cast<char*>(area->data);
baton->bufferOutLength = area->length;
@@ -863,15 +897,14 @@ class PipelineWorker : public Napi::AsyncWorker {
} else if (baton->formatOut == "heif" ||
(baton->formatOut == "input" && inputImageType == sharp::ImageType::HEIF)) {
// Write HEIF to buffer
image = sharp::RemoveAnimationProperties(image);
VipsArea *area = reinterpret_cast<VipsArea*>(image.heifsave_buffer(VImage::option()
->set("strip", !baton->withMetadata)
->set("compression", baton->heifCompression)
->set("Q", baton->heifQuality)
->set("compression", baton->heifCompression)
->set("speed", baton->heifSpeed)
#if defined(VIPS_TYPE_FOREIGN_SUBSAMPLE)
->set("subsample_mode", baton->heifChromaSubsampling == "4:4:4"
? VIPS_FOREIGN_SUBSAMPLE_OFF : VIPS_FOREIGN_SUBSAMPLE_ON)
#endif
->set("lossless", baton->heifLossless)));
baton->bufferOut = static_cast<char*>(area->data);
baton->bufferOutLength = area->length;
@@ -886,9 +919,9 @@ class PipelineWorker : public Napi::AsyncWorker {
image = image[0];
baton->channels = 1;
}
if (image.format() != VIPS_FORMAT_UCHAR) {
// Cast pixels to uint8 (unsigned char)
image = image.cast(VIPS_FORMAT_UCHAR);
if (image.format() != baton->rawDepth) {
// Cast pixels to requested format
image = image.cast(baton->rawDepth);
}
// Get raw image data
baton->bufferOut = static_cast<char*>(image.write_to_memory(&baton->bufferOutLength));
@@ -914,13 +947,14 @@ class PipelineWorker : public Napi::AsyncWorker {
bool const isWebp = sharp::IsWebp(baton->fileOut);
bool const isGif = sharp::IsGif(baton->fileOut);
bool const isTiff = sharp::IsTiff(baton->fileOut);
bool const isJp2 = sharp::IsJp2(baton->fileOut);
bool const isHeif = sharp::IsHeif(baton->fileOut);
bool const isDz = sharp::IsDz(baton->fileOut);
bool const isDzZip = sharp::IsDzZip(baton->fileOut);
bool const isV = sharp::IsV(baton->fileOut);
bool const mightMatchInput = baton->formatOut == "input";
bool const willMatchInput = mightMatchInput &&
!(isJpeg || isPng || isWebp || isGif || isTiff || isHeif || isDz || isDzZip || isV);
!(isJpeg || isPng || isWebp || isGif || isTiff || isJp2 || isHeif || isDz || isDzZip || isV);
if (baton->formatOut == "jpeg" || (mightMatchInput && isJpeg) ||
(willMatchInput && inputImageType == sharp::ImageType::JPEG)) {
@@ -931,8 +965,8 @@ class PipelineWorker : public Napi::AsyncWorker {
->set("Q", baton->jpegQuality)
->set("interlace", baton->jpegProgressive)
->set("subsample_mode", baton->jpegChromaSubsampling == "4:4:4"
? VIPS_FOREIGN_JPEG_SUBSAMPLE_OFF
: VIPS_FOREIGN_JPEG_SUBSAMPLE_ON)
? VIPS_FOREIGN_SUBSAMPLE_OFF
: VIPS_FOREIGN_SUBSAMPLE_ON)
->set("trellis_quant", baton->jpegTrellisQuantisation)
->set("quant_table", baton->jpegQuantisationTable)
->set("overshoot_deringing", baton->jpegOvershootDeringing)
@@ -940,6 +974,18 @@ class PipelineWorker : public Napi::AsyncWorker {
->set("optimize_coding", baton->jpegOptimiseCoding));
baton->formatOut = "jpeg";
baton->channels = std::min(baton->channels, 3);
} else if (baton->formatOut == "jp2" || (mightMatchInput && isJp2) ||
(willMatchInput && (inputImageType == sharp::ImageType::JP2))) {
// Write JP2 to file
sharp::AssertImageTypeDimensions(image, sharp::ImageType::JP2);
image.jp2ksave(const_cast<char*>(baton->fileOut.data()), VImage::option()
->set("Q", baton->jp2Quality)
->set("lossless", baton->jp2Lossless)
->set("subsample_mode", baton->jp2ChromaSubsampling == "4:4:4"
? VIPS_FOREIGN_SUBSAMPLE_OFF : VIPS_FOREIGN_SUBSAMPLE_ON)
->set("tile_height", baton->jp2TileHeight)
->set("tile_width", baton->jp2TileWidth));
baton->formatOut = "jp2";
} else if (baton->formatOut == "png" || (mightMatchInput && isPng) || (willMatchInput &&
(inputImageType == sharp::ImageType::PNG || (inputImageType == sharp::ImageType::GIF && !supportsGifOutput) ||
inputImageType == sharp::ImageType::SVG))) {
@@ -952,7 +998,7 @@ class PipelineWorker : public Napi::AsyncWorker {
->set("filter", baton->pngAdaptiveFiltering ? VIPS_FOREIGN_PNG_FILTER_ALL : VIPS_FOREIGN_PNG_FILTER_NONE)
->set("palette", baton->pngPalette)
->set("Q", baton->pngQuality)
->set("colours", baton->pngColours)
->set("bitdepth", baton->pngBitdepth)
->set("dither", baton->pngDither));
baton->formatOut = "png";
} else if (baton->formatOut == "webp" || (mightMatchInput && isWebp) ||
@@ -1005,15 +1051,14 @@ class PipelineWorker : public Napi::AsyncWorker {
} else if (baton->formatOut == "heif" || (mightMatchInput && isHeif) ||
(willMatchInput && inputImageType == sharp::ImageType::HEIF)) {
// Write HEIF to file
image = sharp::RemoveAnimationProperties(image);
image.heifsave(const_cast<char*>(baton->fileOut.data()), VImage::option()
->set("strip", !baton->withMetadata)
->set("Q", baton->heifQuality)
->set("compression", baton->heifCompression)
->set("speed", baton->heifSpeed)
#if defined(VIPS_TYPE_FOREIGN_SUBSAMPLE)
->set("subsample_mode", baton->heifChromaSubsampling == "4:4:4"
? VIPS_FOREIGN_SUBSAMPLE_OFF : VIPS_FOREIGN_SUBSAMPLE_ON)
#endif
->set("lossless", baton->heifLossless));
baton->formatOut = "heif";
} else if (baton->formatOut == "dz" || isDz || isDzZip) {
@@ -1130,6 +1175,9 @@ class PipelineWorker : public Napi::AsyncWorker {
info.Set("width", static_cast<uint32_t>(width));
info.Set("height", static_cast<uint32_t>(height));
info.Set("channels", static_cast<uint32_t>(baton->channels));
if (baton->formatOut == "raw") {
info.Set("depth", vips_enum_nick(VIPS_TYPE_BAND_FORMAT, baton->rawDepth));
}
info.Set("premultiplied", baton->premultiplied);
if (baton->hasCropOffset) {
info.Set("cropOffsetLeft", static_cast<int32_t>(baton->cropOffsetLeft));
@@ -1319,10 +1367,12 @@ Napi::Value pipeline(const Napi::CallbackInfo& info) {
baton->flatten = sharp::AttrAsBool(options, "flatten");
baton->flattenBackground = sharp::AttrAsVectorOfDouble(options, "flattenBackground");
baton->negate = sharp::AttrAsBool(options, "negate");
baton->negateAlpha = sharp::AttrAsBool(options, "negateAlpha");
baton->blurSigma = sharp::AttrAsDouble(options, "blurSigma");
baton->brightness = sharp::AttrAsDouble(options, "brightness");
baton->saturation = sharp::AttrAsDouble(options, "saturation");
baton->hue = sharp::AttrAsInt32(options, "hue");
baton->lightness = sharp::AttrAsDouble(options, "lightness");
baton->medianSize = sharp::AttrAsUint32(options, "medianSize");
baton->sharpenSigma = sharp::AttrAsDouble(options, "sharpenSigma");
baton->sharpenFlat = sharp::AttrAsDouble(options, "sharpenFlat");
@@ -1389,6 +1439,10 @@ Napi::Value pipeline(const Napi::CallbackInfo& info) {
baton->recombMatrix[i] = sharp::AttrAsDouble(recombMatrix, i);
}
}
baton->colourspaceInput = sharp::GetInterpretation(sharp::AttrAsStr(options, "colourspaceInput"));
if (baton->colourspaceInput == VIPS_INTERPRETATION_ERROR) {
baton->colourspaceInput = VIPS_INTERPRETATION_LAST;
}
baton->colourspace = sharp::GetInterpretation(sharp::AttrAsStr(options, "colourspace"));
if (baton->colourspace == VIPS_INTERPRETATION_ERROR) {
baton->colourspace = VIPS_INTERPRETATION_sRGB;
@@ -1406,6 +1460,7 @@ Napi::Value pipeline(const Napi::CallbackInfo& info) {
std::string k = sharp::AttrAsStr(mdStrKeys, i);
baton->withMetadataStrs.insert(std::make_pair(k, sharp::AttrAsStr(mdStrs, k)));
}
baton->timeoutSeconds = sharp::AttrAsUint32(options, "timeoutSeconds");
// Format-specific
baton->jpegQuality = sharp::AttrAsUint32(options, "jpegQuality");
baton->jpegProgressive = sharp::AttrAsBool(options, "jpegProgressive");
@@ -1420,8 +1475,13 @@ Napi::Value pipeline(const Napi::CallbackInfo& info) {
baton->pngAdaptiveFiltering = sharp::AttrAsBool(options, "pngAdaptiveFiltering");
baton->pngPalette = sharp::AttrAsBool(options, "pngPalette");
baton->pngQuality = sharp::AttrAsUint32(options, "pngQuality");
baton->pngColours = sharp::AttrAsUint32(options, "pngColours");
baton->pngBitdepth = sharp::AttrAsUint32(options, "pngBitdepth");
baton->pngDither = sharp::AttrAsDouble(options, "pngDither");
baton->jp2Quality = sharp::AttrAsUint32(options, "jp2Quality");
baton->jp2Lossless = sharp::AttrAsBool(options, "jp2Lossless");
baton->jp2TileHeight = sharp::AttrAsUint32(options, "jp2TileHeight");
baton->jp2TileWidth = sharp::AttrAsUint32(options, "jp2TileWidth");
baton->jp2ChromaSubsampling = sharp::AttrAsStr(options, "jp2ChromaSubsampling");
baton->webpQuality = sharp::AttrAsUint32(options, "webpQuality");
baton->webpAlphaQuality = sharp::AttrAsUint32(options, "webpAlphaQuality");
baton->webpLossless = sharp::AttrAsBool(options, "webpLossless");
@@ -1436,6 +1496,9 @@ Napi::Value pipeline(const Napi::CallbackInfo& info) {
baton->tiffTileHeight = sharp::AttrAsUint32(options, "tiffTileHeight");
baton->tiffXres = sharp::AttrAsDouble(options, "tiffXres");
baton->tiffYres = sharp::AttrAsDouble(options, "tiffYres");
if (baton->tiffXres == 1.0 && baton->tiffYres == 1.0 && baton->withMetadataDensity > 0) {
baton->tiffXres = baton->tiffYres = baton->withMetadataDensity / 25.4;
}
// tiff compression options
baton->tiffCompression = static_cast<VipsForeignTiffCompression>(
vips_enum_from_nick(nullptr, VIPS_TYPE_FOREIGN_TIFF_COMPRESSION,
@@ -1451,6 +1514,11 @@ Napi::Value pipeline(const Napi::CallbackInfo& info) {
baton->heifSpeed = sharp::AttrAsUint32(options, "heifSpeed");
baton->heifChromaSubsampling = sharp::AttrAsStr(options, "heifChromaSubsampling");
// Raw output
baton->rawDepth = static_cast<VipsBandFormat>(
vips_enum_from_nick(nullptr, VIPS_TYPE_BAND_FORMAT,
sharp::AttrAsStr(options, "rawDepth").data()));
// Animated output
if (sharp::HasAttr(options, "pageHeight")) {
baton->pageHeight = sharp::AttrAsUint32(options, "pageHeight");
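The new baton fields above are populated from JavaScript-level options. A minimal usage sketch, assuming the option names mirror the baton fields (file names and values are illustrative, and JP2 output additionally requires a libvips built with OpenJPEG):

const sharp = require('sharp'); // sketch only

// Limit processing time (-> baton->timeoutSeconds) and write JPEG 2000 output
// (-> baton->jp2Quality, jp2Lossless, jp2TileWidth/Height, jp2ChromaSubsampling)
sharp('input.jpg')
  .timeout({ seconds: 5 })
  .jp2({ quality: 80, lossless: false, tileWidth: 512, tileHeight: 512, chromaSubsampling: '4:4:4' })
  .toFile('output.jp2')
  .catch(console.error);

// Raw output at a non-default bit depth (-> baton->rawDepth)
sharp('input.png')
  .raw({ depth: 'ushort' })
  .toBuffer({ resolveWithObject: true })
  .then(({ info }) => console.log(info.depth)); // 'ushort'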

View File

@@ -90,10 +90,12 @@ struct PipelineBaton {
bool flatten;
std::vector<double> flattenBackground;
bool negate;
bool negateAlpha;
double blurSigma;
double brightness;
double saturation;
int hue;
double lightness;
int medianSize;
double sharpenSigma;
double sharpenFlat;
@@ -145,8 +147,13 @@ struct PipelineBaton {
bool pngAdaptiveFiltering;
bool pngPalette;
int pngQuality;
int pngColours;
int pngBitdepth;
double pngDither;
int jp2Quality;
bool jp2Lossless;
int jp2TileHeight;
int jp2TileWidth;
std::string jp2ChromaSubsampling;
int webpQuality;
int webpAlphaQuality;
bool webpNearLossless;
@@ -168,12 +175,14 @@ struct PipelineBaton {
int heifSpeed;
std::string heifChromaSubsampling;
bool heifLossless;
VipsBandFormat rawDepth;
std::string err;
bool withMetadata;
int withMetadataOrientation;
double withMetadataDensity;
std::string withMetadataIcc;
std::unordered_map<std::string, std::string> withMetadataStrs;
int timeoutSeconds;
std::unique_ptr<double[]> convKernel;
int convKernelWidth;
int convKernelHeight;
@@ -185,6 +194,7 @@ struct PipelineBaton {
int extractChannel;
bool removeAlpha;
double ensureAlpha;
VipsInterpretation colourspaceInput;
VipsInterpretation colourspace;
int pageHeight;
std::vector<int> delay;
@@ -219,10 +229,12 @@ struct PipelineBaton {
flatten(false),
flattenBackground{ 0.0, 0.0, 0.0 },
negate(false),
negateAlpha(true),
blurSigma(0.0),
brightness(1.0),
saturation(1.0),
hue(0),
lightness(0),
medianSize(0),
sharpenSigma(0.0),
sharpenFlat(1.0),
@@ -272,8 +284,13 @@ struct PipelineBaton {
pngAdaptiveFiltering(false),
pngPalette(false),
pngQuality(100),
pngColours(256),
pngBitdepth(8),
pngDither(1.0),
jp2Quality(80),
jp2Lossless(false),
jp2TileHeight(512),
jp2TileWidth(512),
jp2ChromaSubsampling("4:4:4"),
webpQuality(80),
webpAlphaQuality(100),
webpNearLossless(false),
@@ -293,11 +310,13 @@ struct PipelineBaton {
heifQuality(50),
heifCompression(VIPS_FOREIGN_HEIF_COMPRESSION_AV1),
heifSpeed(5),
heifChromaSubsampling("4:2:0"),
heifChromaSubsampling("4:4:4"),
heifLossless(false),
rawDepth(VIPS_FORMAT_UCHAR),
withMetadata(false),
withMetadataOrientation(-1),
withMetadataDensity(0.0),
timeoutSeconds(0),
convKernelWidth(0),
convKernelHeight(0),
convKernelScale(0.0),
@@ -308,6 +327,7 @@ struct PipelineBaton {
extractChannel(-1),
removeAlpha(false),
ensureAlpha(-1.0),
colourspaceInput(VIPS_INTERPRETATION_LAST),
colourspace(VIPS_INTERPRETATION_LAST),
pageHeight(0),
delay{-1},

View File

@@ -115,7 +115,7 @@ Napi::Value format(const Napi::CallbackInfo& info) {
Napi::Object format = Napi::Object::New(env);
for (std::string const f : {
"jpeg", "png", "webp", "tiff", "magick", "openslide", "dz",
"ppm", "fits", "gif", "svg", "heif", "pdf", "vips"
"ppm", "fits", "gif", "svg", "heif", "pdf", "vips", "jp2k"
}) {
// Input
Napi::Boolean hasInputFile =

View File

@@ -8,16 +8,18 @@
"test": "node perf && node random && node parallel"
},
"devDependencies": {
"async": "3.2.0",
"@squoosh/cli": "0.7.2",
"@squoosh/lib": "0.4.0",
"async": "3.2.1",
"benchmark": "2.1.4",
"gm": "1.23.1",
"imagemagick": "0.1.3",
"jimp": "0.16.1",
"mapnik": "4.5.6",
"semver": "7.3.4"
"mapnik": "4.5.8",
"semver": "7.3.5"
},
"license": "Apache-2.0",
"engines": {
"node": "14"
"node": "16"
}
}

View File

@@ -2,6 +2,7 @@
const os = require('os');
const fs = require('fs');
const { exec } = require('child_process');
const async = require('async');
const assert = require('assert');
@@ -13,6 +14,7 @@ const gm = require('gm');
const imagemagick = require('imagemagick');
const mapnik = require('mapnik');
const jimp = require('jimp');
const squoosh = require('@squoosh/lib');
const fixtures = require('../fixtures');
@@ -75,6 +77,65 @@ async.series({
});
}
});
// squoosh-cli
jpegSuite.add('squoosh-cli-file-file', {
defer: true,
fn: function (deferred) {
exec(`./node_modules/.bin/squoosh-cli \
--output-dir ${os.tmpdir()} \
--resize '{"enabled":true,"width":${width},"height":${height},"method":"lanczos3","premultiply":false,"linearRGB":false}' \
--mozjpeg '{"quality":80,"progressive":false,"optimize_coding":true,"quant_table":0,"trellis_multipass":false,"chroma_subsample":2,"separate_chroma_quality":false}' \
"${fixtures.inputJpg}"`, function (err) {
if (err) {
throw err;
}
deferred.resolve();
});
}
});
// squoosh-lib (GPLv3)
jpegSuite.add('squoosh-lib-buffer-buffer', {
defer: true,
fn: function (deferred) {
const pool = new squoosh.ImagePool();
const image = pool.ingestImage(inputJpgBuffer);
image.decoded
.then(function () {
return image.preprocess({
resize: {
enabled: true,
width,
height,
method: 'lanczos3',
premultiply: false,
linearRGB: false
}
});
})
.then(function () {
return image.encode({
mozjpeg: {
quality: 80,
progressive: false,
optimize_coding: true,
quant_table: 0,
trellis_multipass: false,
chroma_subsample: 2,
separate_chroma_quality: false
}
});
})
.then(function () {
return pool.close();
})
.then(function () {
return image.encodedWith.mozjpeg;
})
.then(function () {
deferred.resolve();
});
}
});
// mapnik
jpegSuite.add('mapnik-file-file', {
defer: true,

BIN test/fixtures/gradients-rgb8.png vendored Normal file (777 KiB)
(Several other binary test fixtures were added or updated in this compare; their names are not shown in this view.)
View File

@@ -2,7 +2,7 @@
const path = require('path');
const sharp = require('../../');
const maxColourDistance = require('../../build/Release/sharp')._maxColourDistance;
const maxColourDistance = require('../../lib/sharp')._maxColourDistance;
// Helpers
const getPath = function (filename) {
@@ -74,6 +74,7 @@ module.exports = {
inputJpgLossless: getPath('testimgl.jpg'), // Lossless JPEG from ftp://ftp.fu-berlin.de/unix/X11/graphics/ImageMagick/delegates/ljpeg-6b.tar.gz
inputPng: getPath('50020484-00001.png'), // http://c.searspartsdirect.com/lis_png/PLDM/50020484-00001.png
inputPngGradients: getPath('gradients-rgb8.png'),
inputPngWithTransparency: getPath('blackbug.png'), // public domain
inputPngCompleteTransparency: getPath('full-transparent.png'),
inputPngWithGreyAlpha: getPath('grey-8bit-alpha.png'),
@@ -91,6 +92,7 @@ module.exports = {
inputPngRGBWithAlpha: getPath('2569067123_aca715a2ee_o.png'), // http://www.flickr.com/photos/grizdave/2569067123/ (same as inputJpg)
inputPngImageInAlpha: getPath('image-in-alpha.png'), // https://github.com/lovell/sharp/issues/1597
inputPngSolidAlpha: getPath('with-alpha.png'), // https://github.com/lovell/sharp/issues/1599
inputPngP3: getPath('p3.png'), // https://github.com/lovell/sharp/issues/2862
inputWebP: getPath('4.webp'), // http://www.gstatic.com/webp/gallery/4.webp
inputWebPWithTransparency: getPath('5_webp_a.webp'), // http://www.gstatic.com/webp/gallery3/5_webp_a.webp
@@ -103,6 +105,8 @@ module.exports = {
inputTiffUncompressed: getPath('uncompressed_tiff.tiff'), // https://code.google.com/archive/p/imagetestsuite/wikis/TIFFTestSuite.wiki file: 0c84d07e1b22b76f24cccc70d8788e4a.tif
inputTiff8BitDepth: getPath('8bit_depth.tiff'),
inputTifftagPhotoshop: getPath('tifftag-photoshop.tiff'), // https://github.com/lovell/sharp/issues/1600
inputJp2: getPath('relax.jp2'), // https://www.fnordware.com/j2k/relax.jp2
inputGif: getPath('Crash_test.gif'), // http://upload.wikimedia.org/wikipedia/commons/e/e3/Crash_test.gif
inputGifGreyPlusAlpha: getPath('grey-plus-alpha.gif'), // http://i.imgur.com/gZ5jlmE.gif
inputGifAnimated: getPath('rotating-squares.gif'), // CC0 https://loading.io/spinner/blocks/-rotating-squares-preloader-gif

BIN test/fixtures/p3.png vendored Normal file (610 B)
BIN test/fixtures/relax.jp2 vendored Normal file

View File

@@ -444,6 +444,22 @@
...
fun:vips__init
}
{
leak_libvips_thread_pool_new
Memcheck:Leak
match-leak-kinds: possible
fun:calloc
...
fun:g_system_thread_new
}
{
leak_libvips_thread_pool_push
Memcheck:Leak
match-leak-kinds: possible
fun:calloc
...
fun:g_thread_pool_push
}
{
leak_rsvg_static_data
Memcheck:Leak
@@ -480,6 +496,14 @@
...
fun:rsvg_handle_new_from_gfile_sync
}
{
leak_rsvg_rust_280_bytes_static_regex
Memcheck:Leak
match-leak-kinds: possible
fun:malloc
...
fun:rsvg_handle_get_dimensions_sub
}
# libuv warnings
{
@@ -871,3 +895,12 @@
...
fun:_ZN2v88internal18ArrayBufferSweeper10ReleaseAllEv
}
{
addr_v8_ZN2v88internal12_GLOBAL__N_119HandleApiCallHelperILb0EEENS0
Memcheck:Addr8
fun:strncmp
...
fun:_ZZN4node7binding6DLOpenERKN2v820FunctionCallbackInfoINS1_5ValueEEEENKUlPNS0_4DLibEE_clES8_
fun:_ZN4node7binding6DLOpenERKN2v820FunctionCallbackInfoINS1_5ValueEEE
fun:_ZN2v88internal12_GLOBAL__N_119HandleApiCallHelperILb0EEENS0_11MaybeHandleINS0_6ObjectEEEPNS0_7IsolateENS0_6HandleINS0_10HeapObjectEEESA_NS8_INS0_20FunctionTemplateInfoEEENS8_IS4_EENS0_16BuiltinArgumentsE
}

View File

@@ -3,7 +3,7 @@
const assert = require('assert');
const sharp = require('../../');
const { inputAvif, inputJpg } = require('../fixtures');
const { inputAvif, inputJpg, inputGifAnimated } = require('../fixtures');
describe('AVIF', () => {
it('called without options does not throw an error', () => {
@@ -81,4 +81,29 @@ describe('AVIF', () => {
width: 32
});
});
it('can convert animated GIF to non-animated AVIF', async () => {
const data = await sharp(inputGifAnimated, { animated: true })
.resize(10)
.avif({ speed: 8 })
.toBuffer();
const metadata = await sharp(data)
.metadata();
const { size, ...metadataWithoutSize } = metadata;
assert.deepStrictEqual(metadataWithoutSize, {
channels: 4,
compression: 'av1',
depth: 'uchar',
format: 'heif',
hasAlpha: true,
hasProfile: false,
height: 300,
isProgressive: false,
pageHeight: 300,
pagePrimary: 0,
pages: 1,
space: 'srgb',
width: 10
});
});
});

View File

@@ -90,7 +90,48 @@ describe('Colour space conversion', function () {
});
});
it('Invalid input', function () {
it('From sRGB with RGB16 pipeline, resize with gamma, to sRGB', function (done) {
sharp(fixtures.inputPngGradients)
.pipelineColourspace('rgb16')
.resize(320)
.gamma()
.toColourspace('srgb')
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(320, info.width);
fixtures.assertSimilar(fixtures.expected('colourspace-gradients-gamma-resize.png'), data, {
threshold: 0
}, done);
});
});
it('Convert P3 to sRGB', async () => {
const [r, g, b] = await sharp(fixtures.inputPngP3)
.raw()
.toBuffer();
assert.strictEqual(r, 255);
assert.strictEqual(g, 0);
assert.strictEqual(b, 0);
});
it('Passthrough P3', async () => {
const [r, g, b] = await sharp(fixtures.inputPngP3)
.withMetadata({ icc: 'p3' })
.raw()
.toBuffer();
assert.strictEqual(r, 234);
assert.strictEqual(g, 51);
assert.strictEqual(b, 34);
});
it('Invalid pipelineColourspace input', function () {
assert.throws(function () {
sharp(fixtures.inputJpg)
.pipelineColorspace(null);
}, /Expected string for colourspace but received null of type object/);
});
it('Invalid toColourspace input', function () {
assert.throws(function () {
sharp(fixtures.inputJpg)
.toColourspace(null);

View File

@@ -124,4 +124,30 @@ describe('Extend', function () {
fixtures.assertSimilar(fixtures.expected('extend-2channel.png'), data, done);
});
});
it('Premultiply background when compositing', async () => {
const background = '#bf1942cc';
const data = await sharp({
create: {
width: 1, height: 1, channels: 4, background: '#fff0'
}
})
.composite([{
input: {
create: {
width: 1, height: 1, channels: 4, background
}
}
}])
.extend({
left: 1, background
})
.raw()
.toBuffer();
const [r1, g1, b1, a1, r2, g2, b2, a2] = data;
assert.strictEqual(true, Math.abs(r2 - r1) < 2);
assert.strictEqual(true, Math.abs(g2 - g1) < 2);
assert.strictEqual(true, Math.abs(b2 - b1) < 2);
assert.strictEqual(true, Math.abs(a2 - a1) < 2);
});
});
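A minimal sketch of the premultiplication this test depends on, using its '#bf1942cc' background; this is generic alpha arithmetic, not sharp's internal code:

const premultiply = ([r, g, b, a]) => [
  Math.round(r * a / 255),
  Math.round(g * a / 255),
  Math.round(b * a / 255),
  a
];
console.log(premultiply([0xbf, 0x19, 0x42, 0xcc])); // [ 153, 20, 53, 204 ]

Both the composited pixel and the extended pixel are derived from this premultiplied value, which is why each channel is expected to differ by less than 2.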

View File

@@ -57,8 +57,8 @@ describe('Image channel extraction', function () {
it('With colorspace conversion', function (done) {
const output = fixtures.path('output.extract-lch.jpg');
sharp(fixtures.inputJpg)
.toColourspace('lch')
.extractChannel(1)
.toColourspace('lch')
.resize(320, 240, { fastShrinkOnLoad: false })
.toFile(output, function (err, info) {
if (err) throw err;
@@ -70,12 +70,13 @@ describe('Image channel extraction', function () {
});
it('Alpha from 16-bit PNG', function (done) {
const output = fixtures.path('output.extract-alpha-16bit.jpg');
const output = fixtures.path('output.extract-alpha-16bit.png');
sharp(fixtures.inputPngWithTransparency16bit)
.resize(16)
.extractChannel(3)
.toFile(output, function (err, info) {
.toFile(output, function (err) {
if (err) throw err;
fixtures.assertMaxColourDistance(output, fixtures.expected('extract-alpha-16bit.jpg'));
fixtures.assertMaxColourDistance(output, fixtures.expected('extract-alpha-16bit.png'));
done();
});
});

View File

@@ -57,7 +57,7 @@ describe('HEIF', () => {
});
it('out of range speed should throw an error', () => {
assert.throws(() => {
sharp().heif({ speed: 9 });
sharp().heif({ speed: 10 });
});
});
it('invalid speed should throw an error', () => {

View File

@@ -1,6 +1,7 @@
'use strict';
const fs = require('fs');
const path = require('path');
const assert = require('assert');
const rimraf = require('rimraf');
@@ -297,6 +298,21 @@ describe('Input/output', function () {
});
});
it('Support output to tif format', function (done) {
sharp(fixtures.inputTiff)
.resize(320, 240)
.toFormat('tif')
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
assert.strictEqual(data.length, info.size);
assert.strictEqual('tiff', info.format);
assert.strictEqual(320, info.width);
assert.strictEqual(240, info.height);
done();
});
});
it('Fail when output File is input File', function (done) {
sharp(fixtures.inputJpg).toFile(fixtures.inputJpg, function (err) {
assert(err instanceof Error);
@@ -316,6 +332,48 @@ describe('Input/output', function () {
});
});
it('Fail when output File is input File (relative output, absolute input)', function (done) {
const relativePath = path.relative(process.cwd(), fixtures.inputJpg);
sharp(fixtures.inputJpg).toFile(relativePath, function (err) {
assert(err instanceof Error);
assert.strictEqual('Cannot use same file for input and output', err.message);
done();
});
});
it('Fail when output File is input File via Promise (relative output, absolute input)', function (done) {
const relativePath = path.relative(process.cwd(), fixtures.inputJpg);
sharp(fixtures.inputJpg).toFile(relativePath).then(function (data) {
assert(false);
done();
}).catch(function (err) {
assert(err instanceof Error);
assert.strictEqual('Cannot use same file for input and output', err.message);
done();
});
});
it('Fail when output File is input File (relative input, absolute output)', function (done) {
const relativePath = path.relative(process.cwd(), fixtures.inputJpg);
sharp(relativePath).toFile(fixtures.inputJpg, function (err) {
assert(err instanceof Error);
assert.strictEqual('Cannot use same file for input and output', err.message);
done();
});
});
it('Fail when output File is input File via Promise (relative input, absolute output)', function (done) {
const relativePath = path.relative(process.cwd(), fixtures.inputJpg);
sharp(relativePath).toFile(fixtures.inputJpg).then(function (data) {
assert(false);
done();
}).catch(function (err) {
assert(err instanceof Error);
assert.strictEqual('Cannot use same file for input and output', err.message);
done();
});
});
it('Fail when output File is empty', function (done) {
sharp(fixtures.inputJpg).toFile('', function (err) {
assert(err instanceof Error);
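These tests exercise path normalisation before the input/output comparison. A rough sketch of the check they imply, using Node's path module (the fixture path is illustrative; this is not sharp's internal implementation):

const path = require('path');
const isSameFile = (input, output) =>
  path.resolve(input) === path.resolve(output);
console.log(isSameFile('test/fixtures/input.jpg',
  path.join(process.cwd(), 'test/fixtures/input.jpg'))); // true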

test/unit/jp2.js Normal file (99 lines added)
View File

@@ -0,0 +1,99 @@
'use strict';
const fs = require('fs');
const assert = require('assert');
const sharp = require('../../');
const fixtures = require('../fixtures');
describe('JP2 output', () => {
if (!sharp.format.jp2k.input.buffer) {
it('JP2 output should fail due to missing OpenJPEG', () => {
assert.rejects(() =>
sharp(fixtures.inputJpg)
.jp2()
.toBuffer(),
/JP2 output requires libvips with support for OpenJPEG/
);
});
it('JP2 file output should fail due to missing OpenJPEG', () => {
assert.rejects(async () => await sharp().toFile('test.jp2'),
/JP2 output requires libvips with support for OpenJPEG/
);
});
} else {
it('JP2 Buffer to PNG Buffer', () => {
sharp(fs.readFileSync(fixtures.inputJp2))
.resize(8, 15)
.png()
.toBuffer({ resolveWithObject: true })
.then(({ data, info }) => {
assert.strictEqual(true, data.length > 0);
assert.strictEqual(data.length, info.size);
assert.strictEqual('png', info.format);
assert.strictEqual(8, info.width);
assert.strictEqual(15, info.height);
assert.strictEqual(4, info.channels);
});
});
it('JP2 quality', function (done) {
sharp(fixtures.inputJp2)
.resize(320, 240)
.jp2({ quality: 70 })
.toBuffer(function (err, buffer70) {
if (err) throw err;
sharp(fixtures.inputJp2)
.resize(320, 240)
.toBuffer(function (err, buffer80) {
if (err) throw err;
sharp(fixtures.inputJp2)
.resize(320, 240)
.jp2({ quality: 90 })
.toBuffer(function (err, buffer90) {
if (err) throw err;
assert(buffer70.length < buffer80.length);
assert(buffer80.length < buffer90.length);
done();
});
});
});
});
it('Without chroma subsampling generates larger file', function (done) {
// First generate with chroma subsampling (default)
sharp(fixtures.inputJp2)
.resize(320, 240)
.jp2({ chromaSubsampling: '4:2:0' })
.toBuffer(function (err, withChromaSubsamplingData, withChromaSubsamplingInfo) {
if (err) throw err;
assert.strictEqual(true, withChromaSubsamplingData.length > 0);
assert.strictEqual(withChromaSubsamplingData.length, withChromaSubsamplingInfo.size);
assert.strictEqual('jp2', withChromaSubsamplingInfo.format);
assert.strictEqual(320, withChromaSubsamplingInfo.width);
assert.strictEqual(240, withChromaSubsamplingInfo.height);
// Then generate without
sharp(fixtures.inputJp2)
.resize(320, 240)
.jp2({ chromaSubsampling: '4:4:4' })
.toBuffer(function (err, withoutChromaSubsamplingData, withoutChromaSubsamplingInfo) {
if (err) throw err;
assert.strictEqual(true, withoutChromaSubsamplingData.length > 0);
assert.strictEqual(withoutChromaSubsamplingData.length, withoutChromaSubsamplingInfo.size);
assert.strictEqual('jp2', withoutChromaSubsamplingInfo.format);
assert.strictEqual(320, withoutChromaSubsamplingInfo.width);
assert.strictEqual(240, withoutChromaSubsamplingInfo.height);
assert.strictEqual(true, withChromaSubsamplingData.length <= withoutChromaSubsamplingData.length);
done();
});
});
});
it('Invalid JP2 chromaSubsampling value throws error', function () {
assert.throws(function () {
sharp().jpeg({ chromaSubsampling: '4:2:2' });
});
});
}
});

View File

@@ -247,7 +247,7 @@ describe('Image metadata', function () {
assert.strictEqual('undefined', typeof metadata.size);
assert.strictEqual(800, metadata.width);
assert.strictEqual(533, metadata.height);
assert.strictEqual(true, [3, 4].includes(metadata.channels)); // libvips 8.11.0 = 4
assert.strictEqual(3, metadata.channels);
assert.strictEqual('uchar', metadata.depth);
assert.strictEqual('undefined', typeof metadata.density);
assert.strictEqual('undefined', typeof metadata.chromaSubsampling);
@@ -256,6 +256,7 @@ describe('Image metadata', function () {
assert.strictEqual('undefined', typeof metadata.orientation);
assert.strictEqual('undefined', typeof metadata.exif);
assert.strictEqual('undefined', typeof metadata.icc);
assert.deepStrictEqual(metadata.background, { r: 138, g: 148, b: 102 });
done();
});
});
@@ -266,7 +267,7 @@ describe('Image metadata', function () {
assert.strictEqual('undefined', typeof metadata.size);
assert.strictEqual(2, metadata.width);
assert.strictEqual(1, metadata.height);
assert.strictEqual(true, [2, 4].includes(metadata.channels)); // libvips 8.11.0 = 4
assert.strictEqual(4, metadata.channels);
assert.strictEqual('uchar', metadata.depth);
assert.strictEqual('undefined', typeof metadata.density);
assert.strictEqual('undefined', typeof metadata.chromaSubsampling);
@@ -285,7 +286,7 @@ describe('Image metadata', function () {
.then(({
format, width, height, space, channels, depth,
isProgressive, pages, pageHeight, loop, delay,
hasProfile, hasAlpha
background, hasProfile, hasAlpha
}) => {
assert.strictEqual(format, 'gif');
assert.strictEqual(width, 80);
@@ -298,6 +299,7 @@ describe('Image metadata', function () {
assert.strictEqual(pageHeight, 80);
assert.strictEqual(loop, 0);
assert.deepStrictEqual(delay, Array(30).fill(30));
assert.deepStrictEqual(background, { r: 0, g: 0, b: 0 });
assert.strictEqual(hasProfile, false);
assert.strictEqual(hasAlpha, true);
})
@@ -320,7 +322,7 @@ describe('Image metadata', function () {
assert.strictEqual(isProgressive, false);
assert.strictEqual(pages, 10);
assert.strictEqual(pageHeight, 285);
assert.strictEqual(true, [2, 3].includes(loop)); // libvips 8.11.0 = 2
assert.strictEqual(loop, 2);
assert.deepStrictEqual(delay, [...Array(9).fill(3000), 15000]);
assert.strictEqual(hasProfile, false);
assert.strictEqual(hasAlpha, true);
@@ -724,6 +726,25 @@ describe('Image metadata', function () {
})
);
it('AVIF', async () => {
const metadata = await sharp(fixtures.inputAvif).metadata();
assert.deepStrictEqual(metadata, {
format: 'heif',
width: 2048,
height: 858,
space: 'srgb',
channels: 3,
depth: 'uchar',
isProgressive: false,
pages: 1,
pageHeight: 858,
pagePrimary: 0,
compression: 'av1',
hasProfile: false,
hasAlpha: false
});
});
it('File input with corrupt header fails gracefully', function (done) {
sharp(fixtures.inputJpgWithCorruptHeader)
.metadata(function (err) {

View File

@@ -18,7 +18,9 @@ describe('Modulate', function () {
{ saturation: null },
{ hue: '50deg' },
{ hue: 1.5 },
{ hue: null }
{ hue: null },
{ lightness: '+50' },
{ lightness: null }
].forEach(function (options) {
it('should throw', function () {
assert.throws(function () {
@@ -108,6 +110,22 @@ describe('Modulate', function () {
assert.deepStrictEqual({ r: 127, g: 83, b: 81 }, { r, g, b });
});
it('should be able to lighten', async () => {
const [r, g, b] = await sharp({
create: {
width: 1,
height: 1,
channels: 3,
background: { r: 153, g: 68, b: 68 }
}
})
.modulate({ lightness: 10 })
.raw()
.toBuffer();
assert.deepStrictEqual({ r: 182, g: 93, b: 92 }, { r, g, b });
});
it('should be able to modulate all channels', async () => {
const [r, g, b] = await sharp({
create: {

View File

@@ -107,4 +107,88 @@ describe('Negate', function () {
done();
});
});
it('negate ({alpha: true})', function (done) {
sharp(fixtures.inputJpg)
.resize(320, 240)
.negate({ alpha: true })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('jpeg', info.format);
assert.strictEqual(320, info.width);
assert.strictEqual(240, info.height);
fixtures.assertSimilar(fixtures.expected('negate.jpg'), data, done);
});
});
it('negate non-alpha channels (png)', function (done) {
sharp(fixtures.inputPng)
.resize(320, 240)
.negate({ alpha: false })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('png', info.format);
assert.strictEqual(320, info.width);
assert.strictEqual(240, info.height);
fixtures.assertSimilar(fixtures.expected('negate-preserve-alpha.png'), data, done);
});
});
it('negate non-alpha channels (png, trans)', function (done) {
sharp(fixtures.inputPngWithTransparency)
.resize(320, 240)
.negate({ alpha: false })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('png', info.format);
assert.strictEqual(320, info.width);
assert.strictEqual(240, info.height);
fixtures.assertSimilar(fixtures.expected('negate-preserve-alpha-trans.png'), data, done);
});
});
it('negate non-alpha channels (png, alpha)', function (done) {
sharp(fixtures.inputPngWithGreyAlpha)
.resize(320, 240)
.negate({ alpha: false })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('png', info.format);
assert.strictEqual(320, info.width);
assert.strictEqual(240, info.height);
fixtures.assertSimilar(fixtures.expected('negate-preserve-alpha-grey.png'), data, done);
});
});
it('negate non-alpha channels (webp)', function (done) {
sharp(fixtures.inputWebP)
.resize(320, 240)
.negate({ alpha: false })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('webp', info.format);
assert.strictEqual(320, info.width);
assert.strictEqual(240, info.height);
fixtures.assertSimilar(fixtures.expected('negate-preserve-alpha.webp'), data, done);
});
});
it('negate non-alpha channels (webp, trans)', function (done) {
sharp(fixtures.inputWebPWithTransparency)
.resize(320, 240)
.negate({ alpha: false })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('webp', info.format);
assert.strictEqual(320, info.width);
assert.strictEqual(240, info.height);
fixtures.assertSimilar(fixtures.expected('negate-preserve-alpha-trans.webp'), data, done);
});
});
it('invalid alpha value', function () {
assert.throws(function () {
sharp(fixtures.inputWebPWithTransparency).negate({ alpha: 'non-bool' });
});
});
});

View File

@@ -179,7 +179,7 @@ describe('Raw pixel data', function () {
});
});
describe('Ouput raw, uncompressed image data', function () {
describe('Output raw, uncompressed image data', function () {
it('1 channel greyscale image', function (done) {
sharp(fixtures.inputJpg)
.greyscale()
@@ -227,7 +227,7 @@ describe('Raw pixel data', function () {
});
});
it('extract A from RGBA', () =>
it('Extract A from RGBA', () =>
sharp(fixtures.inputPngWithTransparency)
.resize(32, 24)
.extractChannel(3)
@@ -241,4 +241,41 @@ describe('Raw pixel data', function () {
})
);
});
describe('Raw pixel depths', function () {
it('Invalid depth', function () {
assert.throws(function () {
sharp(Buffer.alloc(3), { raw: { width: 1, height: 1, channels: 3 } })
.raw({ depth: 'zoinks' });
});
});
for (const { constructor, depth, bits } of [
{ constructor: Uint8Array, depth: undefined, bits: 8 },
{ constructor: Uint8Array, depth: 'uchar', bits: 8 },
{ constructor: Uint8ClampedArray, depth: 'uchar', bits: 8 },
{ constructor: Int8Array, depth: 'char', bits: 8 },
{ constructor: Uint16Array, depth: 'ushort', bits: 16 },
{ constructor: Int16Array, depth: 'short', bits: 16 },
{ constructor: Uint32Array, depth: 'uint', bits: 32 },
{ constructor: Int32Array, depth: 'int', bits: 32 },
{ constructor: Float32Array, depth: 'float', bits: 32 },
{ constructor: Float64Array, depth: 'double', bits: 64 }
]) {
it(constructor.name, () =>
sharp(new constructor(3), { raw: { width: 1, height: 1, channels: 3 } })
.raw({ depth })
.toBuffer({ resolveWithObject: true })
.then(({ data, info }) => {
assert.strictEqual(1, info.width);
assert.strictEqual(1, info.height);
assert.strictEqual(3, info.channels);
if (depth !== undefined) {
assert.strictEqual(depth, info.depth);
}
assert.strictEqual(data.length / 3, bits / 8);
})
);
}
});
});
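The depth tests above only assert byte length; a short sketch of reading the returned Buffer back as a typed array of the requested depth (the input path is illustrative):

const sharp = require('sharp'); // sketch only
sharp('input.png')
  .raw({ depth: 'ushort' })
  .toBuffer({ resolveWithObject: true })
  .then(({ data, info }) => {
    // Reinterpret the Buffer's underlying memory as 16-bit unsigned samples
    const pixels = new Uint16Array(data.buffer, data.byteOffset,
      data.byteLength / Uint16Array.BYTES_PER_ELEMENT);
    console.log(info.depth, pixels.length); // 'ushort', width * height * channels
  });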

View File

@@ -605,6 +605,40 @@ describe('Resize dimensions', function () {
});
});
it('Ensure embedded shortest edge (height) is at least 1 pixel', function () {
return sharp({
create: {
width: 200,
height: 1,
channels: 3,
background: 'red'
}
})
.resize({ width: 50, height: 50, fit: sharp.fit.contain })
.toBuffer({ resolveWithObject: true })
.then(function (output) {
assert.strictEqual(50, output.info.width);
assert.strictEqual(50, output.info.height);
});
});
it('Ensure embedded shortest edge (width) is at least 1 pixel', function () {
return sharp({
create: {
width: 1,
height: 200,
channels: 3,
background: 'red'
}
})
.resize({ width: 50, height: 50, fit: sharp.fit.contain })
.toBuffer({ resolveWithObject: true })
.then(function (output) {
assert.strictEqual(50, output.info.width);
assert.strictEqual(50, output.info.height);
});
});
it('Skip shrink-on-load where one dimension <4px', async () => {
const jpeg = await sharp({
create: {

View File

@@ -423,20 +423,20 @@ describe('Image Stats', function () {
assert.strictEqual(true, isInRange(stats.channels[0].maxY, 0, 1));
// alpha channel
assert.strictEqual(0, stats.channels[1].min);
assert.strictEqual(255, stats.channels[1].max);
assert.strictEqual(true, isInAcceptableRange(stats.channels[1].sum, 255));
assert.strictEqual(true, isInAcceptableRange(stats.channels[1].squaresSum, 65025));
assert.strictEqual(true, isInAcceptableRange(stats.channels[1].mean, 127.5));
assert.strictEqual(true, isInAcceptableRange(stats.channels[1].stdev, 180.31222920256963));
assert.strictEqual(true, isInteger(stats.channels[1].minX));
assert.strictEqual(true, isInRange(stats.channels[1].minX, 0, 2));
assert.strictEqual(true, isInteger(stats.channels[1].minY));
assert.strictEqual(true, isInRange(stats.channels[1].minY, 0, 1));
assert.strictEqual(true, isInteger(stats.channels[1].maxX));
assert.strictEqual(true, isInRange(stats.channels[1].maxX, 0, 2));
assert.strictEqual(true, isInteger(stats.channels[1].maxY));
assert.strictEqual(true, isInRange(stats.channels[1].maxY, 0, 1));
assert.strictEqual(0, stats.channels[3].min);
assert.strictEqual(255, stats.channels[3].max);
assert.strictEqual(true, isInAcceptableRange(stats.channels[3].sum, 255));
assert.strictEqual(true, isInAcceptableRange(stats.channels[3].squaresSum, 65025));
assert.strictEqual(true, isInAcceptableRange(stats.channels[3].mean, 127.5));
assert.strictEqual(true, isInAcceptableRange(stats.channels[3].stdev, 180.31222920256963));
assert.strictEqual(true, isInteger(stats.channels[3].minX));
assert.strictEqual(true, isInRange(stats.channels[3].minX, 0, 2));
assert.strictEqual(true, isInteger(stats.channels[3].minY));
assert.strictEqual(true, isInRange(stats.channels[3].minY, 0, 1));
assert.strictEqual(true, isInteger(stats.channels[3].maxX));
assert.strictEqual(true, isInRange(stats.channels[3].maxX, 0, 2));
assert.strictEqual(true, isInteger(stats.channels[3].maxY));
assert.strictEqual(true, isInRange(stats.channels[3].maxY, 0, 1));
done();
});

View File

@@ -188,6 +188,26 @@ describe('TIFF', function () {
)
);
it('TIFF imputes xres and yres from withMetadataDensity if not explicitly provided', async () => {
const data = await sharp(fixtures.inputTiff)
.resize(8, 8)
.tiff()
.withMetadata({ density: 600 })
.toBuffer();
const { density } = await sharp(data).metadata();
assert.strictEqual(600, density);
});
it('TIFF uses xres and yres over withMetadataDensity if explicitly provided', async () => {
const data = await sharp(fixtures.inputTiff)
.resize(8, 8)
.tiff({ xres: 1000, yres: 1000 })
.withMetadata({ density: 600 })
.toBuffer();
const { density } = await sharp(data).metadata();
assert.strictEqual(25400, density);
});
it('TIFF invalid xres value should throw an error', function () {
assert.throws(function () {
sharp().tiff({ xres: '1000.0' });
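The densities asserted above follow from a unit conversion: withMetadata({ density }) is given in DPI while TIFF xres/yres are pixels per millimetre, so the imputed resolution is density / 25.4, and an explicit xres/yres of 1000 px/mm reads back as 25400 DPI. A small sketch of the arithmetic:

const dpiToPxPerMm = dpi => dpi / 25.4;
const pxPerMmToDpi = res => res * 25.4;
console.log(dpiToPxPerMm(600).toFixed(2));  // '23.62'
console.log(Math.round(pxPerMmToDpi(1000))); // 25400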

test/unit/timeout.js Normal file (26 lines added)
View File

@@ -0,0 +1,26 @@
'use strict';
const assert = require('assert');
const sharp = require('../../');
const fixtures = require('../fixtures');
describe('Timeout', function () {
it('Will timeout after 1s when performing slow blur operation', () => assert.rejects(
() => sharp(fixtures.inputJpg)
.blur(100)
.timeout({ seconds: 1 })
.toBuffer(),
/timeout: [0-9]+% complete/
));
it('invalid object', () => assert.throws(
() => sharp().timeout('fail'),
/Expected object for options but received fail of type string/
));
it('invalid seconds', () => assert.throws(
() => sharp().timeout({ seconds: 'fail' }),
/Expected integer between 0 and 3600 for seconds but received fail of type string/
));
});

View File

@@ -35,7 +35,7 @@ describe('WebP', function () {
it('should work for webp alpha quality', function (done) {
sharp(fixtures.inputPngAlphaPremultiplicationSmall)
.webp({ alphaQuality: 80 })
.webp({ alphaQuality: 80, reductionEffort: 0 })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
@@ -46,7 +46,7 @@ describe('WebP', function () {
it('should work for webp lossless', function (done) {
sharp(fixtures.inputPngAlphaPremultiplicationSmall)
.webp({ lossless: true })
.webp({ lossless: true, reductionEffort: 0 })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
@@ -57,7 +57,7 @@ describe('WebP', function () {
it('should work for webp near-lossless', function (done) {
sharp(fixtures.inputPngAlphaPremultiplicationSmall)
.webp({ nearLossless: true, quality: 50 })
.webp({ nearLossless: true, quality: 50, reductionEffort: 0 })
.toBuffer(function (err50, data50, info50) {
if (err50) throw err50;
assert.strictEqual(true, data50.length > 0);
@@ -68,7 +68,7 @@ describe('WebP', function () {
it('should use near-lossless when both lossless and nearLossless are specified', function (done) {
sharp(fixtures.inputPngAlphaPremultiplicationSmall)
.webp({ nearLossless: true, quality: 50, lossless: true })
.webp({ nearLossless: true, quality: 50, lossless: true, reductionEffort: 0 })
.toBuffer(function (err50, data50, info50) {
if (err50) throw err50;
assert.strictEqual(true, data50.length > 0);
@@ -189,7 +189,7 @@ describe('WebP', function () {
it('should work with streams when only animated is set', function (done) {
fs.createReadStream(fixtures.inputWebPAnimated)
.pipe(sharp({ animated: true }))
.webp({ lossless: true })
.webp({ lossless: true, reductionEffort: 0 })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
@@ -201,7 +201,7 @@ describe('WebP', function () {
it('should work with streams when only pages is set', function (done) {
fs.createReadStream(fixtures.inputWebPAnimated)
.pipe(sharp({ pages: -1 }))
.webp({ lossless: true })
.webp({ lossless: true, reductionEffort: 0 })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
@@ -209,4 +209,24 @@ describe('WebP', function () {
fixtures.assertSimilar(fixtures.inputWebPAnimated, data, done);
});
});
it('should remove animation properties when loading single page', async () => {
const data = await sharp(fixtures.inputGifAnimatedLoop3)
.resize({ height: 570 })
.webp({ reductionEffort: 0 })
.toBuffer();
const metadata = await sharp(data).metadata();
assert.deepStrictEqual(metadata, {
format: 'webp',
size: 2580,
width: 740,
height: 570,
space: 'srgb',
channels: 3,
depth: 'uchar',
isProgressive: false,
hasProfile: false,
hasAlpha: false
});
});
});