Compare commits

...

109 Commits

Author SHA1 Message Date
Lovell Fuller
1ff84b20b7 Release v0.29.3 2021-11-14 11:40:19 +00:00
Lovell Fuller
97655d2dfd Bump deps 2021-11-14 09:17:44 +00:00
Michael B. Klein
d10d7b02d4 Docs: remove duplicate entry for mbklein (#2971) 2021-11-11 19:10:44 +00:00
Lovell Fuller
2ffdae2914 Docs: changelog and credit for #2952 2021-11-08 19:43:49 +00:00
Michael B. Klein
342de36973 Impute TIFF xres/yres from withMetadata({density}) 2021-11-08 19:43:42 +00:00
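
A minimal sketch of the behaviour this commit introduces, with illustrative file names: the density passed to `withMetadata()` (see #967 further down this list) is carried through to the TIFF `xres`/`yres` resolution tags on output.

```javascript
// Illustrative only: request 300 DPI metadata and let TIFF output
// derive its xres/yres resolution tags from it.
await sharp('input.png')
  .withMetadata({ density: 300 })
  .tiff()
  .toFile('output-300dpi.tiff');
```
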
Lovell Fuller
b33231d4bd Ensure correct dimensions when contain 1px image #2951 2021-11-07 16:35:30 +00:00
Lovell Fuller
319db21f29 Release v0.29.2 2021-10-21 09:15:21 +01:00
Lovell Fuller
d359331426 Remove animation props from single page images #2890 2021-10-18 20:27:10 +01:00
Lovell Fuller
7ae151362b Bump devDeps 2021-10-17 15:17:50 +01:00
Lovell Fuller
648a1e05da Throw error rather than exit for invalid binaries #2931 2021-10-17 15:14:40 +01:00
Lovell Fuller
b9f211fe34 Docs: changelog for #2918 2021-10-17 15:11:38 +01:00
Dmitri Pyatkov
e475d9e47f Improve error message on Windows for version conflict (#2918) 2021-10-17 14:10:28 +01:00
Lovell Fuller
f37ca8249a Bump deps 2021-09-22 11:41:22 +01:00
Lovell Fuller
1dd4be670d Add timeout function to limit processing time 2021-09-22 10:33:59 +01:00
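
A hedged sketch of the new timeout function, assuming the `seconds` option it is documented with; the value and file names are illustrative.

```javascript
// Abort processing if it takes longer than roughly 3 seconds.
await sharp('large-input.png')
  .timeout({ seconds: 3 })
  .resize(1200)
  .toFile('resized.png');
```
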
Lovell Fuller
197d4cf835 Docs: changelog and credit for #2893 2021-09-22 10:31:12 +01:00
Lovell Fuller
83eed86b53 Docs: clarify prebuilt libc support on ARMv6/v7 2021-09-22 10:08:52 +01:00
Lovell Fuller
bbf612cb9e Replace use of deprecated util.inherits 2021-09-22 10:08:44 +01:00
Erlend
2679bb567b Allow use of 'tif' to select TIFF output (#2893) 2021-09-16 18:49:14 +01:00
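
A minimal sketch of #2893, with illustrative file names: `tif` is now accepted as an alias when selecting TIFF output via `toFormat()`.

```javascript
await sharp('input.png')
  .toFormat('tif')
  .toFile('output.tif');
```
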
Lovell Fuller
481e350f39 Ensure 'versions' is populated from vendored libvips 2021-09-07 11:21:00 +01:00
Lovell Fuller
50c7a08754 Release v0.29.1 2021-09-07 10:23:50 +01:00
Lovell Fuller
9a0bb60737 Bump deps 2021-09-07 10:21:51 +01:00
Lovell Fuller
deb5d81221 Docs: changelog entries for #2878 #2879 2021-09-06 16:30:31 +01:00
Espen Hovlandsdal
916b04dbac Allow using speed 9 for AVIF/HEIC encoding (#2879) 2021-09-06 16:23:02 +01:00
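
A minimal sketch of #2879 (values illustrative): the fastest encoder speed setting, 9, is now accepted for AVIF output.

```javascript
await sharp('input.jpg')
  .avif({ quality: 50, speed: 9 })
  .toFile('output.avif');
```
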
Espen Hovlandsdal
52307fad5d Resolve paths before comparing input/output destination (#2878)
This fixes an issue where, if you try to write to the same destination as the
input file but do not use absolute paths (or the same relative path) for both
the input and output, sharp/vips might produce errors such as:

someFile.jpg: unable to open for write
unix error: No such file or directory
2021-09-06 16:21:43 +01:00
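
An illustrative reproduction of the scenario described above, using hypothetical paths: the input and output refer to the same file via different relative forms, which is now detected up front rather than surfacing as the libvips error shown.

```javascript
// Both paths resolve to the same file; sharp now rejects this clearly
// instead of failing inside libvips with "unable to open for write".
await sharp('./images/photo.jpg')
  .rotate(90)
  .toFile('images/photo.jpg')
  .catch((err) => console.error(err.message));
```
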
Lovell Fuller
afb21135c2 Docs: add changelog entry for #2868 2021-09-05 09:35:46 +01:00
Zaruike
b7fbffb3f7 Add support for libvips compiled with OpenJPEG 2021-09-05 09:32:02 +01:00
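
A hedged sketch, assuming a custom (non-vendored) libvips build that includes OpenJPEG; file names and option values are illustrative.

```javascript
// JPEG 2000 input/output is only available when libvips has been
// compiled with OpenJPEG support.
await sharp('input.jp2')
  .resize(640)
  .jp2({ quality: 80 })
  .toFile('output.jp2');
```
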
Lovell Fuller
5d98bcd8d8 Remove unsupported animation props from AVIF #2870 2021-09-05 08:46:15 +01:00
Lovell Fuller
e044788f63 Docs: changelog and credit for #2846 2021-08-30 20:31:10 +01:00
Tenpi
4a9267ce12 Add lightness option to modulate operation 2021-08-30 20:22:41 +01:00
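
A minimal sketch of the new `lightness` option (value illustrative), which applies an additive lightness adjustment alongside the existing multiplicative `brightness`, `saturation` and `hue` options.

```javascript
await sharp('input.jpg')
  .modulate({ lightness: 10 })
  .toFile('lighter.jpg');
```
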
Lovell Fuller
104464c2e0 Ensure images with P3 profiles retain full gamut #2862 2021-08-30 17:15:17 +01:00
Lovell Fuller
60adc110f5 Ensure background is premultiplied when compositing #2858 2021-08-29 16:40:40 +01:00
Paul Straw
2031d7d112 Ensure compatibility with ImageMagick 7 (#2865) 2021-08-28 20:17:44 +01:00
Lovell Fuller
3402656ec5 Set PNG bitdepth based on number of colours #2855
Removes use of deprecated libvips API
2021-08-26 22:05:29 +01:00
Lovell Fuller
4e84f743e4 Docs: toFile expects directory structure to exist 2021-08-20 09:22:22 +01:00
Lovell Fuller
17e50de5f0 Docs: serve docute from same hostname
Cheapo corporate web proxies ignore CSP and rewrite HTML
2021-08-19 18:58:17 +01:00
Lovell Fuller
978a788f40 CI: ensure Linux ARM64 prebuild token is passed into container 2021-08-17 14:49:57 +01:00
Lovell Fuller
6e91d55971 CI: ensure Linux ARM64 prebuild token is passed into container 2021-08-17 14:33:29 +01:00
Lovell Fuller
d4ce0a1e36 Update prebuild include regex 2021-08-17 11:23:24 +01:00
Lovell Fuller
148608b377 Release v0.29.0 2021-08-17 11:16:04 +01:00
Lovell Fuller
f725f4acb7 Update performance test results 2021-08-17 10:23:45 +01:00
Lovell Fuller
d07a549438 Tests: add squoosh-cli and squoosh-lib to performance benchmarks 2021-08-16 20:33:33 +01:00
Lovell Fuller
551441cedd Bench: bump deps 2021-08-16 19:36:42 +01:00
Lovell Fuller
46c14e939b Tests: add a few new leak suppressions 2021-08-16 19:01:39 +01:00
Lovell Fuller
6084647795 Upgrade to libvips v8.11.3 2021-08-15 19:45:08 +01:00
Lovell Fuller
e0a598ae62 Bump deps 2021-08-15 19:28:49 +01:00
Lovell Fuller
28833eb04a Upgrade to libvips v8.11.3-alpha1 2021-08-15 08:35:27 +01:00
Lovell Fuller
b24c9c86d1 Docs: changelog and credit for #2762 2021-08-03 15:28:50 +01:00
Mart
b7add480c7 Add support for bit depth with raw input and output (#2762)
* Determine input raw pixel depth from the given typed array
* Allow pixel depth to be set on raw output
2021-08-03 14:52:54 +01:00
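
A hedged sketch of #2762, with illustrative dimensions: input pixel depth is inferred from the typed array, and output depth can be requested via the `raw()` options.

```javascript
// 16 bits per channel in, 16 bits per channel raw buffer out.
const wide = new Uint16Array(64 * 64 * 3);
const { data, info } = await sharp(wide, { raw: { width: 64, height: 64, channels: 3 } })
  .raw({ depth: 'ushort' })
  .toBuffer({ resolveWithObject: true });
```
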
Lovell Fuller
eabb671b10 Docs: minimum Node.js version requirement of 12.13.0 2021-08-03 10:25:04 +01:00
Lovell Fuller
513ed02b76 Docs: changelog entry for #2808 2021-08-02 21:37:54 +01:00
Espen Hovlandsdal
b7ddbe71f7 Add support for negating only non-alpha channels
Fixes #1035
2021-08-02 21:19:56 +01:00
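
A minimal sketch of the option this adds (file names illustrative): invert the colour channels while leaving any alpha channel untouched.

```javascript
await sharp('input.png')
  .negate({ alpha: false })
  .toFile('inverted-keep-alpha.png');
```
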
Lovell Fuller
21d1a7ca62 CI: Add darwin-arm64 via MacStadium-based runner 2021-07-23 10:58:00 +01:00
Lovell Fuller
4c2d28a7ad Bump dep: color 2021-07-19 15:30:12 +01:00
Lovell Fuller
2afec9e3ed Docs: rebuild to include commit 6979042 2021-07-19 15:30:01 +01:00
reiv
69790421b7 Docs: add stats usage note and example (#2803) (#2804)
Clarify that stats are derived from the original input image and that operations need to be buffered in order to obtain stats from the resulting image.
2021-07-19 15:25:36 +01:00
Lovell Fuller
3f08f6a359 Add default background metadata for PNG and GIF images 2021-07-19 14:55:22 +01:00
Lovell Fuller
719c2db8da Bump devDeps 2021-07-14 19:33:24 +01:00
Lovell Fuller
a9aa55c32d Ensure pipelineColourspace is applied to all inputs #2704 2021-07-14 19:22:31 +01:00
Lovell Fuller
3f6d9d6ee9 Upgrade to libvips v8.11.2-alpha1 2021-07-14 13:45:47 +01:00
Lovell Fuller
b32568159f Docs: changelog and credit for #2704 2021-07-14 13:45:47 +01:00
Daiz
bb48d0d857 Add pipelineColourspace operator 2021-07-14 13:45:47 +01:00
Lovell Fuller
536412515f Allow installation directory to contain spaces
This previously worked, but regressed in v0.26.0.
2021-07-14 13:45:47 +01:00
Lovell Fuller
fcc6eaadd3 Tests: reduce WebP CPU time, ~2s faster 2021-07-14 13:45:47 +01:00
Lovell Fuller
7dc78e8796 Bump deps 2021-07-14 13:45:47 +01:00
Lovell Fuller
c65de3fe6d Default to single-channel output from extractChannel #2658 2021-07-14 13:45:47 +01:00
Lovell Fuller
d000f57773 Add compression property to HEIF image metadata #2504 2021-07-14 13:45:47 +01:00
Lovell Fuller
75cddbdb6d Default AVIF encoding to 4:4:4 chroma subsampling #2562 2021-07-14 13:45:47 +01:00
Lovell Fuller
e418d91511 Test: correct coverage syntax 2021-07-14 13:45:47 +01:00
Lovell Fuller
6c2e6c5432 Install: multiple platform-arch binaries in same tree 2021-07-14 13:45:47 +01:00
Lovell Fuller
8c6d9fdc62 Install: always use Brotli-compressed tarballs 2021-07-14 13:45:47 +01:00
Lovell Fuller
468e95427e CI: migrate ARM64 builds from TravisCI to CircleCI #2665 2021-07-14 13:45:47 +01:00
Lovell Fuller
6d7a5ace6b Drop support for Node.js 10, upgrade to Node-API v5 2021-07-14 13:45:47 +01:00
Lovell Fuller
cbaec198a5 Upgrade to libvips v8.11.0-rc1 2021-07-14 13:45:47 +01:00
Lovell Fuller
7467fa8b50 Docs: serve markdown files via Firebase instead of jsdelivr
Might make a few corporate web proxies happier
2021-06-21 17:10:44 +01:00
Lovell Fuller
61640fb5c7 CI: use gcc 10 on Linux x64 2021-06-03 18:52:52 +01:00
Lovell Fuller
19cb4b62b0 Docs: add security-related response headers 2021-06-02 09:12:02 +01:00
Lovell Fuller
81ee8bc30f Docs: make keyword search case insensitive 2021-06-02 09:11:08 +01:00
Lovell Fuller
9f384e1c6c Release v0.28.3 2021-05-24 16:26:34 +01:00
Lovell Fuller
35e8c8b25e Docs: ensure ops without examples are indexed 2021-05-24 16:24:04 +01:00
Lovell Fuller
dc53f1baff Bump deps 2021-05-24 16:08:10 +01:00
Lovell Fuller
70139600b5 Docs: fix CLAHE link 2021-05-23 18:49:54 +01:00
Lovell Fuller
1b4d1521e0 Docs: cross-link removeAlpha and flatten 2021-05-23 18:46:23 +01:00
Lovell Fuller
ed3377cb2d Docs: add parameter names to search keywords 2021-05-23 18:02:18 +01:00
Lovell Fuller
d72852b3aa Docs: changelog entry for #2726 2021-05-23 17:45:43 +01:00
Brad Parham
4b6b6189bf Add contrast limiting adaptive histogram equalization (CLAHE) operator (#2726) 2021-05-23 17:36:04 +01:00
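
A minimal sketch of #2726, with an illustrative window size: local, contrast-limited histogram equalisation over the given region.

```javascript
await sharp('input.jpg')
  .clahe({ width: 64, height: 64, maxSlope: 3 })
  .toFile('equalised.jpg');
```
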
Lovell Fuller
b69a54fc75 Ensure presence of libvips before invoking node-gyp 2021-05-16 19:40:43 +01:00
Lovell Fuller
81e388a4cc Docs: composite supports failOnError and limitInputPixels 2021-05-15 14:52:33 +01:00
Lovell Fuller
5bd5e5052a Skip shrink-on-load for multi-page WebP #2714 2021-05-15 14:13:16 +01:00
Lovell Fuller
a2d3fa729f Release v0.28.2 2021-05-10 17:59:50 +01:00
Lovell Fuller
cb6811bc47 CI: FreeBSD skip notifications 2021-05-10 17:44:15 +01:00
Lovell Fuller
53c6e80869 Docs: refresh index 2021-05-10 16:03:40 +01:00
Lovell Fuller
e71dca586c Bump devDeps 2021-05-10 16:03:08 +01:00
Lovell Fuller
b3cd48db5f Docs: add section about cross-platform installation 2021-05-10 14:41:28 +01:00
Sebastian
476448b9d4 Install: allow cross-libc via sharp-install-force flag (#2692) 2021-05-10 09:55:20 +01:00
Lovell Fuller
070534df5b Docs: changelog for #2685 2021-05-03 19:48:15 +01:00
Michael Nutt
9a1e8ed574 Add premultiplied boolean flag for raw pixel data input (#2685) 2021-05-03 19:30:37 +01:00
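
A minimal sketch of #2685 (buffer contents and dimensions are illustrative): flag raw RGBA input as already premultiplied so sharp does not premultiply it again.

```javascript
const rgba = Buffer.alloc(32 * 32 * 4, 255); // opaque white, already premultiplied
await sharp(rgba, { raw: { width: 32, height: 32, channels: 4, premultiplied: true } })
  .png()
  .toFile('from-premultiplied.png');
```
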
Lovell Fuller
309918a878 Move lint-related tasks to dedicated script entry 2021-05-03 10:04:19 +01:00
Lovell Fuller
cac83b94c1 Bump deps and docs refresh 2021-05-03 10:01:43 +01:00
Lovell Fuller
9c06df08a1 Docs: changelog entry for #2687 2021-05-03 10:01:17 +01:00
Jacob
52e4543d31 Detect empty input and throw a helpful error (#2687) 2021-05-03 09:29:51 +01:00
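
A hedged sketch of #2687: zero-length input now fails with a descriptive error rather than a cryptic one later in the pipeline.

```javascript
try {
  await sharp(Buffer.alloc(0)).toBuffer();
} catch (err) {
  console.error(err.message); // explains that the input is empty
}
```
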
Lovell Fuller
a688468378 CI: replace Node.js 15 with 16 2021-05-01 16:24:47 +01:00
Lovell Fuller
e1760d64fb Tests: updates so latest libvips master branch passes 2021-05-01 15:25:57 +01:00
Lovell Fuller
84d4e3cf8f Require specific semver functions, aids tree-shaking 2021-04-30 20:42:46 +01:00
Raj Rajhans
f8a76372ad Docs: rewrite sentence to avoid grammatical ambiguity (#2668) 2021-04-21 14:10:24 +01:00
Lovell Fuller
4237f5520f Allow withMetadata to set density #967 2021-04-17 13:46:54 +01:00
Lovell Fuller
8c0c01c702 Docs: changelog entry for #2664 2021-04-17 09:11:48 +01:00
msalettes
9c100830e0 Allow escaped proxy credentials (#2664) 2021-04-17 08:49:07 +01:00
Lovell Fuller
ed5d753b89 Skip shrink-on-load where one dimension <4px #2653 2021-04-07 21:26:16 +01:00
Timo Hausmann
d1ca756bd8 Docs: correct flatten example to use object instead of string (#2654) 2021-04-06 17:21:14 +01:00
102 changed files with 3030 additions and 933 deletions

.circleci/config.yml (new file, +77 lines)

@@ -0,0 +1,77 @@
version: 2.1
workflows:
build:
jobs:
- linux-arm64-glibc-node-12:
filters:
tags:
only: /^v.*/
- linux-arm64-musl-node-12:
filters:
tags:
only: /^v.*/
- linux-arm64-glibc-node-16:
filters:
tags:
only: /^v.*/
- linux-arm64-musl-node-16:
filters:
tags:
only: /^v.*/
jobs:
linux-arm64-glibc-node-12:
resource_class: arm.medium
machine:
image: ubuntu-2004:202101-01
steps:
- checkout
- run: |
sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp arm64v8/debian:bullseye
sudo docker exec sharp sh -c "apt-get update && apt-get install -y build-essential git python3 curl"
sudo docker exec sharp sh -c "curl -s https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -"
sudo docker exec sharp sh -c "echo 'deb https://deb.nodesource.com/node_12.x sid main' >/etc/apt/sources.list.d/nodesource.list"
sudo docker exec sharp sh -c "apt-get update && apt-get install -y nodejs"
- run: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
- run: sudo docker exec sharp sh -c "npm test"
- run: "[[ -n $CIRCLE_TAG ]] && sudo docker exec --env prebuild_upload sharp sh -c \"npx prebuild --runtime napi --target 5 --upload=$prebuild_upload\" || true"
linux-arm64-glibc-node-16:
resource_class: arm.medium
machine:
image: ubuntu-2004:202101-01
steps:
- checkout
- run: |
sudo chown 0.0 ${PWD}
sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp arm64v8/debian:bullseye
sudo docker exec sharp sh -c "apt-get update && apt-get install -y build-essential git python3 curl"
sudo docker exec sharp sh -c "curl -s https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -"
sudo docker exec sharp sh -c "echo 'deb https://deb.nodesource.com/node_16.x sid main' >/etc/apt/sources.list.d/nodesource.list"
sudo docker exec sharp sh -c "apt-get update && apt-get install -y nodejs"
- run: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
- run: sudo docker exec sharp sh -c "npm test"
linux-arm64-musl-node-12:
resource_class: arm.medium
machine:
image: ubuntu-2004:202101-01
steps:
- checkout
- run: |
sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp node:12-alpine3.11
sudo docker exec sharp sh -c "apk add build-base git python3 --update-cache"
- run: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
- run: sudo docker exec sharp sh -c "npm test"
- run: "[[ -n $CIRCLE_TAG ]] && sudo docker exec --env prebuild_upload sharp sh -c \"npx prebuild --runtime napi --target 5 --upload=$prebuild_upload\" || true"
linux-arm64-musl-node-16:
resource_class: arm.medium
machine:
image: ubuntu-2004:202101-01
steps:
- checkout
- run: |
sudo chown 0.0 ${PWD}
sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp node:16-alpine3.11
sudo docker exec sharp sh -c "apk add build-base git python3 --update-cache"
- run: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
- run: sudo docker exec sharp sh -c "npm test"


@@ -5,6 +5,7 @@ task:
name: FreeBSD 13.0
env:
IGNORE_OSVERSION: yes
skip_notifications: true
prerequisites_script:
- pkg update -f
- pkg upgrade -y

.github/workflows/ci-darwin-arm64v8.yml (new file, +27 lines)

@@ -0,0 +1,27 @@
name: CI (MacStadium)
on:
- push
- pull_request
jobs:
CI:
runs-on: macos-m1
defaults:
run:
shell: /usr/bin/arch -arch arm64e /bin/bash -l {0}
steps:
- name: Dependencies
uses: actions/setup-node@v2
with:
node-version: 16
architecture: arm64
- name: Checkout
uses: actions/checkout@v2
- name: Install
run: npm install --build-from-source --unsafe-perm
- name: Test
run: npm test
- name: Prebuild
if: startsWith(github.ref, 'refs/tags/')
env:
prebuild_upload: ${{ secrets.GITHUB_TOKEN }}
run: npx prebuild --runtime napi --target 5


@@ -1,3 +1,4 @@
name: CI (GitHub)
on:
- push
- pull_request
@@ -11,53 +12,46 @@ jobs:
include:
- os: ubuntu-20.04
container: centos:7
nodejs_version: 10
nodejs_version: 12
coverage: true
prebuild: true
- os: ubuntu-20.04
container: centos:7
nodejs_version: 12
- os: ubuntu-20.04
container: centos:7
nodejs_version: 14
- os: ubuntu-20.04
container: centos:7
nodejs_version: 15
- os: ubuntu-20.04
container: node:10-alpine3.11
prebuild: true
nodejs_version: 16
- os: ubuntu-20.04
container: node:12-alpine3.11
prebuild: true
- os: ubuntu-20.04
container: node:14-alpine3.11
- os: ubuntu-20.04
container: node:14-alpine3.13
- os: ubuntu-20.04
container: node:15-alpine3.11
- os: macos-10.15
nodejs_version: 10
prebuild: true
container: node:16-alpine3.11
- os: macos-10.15
nodejs_version: 12
prebuild: true
- os: macos-10.15
nodejs_version: 14
- os: macos-10.15
nodejs_version: 15
- os: windows-2019
nodejs_version: 10
prebuild: true
nodejs_version: 16
- os: windows-2019
nodejs_version: 12
prebuild: true
- os: windows-2019
nodejs_version: 14
- os: windows-2019
nodejs_version: 15
nodejs_version: 16
steps:
- name: Dependencies (Linux glibc)
if: contains(matrix.container, 'centos')
run: |
curl -sL https://rpm.nodesource.com/setup_${{ matrix.nodejs_version }}.x | bash -
yum install -y gcc-c++ make git python3 nodejs
yum install -y centos-release-scl
yum install -y devtoolset-10-gcc-c++ make git python3 nodejs
echo "/opt/rh/devtoolset-10/root/usr/bin" >> $GITHUB_PATH
- name: Dependencies (Linux musl)
if: contains(matrix.container, 'alpine')
run: apk add build-base git python3 --update-cache
@@ -84,4 +78,4 @@ jobs:
if: matrix.prebuild && startsWith(github.ref, 'refs/tags/')
env:
prebuild_upload: ${{ secrets.GITHUB_TOKEN }}
run: npx prebuild --runtime napi --target 3
run: npx prebuild --runtime napi --target 5


@@ -1,4 +1,4 @@
{
"include-regex": "(sharp\\.node|libvips-cpp\\.dll)",
"include-regex": "(sharp-.+\\.node|libvips-cpp\\.dll)",
"strip": true
}


@@ -1,108 +0,0 @@
jobs:
include:
- name: "Linux ARM64v8 (Debian 11, glibc 2.29) - Node.js 10"
arch: arm64
os: linux
dist: bionic
language: shell
before_install:
- sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp arm64v8/debian:bullseye
- sudo docker exec sharp sh -c "apt-get update && apt-get install -y build-essential git python3 curl"
- sudo docker exec sharp sh -c "curl -s https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -"
- sudo docker exec sharp sh -c "echo 'deb https://deb.nodesource.com/node_10.x sid main' >/etc/apt/sources.list.d/nodesource.list"
- sudo docker exec sharp sh -c "apt-get update && apt-get install -y nodejs=10.*"
install: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
script: sudo docker exec sharp sh -c "npm test"
after_success: "[[ -n $TRAVIS_TAG ]] && sudo docker exec --env prebuild_upload sharp sh -c \"npx prebuild --runtime napi --target 3\""
- name: "Linux ARM64v8 (Debian 11, glibc 2.29) - Node.js 12"
arch: arm64
os: linux
dist: bionic
language: shell
before_install:
- sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp arm64v8/debian:bullseye
- sudo docker exec sharp sh -c "apt-get update && apt-get install -y build-essential git python3 curl"
- sudo docker exec sharp sh -c "curl -s https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -"
- sudo docker exec sharp sh -c "echo 'deb https://deb.nodesource.com/node_12.x sid main' >/etc/apt/sources.list.d/nodesource.list"
- sudo docker exec sharp sh -c "apt-get update && apt-get install -y nodejs"
install: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
script: sudo docker exec sharp sh -c "npm test"
- name: "Linux ARM64v8 (Debian 11, glibc 2.29) - Node.js 14"
arch: arm64
os: linux
dist: bionic
language: shell
before_install:
- sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp arm64v8/debian:bullseye
- sudo docker exec sharp sh -c "apt-get update && apt-get install -y build-essential git python3 curl"
- sudo docker exec sharp sh -c "curl -s https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -"
- sudo docker exec sharp sh -c "echo 'deb https://deb.nodesource.com/node_14.x sid main' >/etc/apt/sources.list.d/nodesource.list"
- sudo docker exec sharp sh -c "apt-get update && apt-get install -y nodejs"
install: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
script: sudo docker exec sharp sh -c "npm test"
- name: "Linux ARM64v8 (Debian 11, glibc 2.29) - Node.js 15"
arch: arm64
os: linux
dist: bionic
language: shell
before_install:
- sudo chown 0.0 ${PWD}
- sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp arm64v8/debian:bullseye
- sudo docker exec sharp sh -c "apt-get update && apt-get install -y build-essential git python3 curl"
- sudo docker exec sharp sh -c "curl -s https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -"
- sudo docker exec sharp sh -c "echo 'deb https://deb.nodesource.com/node_15.x sid main' >/etc/apt/sources.list.d/nodesource.list"
- sudo docker exec sharp sh -c "apt-get update && apt-get install -y nodejs"
install: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
script: sudo docker exec sharp sh -c "npm test"
- name: "Linux ARM64v8 (Alpine 3.11, musl 1.1.24) - Node.js 10"
arch: arm64
os: linux
dist: focal
language: shell
before_install:
- sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp node:10-alpine3.11
- sudo docker exec sharp sh -c "apk add build-base git python3 --update-cache"
install: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
script: sudo docker exec sharp sh -c "npm test"
after_success: "[[ -n $TRAVIS_TAG ]] && sudo docker exec --env prebuild_upload sharp sh -c \"npx prebuild --runtime napi --target 3\""
- name: "Linux ARM64v8 (Alpine 3.11, musl 1.1.24) - Node.js 12"
arch: arm64
os: linux
dist: focal
language: shell
before_install:
- sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp node:12-alpine3.11
- sudo docker exec sharp sh -c "apk add build-base git python3 --update-cache"
install: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
script: sudo docker exec sharp sh -c "npm test"
- name: "Linux ARM64v8 (Alpine 3.11, musl 1.1.24) - Node.js 14"
arch: arm64
os: linux
dist: focal
language: shell
before_install:
- sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp node:14-alpine3.11
- sudo docker exec sharp sh -c "apk add build-base git python3 --update-cache"
install: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
script: sudo docker exec sharp sh -c "npm test"
- name: "Linux ARM64v8 (Alpine 3.11, musl 1.1.24) - Node.js 15"
arch: arm64
os: linux
dist: focal
language: shell
before_install:
- sudo chown 0.0 ${PWD}
- sudo docker run -dit --name sharp --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp node:15-alpine3.11
- sudo docker exec sharp sh -c "apk add build-base git python3 --update-cache"
install: sudo docker exec sharp sh -c "npm install --build-from-source --unsafe-perm"
script: sudo docker exec sharp sh -c "npm test"
cache:
npm: false


@@ -16,7 +16,7 @@ Lanczos resampling ensures quality is not sacrificed for speed.
As well as image resizing, operations such as
rotation, extraction, compositing and gamma correction are available.
Most modern macOS, Windows and Linux systems running Node.js v10+
Most modern macOS, Windows and Linux systems running Node.js >= 12.13.0
do not require any additional install or runtime dependencies.
## Documentation
@@ -99,7 +99,7 @@ A [guide for contributors](https://github.com/lovell/sharp/blob/master/.github/C
covers reporting bugs, requesting features and submitting code changes.
[![Test Coverage](https://coveralls.io/repos/lovell/sharp/badge.svg?branch=master)](https://coveralls.io/r/lovell/sharp?branch=master)
[![N-API v3](https://img.shields.io/badge/N--API-v3-green.svg)](https://nodejs.org/dist/latest/docs/api/n-api.html#n_api_n_api_version_matrix)
[![Node-API v5](https://img.shields.io/badge/Node--API-v5-green.svg)](https://nodejs.org/dist/latest/docs/api/n-api.html#n_api_n_api_version_matrix)
## Licensing


@@ -4,15 +4,14 @@ build: off
platform: x86
environment:
matrix:
- nodejs_version: "10"
prebuild: true
- nodejs_version: "12"
prebuild: true
- nodejs_version: "14"
- nodejs_version: "15"
- nodejs_version: "16"
install:
- ps: Update-NodeJsInstallation (Get-NodeJsLatestBuild $env:nodejs_version)
- npm install --build-from-source
test_script:
- npm test
on_success:
- if [%prebuild%] == [true] if [%APPVEYOR_REPO_TAG%] == [true] npx prebuild --runtime napi --target 3
- if [%prebuild%] == [true] if [%APPVEYOR_REPO_TAG%] == [true] npx prebuild --runtime napi --target 5


@@ -1,7 +1,8 @@
{
'variables': {
'vips_version': '<!(node -p "require(\'./lib/libvips\').minimumLibvipsVersion")',
'sharp_vendor_dir': '<(module_root_dir)/vendor/<(vips_version)'
'platform_and_arch': '<!(node -p "require(\'./lib/platform\')()")',
'sharp_vendor_dir': './vendor/<(vips_version)/<(platform_and_arch)'
},
'targets': [{
'target_name': 'libvips-cpp',
@@ -37,6 +38,7 @@
'msvs_settings': {
'VCCLCompilerTool': {
'ExceptionHandling': 1,
'Optimization': 1,
'WholeProgramOptimization': 'true'
},
'VCLibrarianTool': {
@@ -65,9 +67,9 @@
}]
]
}, {
'target_name': 'sharp',
'target_name': 'sharp-<(platform_and_arch)',
'defines': [
'NAPI_VERSION=3'
'NAPI_VERSION=5'
],
'dependencies': [
'<!(node -p "require(\'node-addon-api\').gyp")',
@@ -138,7 +140,7 @@
}],
['OS == "mac"', {
'link_settings': {
'library_dirs': ['<(sharp_vendor_dir)/lib'],
'library_dirs': ['../<(sharp_vendor_dir)/lib'],
'libraries': [
'libvips-cpp.42.dylib'
]
@@ -146,7 +148,7 @@
'xcode_settings': {
'OTHER_LDFLAGS': [
# Ensure runtime linking is relative to sharp.node
'-Wl,-rpath,\'@loader_path/../../vendor/<(vips_version)/lib\''
'-Wl,-rpath,\'@loader_path/../../<(sharp_vendor_dir)/lib\''
]
}
}],
@@ -155,13 +157,13 @@
'_GLIBCXX_USE_CXX11_ABI=1'
],
'link_settings': {
'library_dirs': ['<(sharp_vendor_dir)/lib'],
'library_dirs': ['../<(sharp_vendor_dir)/lib'],
'libraries': [
'-l:libvips-cpp.so.42'
],
'ldflags': [
# Ensure runtime linking is relative to sharp.node
'-Wl,-s -Wl,--disable-new-dtags -Wl,-rpath=\'$$ORIGIN/../../vendor/<(vips_version)/lib\''
'-Wl,-s -Wl,--disable-new-dtags -Wl,-rpath=\'$$ORIGIN/../../<(sharp_vendor_dir)/lib\''
]
}
}]
@@ -172,7 +174,7 @@
'-std=c++0x',
'-fexceptions',
'-Wall',
'-O3'
'-Os'
],
'xcode_settings': {
'CLANG_CXX_LANGUAGE_STANDARD': 'c++11',
@@ -182,7 +184,7 @@
'OTHER_CPLUSPLUSFLAGS': [
'-fexceptions',
'-Wall',
'-O3'
'-Oz'
]
},
'configurations': {
@@ -202,6 +204,7 @@
'msvs_settings': {
'VCCLCompilerTool': {
'ExceptionHandling': 1,
'Optimization': 1,
'WholeProgramOptimization': 'true'
},
'VCLibrarianTool': {


@@ -16,7 +16,7 @@ Lanczos resampling ensures quality is not sacrificed for speed.
As well as image resizing, operations such as
rotation, extraction, compositing and gamma correction are available.
Most modern macOS, Windows and Linux systems running Node.js v10+
Most modern macOS, Windows and Linux systems running Node.js >= 12.13.0
do not require any additional install or runtime dependencies.
### Formats


@@ -4,6 +4,8 @@
Remove alpha channel, if any. This is a no-op if the image does not have an alpha channel.
See also [flatten][1].
### Examples
```javascript
@@ -25,7 +27,7 @@ This is a no-op if the image already has an alpha channel.
### Parameters
- `alpha` **[number][1]** alpha transparency level (0=fully-transparent, 1=fully-opaque) (optional, default `1`)
* `alpha` **[number][2]** alpha transparency level (0=fully-transparent, 1=fully-opaque) (optional, default `1`)
### Examples
@@ -43,13 +45,13 @@ const rgba = await sharp(rgb)
.toBuffer();
```
- Throws **[Error][2]** Invalid alpha transparency level
* Throws **[Error][3]** Invalid alpha transparency level
Returns **Sharp**
**Meta**
- **since**: 0.21.2
* **since**: 0.21.2
## extractChannel
@@ -57,21 +59,26 @@ Extract a single channel from a multi-channel image.
### Parameters
- `channel` **([number][1] \| [string][3])** zero-indexed channel/band number to extract, or `red`, `green`, `blue` or `alpha`.
* `channel` **([number][2] | [string][4])** zero-indexed channel/band number to extract, or `red`, `green`, `blue` or `alpha`.
### Examples
```javascript
sharp(input)
// green.jpg is a greyscale image containing the green channel of the input
await sharp(input)
.extractChannel('green')
.toColourspace('b-w')
.toFile('green.jpg', function(err, info) {
// info.channels === 1
// green.jpg is a greyscale image containing the green channel of the input
});
.toFile('green.jpg');
```
- Throws **[Error][2]** Invalid channel
```javascript
// red1 is the red value of the first pixel, red2 the second pixel etc.
const [red1, red2, ...] = await sharp(input)
.extractChannel(0)
.raw()
.toBuffer();
```
* Throws **[Error][3]** Invalid channel
Returns **Sharp**
@@ -82,19 +89,20 @@ The meaning of the added channels depends on the output colourspace, set with `t
By default the output image will be web-friendly sRGB, with additional channels interpreted as alpha channels.
Channel ordering follows vips convention:
- sRGB: 0: Red, 1: Green, 2: Blue, 3: Alpha.
- CMYK: 0: Magenta, 1: Cyan, 2: Yellow, 3: Black, 4: Alpha.
* sRGB: 0: Red, 1: Green, 2: Blue, 3: Alpha.
* CMYK: 0: Magenta, 1: Cyan, 2: Yellow, 3: Black, 4: Alpha.
Buffers may be any of the image formats supported by sharp.
For raw pixel input, the `options` object should contain a `raw` attribute, which follows the format of the attribute of the same name in the `sharp()` constructor.
### Parameters
- `images` **([Array][4]&lt;([string][3] \| [Buffer][5])> | [string][3] \| [Buffer][5])** one or more images (file paths, Buffers).
- `options` **[Object][6]** image options, see `sharp()` constructor.
* `images` **([Array][5]<([string][4] | [Buffer][6])> | [string][4] | [Buffer][6])** one or more images (file paths, Buffers).
* `options` **[Object][7]** image options, see `sharp()` constructor.
<!---->
- Throws **[Error][2]** Invalid parameters
* Throws **[Error][3]** Invalid parameters
Returns **Sharp**
@@ -104,7 +112,7 @@ Perform a bitwise boolean operation on all input image channels (bands) to produ
### Parameters
- `boolOp` **[string][3]** one of `and`, `or` or `eor` to perform that bitwise operation, like the C logic operators `&`, `|` and `^` respectively.
* `boolOp` **[string][4]** one of `and`, `or` or `eor` to perform that bitwise operation, like the C logic operators `&`, `|` and `^` respectively.
### Examples
@@ -118,18 +126,20 @@ sharp('3-channel-rgb-input.png')
});
```
- Throws **[Error][2]** Invalid parameters
* Throws **[Error][3]** Invalid parameters
Returns **Sharp**
[1]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number
[1]: /api-operation#flatten
[2]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error
[2]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number
[3]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String
[3]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error
[4]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Array
[4]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String
[5]: https://nodejs.org/api/buffer.html
[5]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Array
[6]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object
[6]: https://nodejs.org/api/buffer.html
[7]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object


@@ -7,10 +7,11 @@ An alpha channel may be present and will be unchanged by the operation.
### Parameters
- `rgb` **([string][1] \| [Object][2])** parsed by the [color][3] module to extract chroma values.
* `rgb` **([string][1] | [Object][2])** parsed by the [color][3] module to extract chroma values.
<!---->
- Throws **[Error][4]** Invalid parameter
* Throws **[Error][4]** Invalid parameter
Returns **Sharp**
@@ -25,7 +26,7 @@ An alpha channel may be present, and will be unchanged by the operation.
### Parameters
- `greyscale` **[Boolean][5]** (optional, default `true`)
* `greyscale` **[Boolean][5]** (optional, default `true`)
Returns **Sharp**
@@ -35,7 +36,52 @@ Alternative spelling of `greyscale`.
### Parameters
- `grayscale` **[Boolean][5]** (optional, default `true`)
* `grayscale` **[Boolean][5]** (optional, default `true`)
Returns **Sharp**
## pipelineColourspace
Set the pipeline colourspace.
The input image will be converted to the provided colourspace at the start of the pipeline.
All operations will use this colourspace before converting to the output colourspace, as defined by [toColourspace][6].
This feature is experimental and has not yet been fully-tested with all operations.
### Parameters
* `colourspace` **[string][1]?** pipeline colourspace e.g. `rgb16`, `scrgb`, `lab`, `grey16` [...][7]
### Examples
```javascript
// Run pipeline in 16 bits per channel RGB while converting final result to 8 bits per channel sRGB.
await sharp(input)
.pipelineColourspace('rgb16')
.toColourspace('srgb')
.toFile('16bpc-pipeline-to-8bpc-output.png')
```
* Throws **[Error][4]** Invalid parameters
Returns **Sharp**
**Meta**
* **since**: 0.29.0
## pipelineColorspace
Alternative spelling of `pipelineColourspace`.
### Parameters
* `colorspace` **[string][1]?** pipeline colorspace.
<!---->
* Throws **[Error][4]** Invalid parameters
Returns **Sharp**
@@ -46,7 +92,7 @@ By default output image will be web-friendly sRGB, with additional channels inte
### Parameters
- `colourspace` **[string][1]?** output colourspace e.g. `srgb`, `rgb`, `cmyk`, `lab`, `b-w` [...][6]
* `colourspace` **[string][1]?** output colourspace e.g. `srgb`, `rgb`, `cmyk`, `lab`, `b-w` [...][8]
### Examples
@@ -57,7 +103,7 @@ await sharp(input)
.toFile('16-bpp.png')
```
- Throws **[Error][4]** Invalid parameters
* Throws **[Error][4]** Invalid parameters
Returns **Sharp**
@@ -67,10 +113,11 @@ Alternative spelling of `toColourspace`.
### Parameters
- `colorspace` **[string][1]?** output colorspace.
* `colorspace` **[string][1]?** output colorspace.
<!---->
- Throws **[Error][4]** Invalid parameters
* Throws **[Error][4]** Invalid parameters
Returns **Sharp**
@@ -84,4 +131,8 @@ Returns **Sharp**
[5]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean
[6]: https://github.com/libvips/libvips/blob/master/libvips/iofuncs/enumtypes.c#L568
[6]: #tocolourspace
[7]: https://github.com/libvips/libvips/blob/41cff4e9d0838498487a00623462204eb10ee5b8/libvips/iofuncs/enumtypes.c#L774
[8]: https://github.com/libvips/libvips/blob/3c0bfdf74ce1dc37a6429bed47fa76f16e2cd70a/libvips/iofuncs/enumtypes.c#L777-L794


@@ -19,24 +19,30 @@ and [https://www.cairographics.org/operators/][2]
### Parameters
- `images` **[Array][3]&lt;[Object][4]>** Ordered list of images to composite
- `images[].input` **([Buffer][5] \| [String][6])?** Buffer containing image data, String containing the path to an image file, or Create object (see below)
- `images[].input.create` **[Object][4]?** describes a blank overlay to be created.
- `images[].input.create.width` **[Number][7]?**
- `images[].input.create.height` **[Number][7]?**
- `images[].input.create.channels` **[Number][7]?** 3-4
- `images[].input.create.background` **([String][6] \| [Object][4])?** parsed by the [color][8] module to extract values for red, green, blue and alpha.
- `images[].blend` **[String][6]** how to blend this image with the image below. (optional, default `'over'`)
- `images[].gravity` **[String][6]** gravity at which to place the overlay. (optional, default `'centre'`)
- `images[].top` **[Number][7]?** the pixel offset from the top edge.
- `images[].left` **[Number][7]?** the pixel offset from the left edge.
- `images[].tile` **[Boolean][9]** set to true to repeat the overlay image across the entire image with the given `gravity`. (optional, default `false`)
- `images[].premultiplied` **[Boolean][9]** set to true to avoid premultipling the image below. Equivalent to the `--premultiplied` vips option. (optional, default `false`)
- `images[].density` **[Number][7]** number representing the DPI for vector overlay image. (optional, default `72`)
- `images[].raw` **[Object][4]?** describes overlay when using raw pixel data.
- `images[].raw.width` **[Number][7]?**
- `images[].raw.height` **[Number][7]?**
- `images[].raw.channels` **[Number][7]?**
* `images` **[Array][3]<[Object][4]>** Ordered list of images to composite
* `images[].input` **([Buffer][5] | [String][6])?** Buffer containing image data, String containing the path to an image file, or Create object (see below)
* `images[].input.create` **[Object][4]?** describes a blank overlay to be created.
* `images[].input.create.width` **[Number][7]?**
* `images[].input.create.height` **[Number][7]?**
* `images[].input.create.channels` **[Number][7]?** 3-4
* `images[].input.create.background` **([String][6] | [Object][4])?** parsed by the [color][8] module to extract values for red, green, blue and alpha.
* `images[].blend` **[String][6]** how to blend this image with the image below. (optional, default `'over'`)
* `images[].gravity` **[String][6]** gravity at which to place the overlay. (optional, default `'centre'`)
* `images[].top` **[Number][7]?** the pixel offset from the top edge.
* `images[].left` **[Number][7]?** the pixel offset from the left edge.
* `images[].tile` **[Boolean][9]** set to true to repeat the overlay image across the entire image with the given `gravity`. (optional, default `false`)
* `images[].premultiplied` **[Boolean][9]** set to true to avoid premultipling the image below. Equivalent to the `--premultiplied` vips option. (optional, default `false`)
* `images[].density` **[Number][7]** number representing the DPI for vector overlay image. (optional, default `72`)
* `images[].raw` **[Object][4]?** describes overlay when using raw pixel data.
* `images[].raw.width` **[Number][7]?**
* `images[].raw.height` **[Number][7]?**
* `images[].raw.channels` **[Number][7]?**
* `images[].failOnError` **[boolean][9]** @see [constructor parameters][10] (optional, default `true`)
* `images[].limitInputPixels` **([number][7] | [boolean][9])** @see [constructor parameters][10] (optional, default `268402689`)
### Examples
@@ -57,13 +63,13 @@ sharp('input.png')
});
```
- Throws **[Error][10]** Invalid parameters
* Throws **[Error][11]** Invalid parameters
Returns **Sharp**
**Meta**
- **since**: 0.22.0
* **since**: 0.22.0
[1]: https://libvips.github.io/libvips/API/current/libvips-conversion.html#VipsBlendMode
@@ -83,4 +89,6 @@ Returns **Sharp**
[9]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean
[10]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error
[10]: /api-constructor#parameters
[11]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error
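
A minimal sketch of the per-overlay `failOnError` and `limitInputPixels` options documented in the diff above; file names and values are illustrative.

```javascript
await sharp('background.png')
  .composite([
    {
      input: 'overlay.png',
      gravity: 'northwest',
      failOnError: false,     // tolerate a slightly corrupt overlay
      limitInputPixels: false // lift the pixel limit for this overlay only
    }
  ])
  .toFile('combined.png');
```
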


@@ -13,37 +13,44 @@ Implements the [stream.Duplex][1] class.
### Parameters
- `input` **([Buffer][2] \| [Uint8Array][3] \| [Uint8ClampedArray][4] \| [string][5])?** if present, can be
a Buffer / Uint8Array / Uint8ClampedArray containing JPEG, PNG, WebP, AVIF, GIF, SVG, TIFF or raw pixel image data, or
a String containing the filesystem path to an JPEG, PNG, WebP, AVIF, GIF, SVG or TIFF image file.
JPEG, PNG, WebP, AVIF, GIF, SVG, TIFF or raw pixel image data can be streamed into the object when not present.
- `options` **[Object][6]?** if present, is an Object with optional attributes.
- `options.failOnError` **[boolean][7]** by default halt processing and raise an error when loading invalid images.
Set this flag to `false` if you'd rather apply a "best effort" to decode images, even if the data is corrupt or invalid. (optional, default `true`)
- `options.limitInputPixels` **([number][8] \| [boolean][7])** Do not process input images where the number of pixels
(width x height) exceeds this limit. Assumes image dimensions contained in the input metadata can be trusted.
An integral Number of pixels, zero or false to remove limit, true to use default limit of 268402689 (0x3FFF x 0x3FFF). (optional, default `268402689`)
- `options.sequentialRead` **[boolean][7]** Set this to `true` to use sequential rather than random access where possible.
This can reduce memory usage and might improve performance on some systems. (optional, default `false`)
- `options.density` **[number][8]** number representing the DPI for vector images in the range 1 to 100000. (optional, default `72`)
- `options.pages` **[number][8]** number of pages to extract for multi-page input (GIF, WebP, AVIF, TIFF, PDF), use -1 for all pages. (optional, default `1`)
- `options.page` **[number][8]** page number to start extracting from for multi-page input (GIF, WebP, AVIF, TIFF, PDF), zero based. (optional, default `0`)
- `options.subifd` **[number][8]** subIFD (Sub Image File Directory) to extract for OME-TIFF, defaults to main image. (optional, default `-1`)
- `options.level` **[number][8]** level to extract from a multi-level input (OpenSlide), zero based. (optional, default `0`)
- `options.animated` **[boolean][7]** Set to `true` to read all frames/pages of an animated image (equivalent of setting `pages` to `-1`). (optional, default `false`)
- `options.raw` **[Object][6]?** describes raw pixel input image data. See `raw()` for pixel ordering.
- `options.raw.width` **[number][8]?** integral number of pixels wide.
- `options.raw.height` **[number][8]?** integral number of pixels high.
- `options.raw.channels` **[number][8]?** integral number of channels, between 1 and 4.
- `options.create` **[Object][6]?** describes a new image to be created.
- `options.create.width` **[number][8]?** integral number of pixels wide.
- `options.create.height` **[number][8]?** integral number of pixels high.
- `options.create.channels` **[number][8]?** integral number of channels, either 3 (RGB) or 4 (RGBA).
- `options.create.background` **([string][5] \| [Object][6])?** parsed by the [color][9] module to extract values for red, green, blue and alpha.
- `options.create.noise` **[Object][6]?** describes a noise to be created.
- `options.create.noise.type` **[string][5]?** type of generated noise, currently only `gaussian` is supported.
- `options.create.noise.mean` **[number][8]?** mean of pixels in generated noise.
- `options.create.noise.sigma` **[number][8]?** standard deviation of pixels in generated noise.
* `input` **([Buffer][2] | [Uint8Array][3] | [Uint8ClampedArray][4] | [Int8Array][5] | [Uint16Array][6] | [Int16Array][7] | [Uint32Array][8] | [Int32Array][9] | [Float32Array][10] | [Float64Array][11] | [string][12])?** if present, can be
a Buffer / Uint8Array / Uint8ClampedArray containing JPEG, PNG, WebP, AVIF, GIF, SVG or TIFF image data, or
a TypedArray containing raw pixel image data, or
a String containing the filesystem path to an JPEG, PNG, WebP, AVIF, GIF, SVG or TIFF image file.
JPEG, PNG, WebP, AVIF, GIF, SVG, TIFF or raw pixel image data can be streamed into the object when not present.
* `options` **[Object][13]?** if present, is an Object with optional attributes.
* `options.failOnError` **[boolean][14]** by default halt processing and raise an error when loading invalid images.
Set this flag to `false` if you'd rather apply a "best effort" to decode images, even if the data is corrupt or invalid. (optional, default `true`)
* `options.limitInputPixels` **([number][15] | [boolean][14])** Do not process input images where the number of pixels
(width x height) exceeds this limit. Assumes image dimensions contained in the input metadata can be trusted.
An integral Number of pixels, zero or false to remove limit, true to use default limit of 268402689 (0x3FFF x 0x3FFF). (optional, default `268402689`)
* `options.sequentialRead` **[boolean][14]** Set this to `true` to use sequential rather than random access where possible.
This can reduce memory usage and might improve performance on some systems. (optional, default `false`)
* `options.density` **[number][15]** number representing the DPI for vector images in the range 1 to 100000. (optional, default `72`)
* `options.pages` **[number][15]** number of pages to extract for multi-page input (GIF, WebP, AVIF, TIFF, PDF), use -1 for all pages. (optional, default `1`)
* `options.page` **[number][15]** page number to start extracting from for multi-page input (GIF, WebP, AVIF, TIFF, PDF), zero based. (optional, default `0`)
* `options.subifd` **[number][15]** subIFD (Sub Image File Directory) to extract for OME-TIFF, defaults to main image. (optional, default `-1`)
* `options.level` **[number][15]** level to extract from a multi-level input (OpenSlide), zero based. (optional, default `0`)
* `options.animated` **[boolean][14]** Set to `true` to read all frames/pages of an animated image (equivalent of setting `pages` to `-1`). (optional, default `false`)
* `options.raw` **[Object][13]?** describes raw pixel input image data. See `raw()` for pixel ordering.
* `options.raw.width` **[number][15]?** integral number of pixels wide.
* `options.raw.height` **[number][15]?** integral number of pixels high.
* `options.raw.channels` **[number][15]?** integral number of channels, between 1 and 4.
* `options.raw.premultiplied` **[boolean][14]?** specifies that the raw input has already been premultiplied, set to `true`
to avoid sharp premultiplying the image. (optional, default `false`)
* `options.create` **[Object][13]?** describes a new image to be created.
* `options.create.width` **[number][15]?** integral number of pixels wide.
* `options.create.height` **[number][15]?** integral number of pixels high.
* `options.create.channels` **[number][15]?** integral number of channels, either 3 (RGB) or 4 (RGBA).
* `options.create.background` **([string][12] | [Object][13])?** parsed by the [color][16] module to extract values for red, green, blue and alpha.
* `options.create.noise` **[Object][13]?** describes a noise to be created.
* `options.create.noise.type` **[string][12]?** type of generated noise, currently only `gaussian` is supported.
* `options.create.noise.mean` **[number][15]?** mean of pixels in generated noise.
* `options.create.noise.sigma` **[number][15]?** standard deviation of pixels in generated noise.
### Examples
@@ -120,9 +127,9 @@ await sharp({
}).toFile('noise.png');
```
- Throws **[Error][10]** Invalid parameters
* Throws **[Error][17]** Invalid parameters
Returns **[Sharp][11]**
Returns **[Sharp][18]**
## clone
@@ -190,7 +197,7 @@ Promise.all(promises)
});
```
Returns **[Sharp][11]**
Returns **[Sharp][18]**
[1]: http://nodejs.org/api/stream.html#stream_class_stream_duplex
@@ -200,16 +207,30 @@ Returns **[Sharp][11]**
[4]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Uint8ClampedArray
[5]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String
[5]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Int8Array
[6]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object
[6]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Uint16Array
[7]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean
[7]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Int16Array
[8]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number
[8]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Uint32Array
[9]: https://www.npmjs.org/package/color
[9]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Int32Array
[10]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error
[10]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Float32Array
[11]: #sharp
[11]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Float64Array
[12]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String
[13]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object
[14]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean
[15]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number
[16]: https://www.npmjs.org/package/color
[17]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error
[18]: #sharp


@@ -5,35 +5,37 @@
Fast access to (uncached) image metadata without decoding any compressed image data.
A `Promise` is returned when `callback` is not provided.
- `format`: Name of decoder used to decompress image data e.g. `jpeg`, `png`, `webp`, `gif`, `svg`
- `size`: Total size of image in bytes, for Stream and Buffer input only
- `width`: Number of pixels wide (EXIF orientation is not taken into consideration)
- `height`: Number of pixels high (EXIF orientation is not taken into consideration)
- `space`: Name of colour space interpretation e.g. `srgb`, `rgb`, `cmyk`, `lab`, `b-w` [...][1]
- `channels`: Number of bands e.g. `3` for sRGB, `4` for CMYK
- `depth`: Name of pixel depth format e.g. `uchar`, `char`, `ushort`, `float` [...][2]
- `density`: Number of pixels per inch (DPI), if present
- `chromaSubsampling`: String containing JPEG chroma subsampling, `4:2:0` or `4:4:4` for RGB, `4:2:0:4` or `4:4:4:4` for CMYK
- `isProgressive`: Boolean indicating whether the image is interlaced using a progressive scan
- `pages`: Number of pages/frames contained within the image, with support for TIFF, HEIF, PDF, animated GIF and animated WebP
- `pageHeight`: Number of pixels high each page in a multi-page image will be.
- `loop`: Number of times to loop an animated image, zero refers to a continuous loop.
- `delay`: Delay in ms between each page in an animated image, provided as an array of integers.
- `pagePrimary`: Number of the primary page in a HEIF image
- `levels`: Details of each level in a multi-level image provided as an array of objects, requires libvips compiled with support for OpenSlide
- `subifds`: Number of Sub Image File Directories in an OME-TIFF image
- `hasProfile`: Boolean indicating the presence of an embedded ICC profile
- `hasAlpha`: Boolean indicating the presence of an alpha transparency channel
- `orientation`: Number value of the EXIF Orientation header, if present
- `exif`: Buffer containing raw EXIF data, if present
- `icc`: Buffer containing raw [ICC][3] profile data, if present
- `iptc`: Buffer containing raw IPTC data, if present
- `xmp`: Buffer containing raw XMP data, if present
- `tifftagPhotoshop`: Buffer containing raw TIFFTAG_PHOTOSHOP data, if present
* `format`: Name of decoder used to decompress image data e.g. `jpeg`, `png`, `webp`, `gif`, `svg`
* `size`: Total size of image in bytes, for Stream and Buffer input only
* `width`: Number of pixels wide (EXIF orientation is not taken into consideration)
* `height`: Number of pixels high (EXIF orientation is not taken into consideration)
* `space`: Name of colour space interpretation e.g. `srgb`, `rgb`, `cmyk`, `lab`, `b-w` [...][1]
* `channels`: Number of bands e.g. `3` for sRGB, `4` for CMYK
* `depth`: Name of pixel depth format e.g. `uchar`, `char`, `ushort`, `float` [...][2]
* `density`: Number of pixels per inch (DPI), if present
* `chromaSubsampling`: String containing JPEG chroma subsampling, `4:2:0` or `4:4:4` for RGB, `4:2:0:4` or `4:4:4:4` for CMYK
* `isProgressive`: Boolean indicating whether the image is interlaced using a progressive scan
* `pages`: Number of pages/frames contained within the image, with support for TIFF, HEIF, PDF, animated GIF and animated WebP
* `pageHeight`: Number of pixels high each page in a multi-page image will be.
* `loop`: Number of times to loop an animated image, zero refers to a continuous loop.
* `delay`: Delay in ms between each page in an animated image, provided as an array of integers.
* `pagePrimary`: Number of the primary page in a HEIF image
* `levels`: Details of each level in a multi-level image provided as an array of objects, requires libvips compiled with support for OpenSlide
* `subifds`: Number of Sub Image File Directories in an OME-TIFF image
* `background`: Default background colour, if present, for PNG (bKGD) and GIF images, either an RGB Object or a single greyscale value
* `compression`: The encoder used to compress an HEIF file, `av1` (AVIF) or `hevc` (HEIC)
* `hasProfile`: Boolean indicating the presence of an embedded ICC profile
* `hasAlpha`: Boolean indicating the presence of an alpha transparency channel
* `orientation`: Number value of the EXIF Orientation header, if present
* `exif`: Buffer containing raw EXIF data, if present
* `icc`: Buffer containing raw [ICC][3] profile data, if present
* `iptc`: Buffer containing raw IPTC data, if present
* `xmp`: Buffer containing raw XMP data, if present
* `tifftagPhotoshop`: Buffer containing raw TIFFTAG_PHOTOSHOP data, if present
### Parameters
- `callback` **[Function][4]?** called with the arguments `(err, metadata)`
* `callback` **[Function][4]?** called with the arguments `(err, metadata)`
### Examples
@@ -52,32 +54,35 @@ image
});
```
Returns **([Promise][5]&lt;[Object][6]> | Sharp)**
Returns **([Promise][5]<[Object][6]> | Sharp)**
## stats
Access to pixel-derived image statistics for every channel in the image.
A `Promise` is returned when `callback` is not provided.
- `channels`: Array of channel statistics for each channel in the image. Each channel statistic contains
- `min` (minimum value in the channel)
- `max` (maximum value in the channel)
- `sum` (sum of all values in a channel)
- `squaresSum` (sum of squared values in a channel)
- `mean` (mean of the values in a channel)
- `stdev` (standard deviation for the values in a channel)
- `minX` (x-coordinate of one of the pixel where the minimum lies)
- `minY` (y-coordinate of one of the pixel where the minimum lies)
- `maxX` (x-coordinate of one of the pixel where the maximum lies)
- `maxY` (y-coordinate of one of the pixel where the maximum lies)
- `isOpaque`: Is the image fully opaque? Will be `true` if the image has no alpha channel or if every pixel is fully opaque.
- `entropy`: Histogram-based estimation of greyscale entropy, discarding alpha channel if any (experimental)
- `sharpness`: Estimation of greyscale sharpness based on the standard deviation of a Laplacian convolution, discarding alpha channel if any (experimental)
- `dominant`: Object containing most dominant sRGB colour based on a 4096-bin 3D histogram (experimental)
* `channels`: Array of channel statistics for each channel in the image. Each channel statistic contains
* `min` (minimum value in the channel)
* `max` (maximum value in the channel)
* `sum` (sum of all values in a channel)
* `squaresSum` (sum of squared values in a channel)
* `mean` (mean of the values in a channel)
* `stdev` (standard deviation for the values in a channel)
* `minX` (x-coordinate of one of the pixel where the minimum lies)
* `minY` (y-coordinate of one of the pixel where the minimum lies)
* `maxX` (x-coordinate of one of the pixel where the maximum lies)
* `maxY` (y-coordinate of one of the pixel where the maximum lies)
* `isOpaque`: Is the image fully opaque? Will be `true` if the image has no alpha channel or if every pixel is fully opaque.
* `entropy`: Histogram-based estimation of greyscale entropy, discarding alpha channel if any (experimental)
* `sharpness`: Estimation of greyscale sharpness based on the standard deviation of a Laplacian convolution, discarding alpha channel if any (experimental)
* `dominant`: Object containing most dominant sRGB colour based on a 4096-bin 3D histogram (experimental)
**Note**: Statistics are derived from the original input image. Any operations performed on the image must first be
written to a buffer in order to run `stats` on the result (see third example).
### Parameters
- `callback` **[Function][4]?** called with the arguments `(err, stats)`
* `callback` **[Function][4]?** called with the arguments `(err, stats)`
### Examples
@@ -95,7 +100,15 @@ const { entropy, sharpness, dominant } = await sharp(input).stats();
const { r, g, b } = dominant;
```
Returns **[Promise][5]&lt;[Object][6]>**
```javascript
const image = sharp(input);
// store intermediate result
const part = await image.extract(region).toBuffer();
// create new instance to obtain statistics of extracted region
const stats = await sharp(part).stats();
```
Returns **[Promise][5]<[Object][6]>**
[1]: https://libvips.github.io/libvips/API/current/VipsImage.html#VipsInterpretation
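
A minimal sketch of the metadata properties added in the diff above; file names are illustrative.

```javascript
const { background } = await sharp('image.png').metadata();   // default bKGD colour, if present
const { compression } = await sharp('image.avif').metadata(); // 'av1' (AVIF) or 'hevc' (HEIC) for HEIF input
```
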


@@ -21,9 +21,10 @@ for example `rotate(x).extract(y)` will produce a different result to `extract(y
### Parameters
- `angle` **[number][1]** angle of rotation. (optional, default `auto`)
- `options` **[Object][2]?** if present, is an Object with optional attributes.
- `options.background` **([string][3] \| [Object][2])** parsed by the [color][4] module to extract values for red, green, blue and alpha. (optional, default `"#000000"`)
* `angle` **[number][1]** angle of rotation. (optional, default `auto`)
* `options` **[Object][2]?** if present, is an Object with optional attributes.
* `options.background` **([string][3] | [Object][2])** parsed by the [color][4] module to extract values for red, green, blue and alpha. (optional, default `"#000000"`)
### Examples
@@ -39,7 +40,7 @@ const pipeline = sharp()
readableStream.pipe(pipeline);
```
- Throws **[Error][5]** Invalid parameters
* Throws **[Error][5]** Invalid parameters
Returns **Sharp**
@@ -50,7 +51,7 @@ The use of `flip` implies the removal of the EXIF `Orientation` tag, if any.
### Parameters
- `flip` **[Boolean][6]** (optional, default `true`)
* `flip` **[Boolean][6]** (optional, default `true`)
Returns **Sharp**
@@ -61,7 +62,7 @@ The use of `flop` implies the removal of the EXIF `Orientation` tag, if any.
### Parameters
- `flop` **[Boolean][6]** (optional, default `true`)
* `flop` **[Boolean][6]** (optional, default `true`)
Returns **Sharp**
@@ -75,25 +76,26 @@ A particular interpolator may also be specified. Set the `interpolator` option t
In the case of a 2x2 matrix, the transform is:
- X = `matrix[0, 0]` \* (x + `idx`) + `matrix[0, 1]` \* (y + `idy`) + `odx`
- Y = `matrix[1, 0]` \* (x + `idx`) + `matrix[1, 1]` \* (y + `idy`) + `ody`
* X = `matrix[0, 0]` \* (x + `idx`) + `matrix[0, 1]` \* (y + `idy`) + `odx`
* Y = `matrix[1, 0]` \* (x + `idx`) + `matrix[1, 1]` \* (y + `idy`) + `ody`
where:
- x and y are the coordinates in input image.
- X and Y are the coordinates in output image.
- (0,0) is the upper left corner.
* x and y are the coordinates in input image.
* X and Y are the coordinates in output image.
* (0,0) is the upper left corner.
### Parameters
- `matrix` **([Array][7]&lt;[Array][7]&lt;[number][1]>> | [Array][7]&lt;[number][1]>)** affine transformation matrix
- `options` **[Object][2]?** if present, is an Object with optional attributes.
- `options.background` **([String][3] \| [Object][2])** parsed by the [color][4] module to extract values for red, green, blue and alpha. (optional, default `"#000000"`)
- `options.idx` **[Number][1]** input horizontal offset (optional, default `0`)
- `options.idy` **[Number][1]** input vertical offset (optional, default `0`)
- `options.odx` **[Number][1]** output horizontal offset (optional, default `0`)
- `options.ody` **[Number][1]** output vertical offset (optional, default `0`)
- `options.interpolator` **[String][3]** interpolator (optional, default `sharp.interpolators.bicubic`)
* `matrix` **([Array][7]<[Array][7]<[number][1]>> | [Array][7]<[number][1]>)** affine transformation matrix
* `options` **[Object][2]?** if present, is an Object with optional attributes.
* `options.background` **([String][3] | [Object][2])** parsed by the [color][4] module to extract values for red, green, blue and alpha. (optional, default `"#000000"`)
* `options.idx` **[Number][1]** input horizontal offset (optional, default `0`)
* `options.idy` **[Number][1]** input vertical offset (optional, default `0`)
* `options.odx` **[Number][1]** output horizontal offset (optional, default `0`)
* `options.ody` **[Number][1]** output vertical offset (optional, default `0`)
* `options.interpolator` **[String][3]** interpolator (optional, default `sharp.interpolators.bicubic`)
### Examples
@@ -112,13 +114,13 @@ inputStream
.pipe(pipeline);
```
- Throws **[Error][5]** Invalid parameters
* Throws **[Error][5]** Invalid parameters
Returns **Sharp**
**Meta**
- **since**: 0.27.0
* **since**: 0.27.0
## sharpen
@@ -129,12 +131,13 @@ Separate control over the level of sharpening in "flat" and "jagged" areas is av
### Parameters
- `sigma` **[number][1]?** the sigma of the Gaussian mask, where `sigma = 1 + radius / 2`.
- `flat` **[number][1]** the level of sharpening to apply to "flat" areas. (optional, default `1.0`)
- `jagged` **[number][1]** the level of sharpening to apply to "jagged" areas. (optional, default `2.0`)
* `sigma` **[number][1]?** the sigma of the Gaussian mask, where `sigma = 1 + radius / 2`.
* `flat` **[number][1]** the level of sharpening to apply to "flat" areas. (optional, default `1.0`)
* `jagged` **[number][1]** the level of sharpening to apply to "jagged" areas. (optional, default `2.0`)
<!---->
- Throws **[Error][5]** Invalid parameters
* Throws **[Error][5]** Invalid parameters
Returns **Sharp**
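A minimal sketch of typical usage, where `'input.jpg'` is a placeholder path:

```javascript
// mild overall sharpening using only a sigma value
const data = await sharp('input.jpg')
  .sharpen(2)
  .toBuffer();

// stronger sharpening of "jagged" areas than "flat" areas
const jaggier = await sharp('input.jpg')
  .sharpen(2, 0.5, 3)
  .toBuffer();
```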
@@ -145,10 +148,11 @@ When used without parameters the default window is 3x3.
### Parameters
- `size` **[number][1]** square mask size: size x size (optional, default `3`)
* `size` **[number][1]** square mask size: size x size (optional, default `3`)
<!---->
- Throws **[Error][5]** Invalid parameters
* Throws **[Error][5]** Invalid parameters
Returns **Sharp**
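For example, assuming `'input.jpg'` is a readable image:

```javascript
// apply a 5x5 median filter to reduce noise
const data = await sharp('input.jpg')
  .median(5)
  .toBuffer();
```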
@@ -160,10 +164,11 @@ When a `sigma` is provided, performs a slower, more accurate Gaussian blur.
### Parameters
- `sigma` **[number][1]?** a value between 0.3 and 1000 representing the sigma of the Gaussian mask, where `sigma = 1 + radius / 2`.
* `sigma` **[number][1]?** a value between 0.3 and 1000 representing the sigma of the Gaussian mask, where `sigma = 1 + radius / 2`.
<!---->
- Throws **[Error][5]** Invalid parameters
* Throws **[Error][5]** Invalid parameters
Returns **Sharp**
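A short sketch, with `'input.jpg'` as a placeholder input:

```javascript
// fast 3x3 box blur when no sigma is provided
const mild = await sharp('input.jpg')
  .blur()
  .toBuffer();

// slower, more accurate Gaussian blur with a sigma of 5
const strong = await sharp('input.jpg')
  .blur(5)
  .toBuffer();
```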
@@ -171,16 +176,19 @@ Returns **Sharp**
Merge alpha transparency channel, if any, with a background, then remove the alpha channel.
See also [removeAlpha][8].
### Parameters
- `options` **[Object][2]?**
- `options.background` **([string][3] \| [Object][2])** background colour, parsed by the [color][4] module, defaults to black. (optional, default `{r:0,g:0,b:0}`)
* `options` **[Object][2]?**
* `options.background` **([string][3] | [Object][2])** background colour, parsed by the [color][4] module, defaults to black. (optional, default `{r:0,g:0,b:0}`)
### Examples
```javascript
await sharp(rgbaInput)
.flatten('#F0A703')
.flatten({ background: '#F0A703' })
.toBuffer();
```
@@ -198,11 +206,12 @@ Supply a second argument to use a different output gamma value, otherwise the fi
### Parameters
- `gamma` **[number][1]** value between 1.0 and 3.0. (optional, default `2.2`)
- `gammaOut` **[number][1]?** value between 1.0 and 3.0. (optional, defaults to same as `gamma`)
* `gamma` **[number][1]** value between 1.0 and 3.0. (optional, default `2.2`)
* `gammaOut` **[number][1]?** value between 1.0 and 3.0. (optional, defaults to same as `gamma`)
<!---->
- Throws **[Error][5]** Invalid parameters
* Throws **[Error][5]** Invalid parameters
Returns **Sharp**
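A minimal sketch, assuming `'input.jpg'` exists:

```javascript
// resize with the default gamma correction of 2.2
const data = await sharp('input.jpg')
  .resize(200)
  .gamma()
  .toBuffer();

// supply a different output gamma value
const custom = await sharp('input.jpg')
  .resize(200)
  .gamma(2.2, 3.0)
  .toBuffer();
```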
@@ -212,7 +221,9 @@ Produce the "negative" of the image.
### Parameters
- `negate` **[Boolean][6]** (optional, default `true`)
* `options` **[Object][2]?**
* `options.alpha` **[Boolean][6]** Whether or not to negate any alpha channel (optional, default `true`)
Returns **Sharp**
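For example, with `'input.png'` as a placeholder path:

```javascript
// produce the "negative" while leaving any alpha channel untouched
const data = await sharp('input.png')
  .negate({ alpha: false })
  .toBuffer();
```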
@@ -222,7 +233,7 @@ Enhance output image contrast by stretching its luminance to cover the full dyna
### Parameters
- `normalise` **[Boolean][6]** (optional, default `true`)
* `normalise` **[Boolean][6]** (optional, default `true`)
Returns **Sharp**
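A minimal sketch, where `'input.jpg'` is a placeholder path:

```javascript
// stretch luminance to cover the full dynamic range
const data = await sharp('input.jpg')
  .normalise()
  .toBuffer();
```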
@@ -232,22 +243,50 @@ Alternative spelling of normalise.
### Parameters
- `normalize` **[Boolean][6]** (optional, default `true`)
* `normalize` **[Boolean][6]** (optional, default `true`)
Returns **Sharp**
## clahe
Perform contrast limiting adaptive histogram equalization
[CLAHE][9].
This will, in general, enhance the clarity of the image by bringing out darker details.
### Parameters
* `options` **[Object][2]**
* `options.width` **[number][1]** integer width of the region in pixels.
* `options.height` **[number][1]** integer height of the region in pixels.
* `options.maxSlope` **[number][1]** maximum value for the slope of the
cumulative histogram. A value of 0 disables contrast limiting. Valid values
are integers in the range 0-100 (inclusive) (optional, default `3`)
<!---->
* Throws **[Error][5]** Invalid parameters
Returns **Sharp**
**Meta**
* **since**: 0.28.3
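A minimal sketch based on the options above, with `'input.jpg'` as a placeholder:

```javascript
// equalise contrast over 3x3 pixel regions with the default maxSlope of 3
const data = await sharp('input.jpg')
  .clahe({ width: 3, height: 3 })
  .toBuffer();
```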
## convolve
Convolve the image with the specified kernel.
### Parameters
- `kernel` **[Object][2]**
- `kernel.width` **[number][1]** width of the kernel in pixels.
- `kernel.height` **[number][1]** width of the kernel in pixels.
- `kernel.kernel` **[Array][7]&lt;[number][1]>** Array of length `width*height` containing the kernel values.
- `kernel.scale` **[number][1]** the scale of the kernel in pixels. (optional, default `sum`)
- `kernel.offset` **[number][1]** the offset of the kernel in pixels. (optional, default `0`)
* `kernel` **[Object][2]**
* `kernel.width` **[number][1]** width of the kernel in pixels.
* `kernel.height` **[number][1]** height of the kernel in pixels.
* `kernel.kernel` **[Array][7]<[number][1]>** Array of length `width*height` containing the kernel values.
* `kernel.scale` **[number][1]** the scale of the kernel in pixels. (optional, default `sum`)
* `kernel.offset` **[number][1]** the offset of the kernel in pixels. (optional, default `0`)
### Examples
@@ -265,7 +304,7 @@ sharp(input)
});
```
- Throws **[Error][5]** Invalid parameters
* Throws **[Error][5]** Invalid parameters
Returns **Sharp**
@@ -275,13 +314,15 @@ Any pixel value greater than or equal to the threshold value will be set to 255,
### Parameters
- `threshold` **[number][1]** a value in the range 0-255 representing the level at which the threshold will be applied. (optional, default `128`)
- `options` **[Object][2]?**
- `options.greyscale` **[Boolean][6]** convert to single channel greyscale. (optional, default `true`)
- `options.grayscale` **[Boolean][6]** alternative spelling for greyscale. (optional, default `true`)
* `threshold` **[number][1]** a value in the range 0-255 representing the level at which the threshold will be applied. (optional, default `128`)
* `options` **[Object][2]?**
* `options.greyscale` **[Boolean][6]** convert to single channel greyscale. (optional, default `true`)
* `options.grayscale` **[Boolean][6]** alternative spelling for greyscale. (optional, default `true`)
- Throws **[Error][5]** Invalid parameters
<!---->
* Throws **[Error][5]** Invalid parameters
Returns **Sharp**
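For example, assuming `'input.jpg'` exists:

```javascript
// black and white output: pixels >= 128 become 255, the rest become 0
const data = await sharp('input.jpg')
  .threshold(128)
  .toBuffer();

// threshold each channel independently, keeping colour
const colour = await sharp('input.jpg')
  .threshold(100, { greyscale: false })
  .toBuffer();
```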
@@ -294,16 +335,19 @@ the selected bitwise boolean `operation` between the corresponding pixels of the
### Parameters
- `operand` **([Buffer][8] \| [string][3])** Buffer containing image data or string containing the path to an image file.
- `operator` **[string][3]** one of `and`, `or` or `eor` to perform that bitwise operation, like the C logic operators `&`, `|` and `^` respectively.
- `options` **[Object][2]?**
- `options.raw` **[Object][2]?** describes operand when using raw pixel data.
- `options.raw.width` **[number][1]?**
- `options.raw.height` **[number][1]?**
- `options.raw.channels` **[number][1]?**
* `operand` **([Buffer][10] | [string][3])** Buffer containing image data or string containing the path to an image file.
* `operator` **[string][3]** one of `and`, `or` or `eor` to perform that bitwise operation, like the C logic operators `&`, `|` and `^` respectively.
* `options` **[Object][2]?**
* `options.raw` **[Object][2]?** describes operand when using raw pixel data.
- Throws **[Error][5]** Invalid parameters
* `options.raw.width` **[number][1]?**
* `options.raw.height` **[number][1]?**
* `options.raw.channels` **[number][1]?**
<!---->
* Throws **[Error][5]** Invalid parameters
Returns **Sharp**
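A minimal sketch, where `'input.png'` and `'mask.png'` are placeholder paths to two images of matching dimensions:

```javascript
// bitwise AND of each pixel with the corresponding pixel of the operand
const data = await sharp('input.png')
  .boolean('mask.png', 'and')
  .toBuffer();
```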
@@ -313,11 +357,12 @@ Apply the linear formula a \* input + b to the image (levels adjustment)
### Parameters
- `a` **[number][1]** multiplier (optional, default `1.0`)
- `b` **[number][1]** offset (optional, default `0.0`)
* `a` **[number][1]** multiplier (optional, default `1.0`)
* `b` **[number][1]** offset (optional, default `0.0`)
<!---->
- Throws **[Error][5]** Invalid parameters
* Throws **[Error][5]** Invalid parameters
Returns **Sharp**
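For example, with `'input.jpg'` as a placeholder path:

```javascript
// increase contrast: multiply every value by 1.5 then subtract 20
const data = await sharp('input.jpg')
  .linear(1.5, -20)
  .toBuffer();
```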
@@ -327,7 +372,7 @@ Recomb the image with the specified matrix.
### Parameters
- `inputMatrix` **[Array][7]&lt;[Array][7]&lt;[number][1]>>** 3x3 Recombination matrix
* `inputMatrix` **[Array][7]<[Array][7]<[number][1]>>** 3x3 Recombination matrix
### Examples
@@ -345,31 +390,35 @@ sharp(input)
});
```
- Throws **[Error][5]** Invalid parameters
* Throws **[Error][5]** Invalid parameters
Returns **Sharp**
**Meta**
- **since**: 0.21.1
* **since**: 0.21.1
## modulate
Transforms the image using brightness, saturation and hue rotation.
Transforms the image using brightness, saturation, hue rotation, and lightness.
Brightness and lightness both operate on luminance, with the difference being that
brightness is multiplicative whereas lightness is additive.
### Parameters
- `options` **[Object][2]?**
- `options.brightness` **[number][1]?** Brightness multiplier
- `options.saturation` **[number][1]?** Saturation multiplier
- `options.hue` **[number][1]?** Degrees for hue rotation
* `options` **[Object][2]?**
* `options.brightness` **[number][1]?** Brightness multiplier
* `options.saturation` **[number][1]?** Saturation multiplier
* `options.hue` **[number][1]?** Degrees for hue rotation
* `options.lightness` **[number][1]?** Lightness addend
### Examples
```javascript
sharp(input)
.modulate({
brightness: 2 // increase lightness by a factor of 2
brightness: 2 // increase brightness by a factor of 2
});
sharp(input)
@@ -377,6 +426,11 @@ sharp(input)
hue: 180 // hue-rotate by 180 degrees
});
sharp(input)
.modulate({
lightness: 50 // increase lightness by +50
});
// decrease brightness and saturation while also hue-rotating by 90 degrees
sharp(input)
.modulate({
@@ -390,7 +444,7 @@ Returns **Sharp**
**Meta**
- **since**: 0.22.1
* **since**: 0.22.1
[1]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number
@@ -406,4 +460,8 @@ Returns **Sharp**
[7]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Array
[8]: https://nodejs.org/api/buffer.html
[8]: /api-channel#removealpha
[9]: https://en.wikipedia.org/wiki/Adaptive_histogram_equalization#Contrast_Limited_AHE
[10]: https://nodejs.org/api/buffer.html


@@ -11,12 +11,14 @@ Note that raw pixel data is only supported for buffer output.
By default all metadata will be removed, which includes EXIF-based orientation.
See [withMetadata][1] for control over this.
The caller is responsible for ensuring directory structures and permissions exist.
A `Promise` is returned when `callback` is not provided.
### Parameters
- `fileOut` **[string][2]** the path to write the image data to.
- `callback` **[Function][3]?** called on completion with two arguments `(err, info)`.
* `fileOut` **[string][2]** the path to write the image data to.
* `callback` **[Function][3]?** called on completion with two arguments `(err, info)`.
`info` contains the output image `format`, `size` (bytes), `width`, `height`,
`channels` and `premultiplied` (indicating if premultiplication was used).
When using a crop strategy also contains `cropOffsetLeft` and `cropOffsetTop`.
@@ -35,9 +37,9 @@ sharp(input)
.catch(err => { ... });
```
- Throws **[Error][4]** Invalid parameters
* Throws **[Error][4]** Invalid parameters
Returns **[Promise][5]&lt;[Object][6]>** when no callback is provided
Returns **[Promise][5]<[Object][6]>** when no callback is provided
## toBuffer
@@ -51,19 +53,21 @@ See [withMetadata][1] for control over this.
`callback`, if present, gets three arguments `(err, data, info)` where:
- `err` is an error, if any.
- `data` is the output image data.
- `info` contains the output image `format`, `size` (bytes), `width`, `height`,
`channels` and `premultiplied` (indicating if premultiplication was used).
When using a crop strategy also contains `cropOffsetLeft` and `cropOffsetTop`.
* `err` is an error, if any.
* `data` is the output image data.
* `info` contains the output image `format`, `size` (bytes), `width`, `height`,
`channels` and `premultiplied` (indicating if premultiplication was used).
When using a crop strategy also contains `cropOffsetLeft` and `cropOffsetTop`.
A `Promise` is returned when `callback` is not provided.
### Parameters
- `options` **[Object][6]?**
- `options.resolveWithObject` **[boolean][7]?** Resolve the Promise with an Object containing `data` and `info` properties instead of resolving only with `data`.
- `callback` **[Function][3]?**
* `options` **[Object][6]?**
* `options.resolveWithObject` **[boolean][7]?** Resolve the Promise with an Object containing `data` and `info` properties instead of resolving only with `data`.
* `callback` **[Function][3]?**
### Examples
@@ -103,7 +107,7 @@ await sharp(pixelArray, { raw: { width, height, channels } })
.toFile('my-changed-image.jpg');
```
Returns **[Promise][5]&lt;[Buffer][8]>** when no callback is provided
Returns **[Promise][5]<[Buffer][8]>** when no callback is provided
## withMetadata
@@ -116,10 +120,12 @@ sRGB colour space and strip all metadata, including the removal of any ICC profi
### Parameters
- `options` **[Object][6]?**
- `options.orientation` **[number][9]?** value between 1 and 8, used to update the EXIF `Orientation` tag.
- `options.icc` **[string][2]?** filesystem path to output ICC profile, defaults to sRGB.
- `options.exif` **[Object][6]&lt;[Object][6]>** Object keyed by IFD0, IFD1 etc. of key/value string pairs to write as EXIF data. (optional, default `{}`)
* `options` **[Object][6]?**
* `options.orientation` **[number][9]?** value between 1 and 8, used to update the EXIF `Orientation` tag.
* `options.icc` **[string][2]?** filesystem path to output ICC profile, defaults to sRGB.
* `options.exif` **[Object][6]<[Object][6]>** Object keyed by IFD0, IFD1 etc. of key/value string pairs to write as EXIF data. (optional, default `{}`)
* `options.density` **[number][9]?** Number of pixels per inch (DPI).
### Examples
@@ -132,7 +138,7 @@ sharp('input.jpg')
```javascript
// Set "IFD0-Copyright" in output EXIF metadata
await sharp(input)
const data = await sharp(input)
.withMetadata({
exif: {
IFD0: {
@@ -141,9 +147,15 @@ await sharp(input)
}
})
.toBuffer();

// Set output metadata to 96 DPI
const data = await sharp(input)
.withMetadata({ density: 96 })
.toBuffer();
```
- Throws **[Error][4]** Invalid parameters
* Throws **[Error][4]** Invalid parameters
Returns **Sharp**
@@ -153,8 +165,8 @@ Force output to a given format.
### Parameters
- `format` **([string][2] \| [Object][6])** as a string or an Object with an 'id' attribute
- `options` **[Object][6]** output options
* `format` **([string][2] | [Object][6])** as a string or an Object with an 'id' attribute
* `options` **[Object][6]** output options
### Examples
@@ -165,7 +177,7 @@ const data = await sharp(input)
.toBuffer();
```
- Throws **[Error][4]** unsupported format or options
* Throws **[Error][4]** unsupported format or options
Returns **Sharp**
@@ -175,20 +187,21 @@ Use these JPEG options for output image.
### Parameters
- `options` **[Object][6]?** output options
- `options.quality` **[number][9]** quality, integer 1-100 (optional, default `80`)
- `options.progressive` **[boolean][7]** use progressive (interlace) scan (optional, default `false`)
- `options.chromaSubsampling` **[string][2]** set to '4:4:4' to prevent chroma subsampling otherwise defaults to '4:2:0' chroma subsampling (optional, default `'4:2:0'`)
- `options.optimiseCoding` **[boolean][7]** optimise Huffman coding tables (optional, default `true`)
- `options.optimizeCoding` **[boolean][7]** alternative spelling of optimiseCoding (optional, default `true`)
- `options.mozjpeg` **[boolean][7]** use mozjpeg defaults, equivalent to `{ trellisQuantisation: true, overshootDeringing: true, optimiseScans: true, quantisationTable: 3 }` (optional, default `false`)
- `options.trellisQuantisation` **[boolean][7]** apply trellis quantisation (optional, default `false`)
- `options.overshootDeringing` **[boolean][7]** apply overshoot deringing (optional, default `false`)
- `options.optimiseScans` **[boolean][7]** optimise progressive scans, forces progressive (optional, default `false`)
- `options.optimizeScans` **[boolean][7]** alternative spelling of optimiseScans (optional, default `false`)
- `options.quantisationTable` **[number][9]** quantization table to use, integer 0-8 (optional, default `0`)
- `options.quantizationTable` **[number][9]** alternative spelling of quantisationTable (optional, default `0`)
- `options.force` **[boolean][7]** force JPEG output, otherwise attempt to use input format (optional, default `true`)
* `options` **[Object][6]?** output options
* `options.quality` **[number][9]** quality, integer 1-100 (optional, default `80`)
* `options.progressive` **[boolean][7]** use progressive (interlace) scan (optional, default `false`)
* `options.chromaSubsampling` **[string][2]** set to '4:4:4' to prevent chroma subsampling otherwise defaults to '4:2:0' chroma subsampling (optional, default `'4:2:0'`)
* `options.optimiseCoding` **[boolean][7]** optimise Huffman coding tables (optional, default `true`)
* `options.optimizeCoding` **[boolean][7]** alternative spelling of optimiseCoding (optional, default `true`)
* `options.mozjpeg` **[boolean][7]** use mozjpeg defaults, equivalent to `{ trellisQuantisation: true, overshootDeringing: true, optimiseScans: true, quantisationTable: 3 }` (optional, default `false`)
* `options.trellisQuantisation` **[boolean][7]** apply trellis quantisation (optional, default `false`)
* `options.overshootDeringing` **[boolean][7]** apply overshoot deringing (optional, default `false`)
* `options.optimiseScans` **[boolean][7]** optimise progressive scans, forces progressive (optional, default `false`)
* `options.optimizeScans` **[boolean][7]** alternative spelling of optimiseScans (optional, default `false`)
* `options.quantisationTable` **[number][9]** quantization table to use, integer 0-8 (optional, default `0`)
* `options.quantizationTable` **[number][9]** alternative spelling of quantisationTable (optional, default `0`)
* `options.force` **[boolean][7]** force JPEG output, otherwise attempt to use input format (optional, default `true`)
### Examples
@@ -209,7 +222,7 @@ const data = await sharp(input)
.toBuffer();
```
- Throws **[Error][4]** Invalid options
* Throws **[Error][4]** Invalid options
Returns **Sharp**
@@ -223,16 +236,17 @@ Set `palette` to `true` for slower, indexed PNG output.
### Parameters
- `options` **[Object][6]?**
- `options.progressive` **[boolean][7]** use progressive (interlace) scan (optional, default `false`)
- `options.compressionLevel` **[number][9]** zlib compression level, 0 (fastest, largest) to 9 (slowest, smallest) (optional, default `6`)
- `options.adaptiveFiltering` **[boolean][7]** use adaptive row filtering (optional, default `false`)
- `options.palette` **[boolean][7]** quantise to a palette-based image with alpha transparency support (optional, default `false`)
- `options.quality` **[number][9]** use the lowest number of colours needed to achieve given quality, sets `palette` to `true` (optional, default `100`)
- `options.colours` **[number][9]** maximum number of palette entries, sets `palette` to `true` (optional, default `256`)
- `options.colors` **[number][9]** alternative spelling of `options.colours`, sets `palette` to `true` (optional, default `256`)
- `options.dither` **[number][9]** level of Floyd-Steinberg error diffusion, sets `palette` to `true` (optional, default `1.0`)
- `options.force` **[boolean][7]** force PNG output, otherwise attempt to use input format (optional, default `true`)
* `options` **[Object][6]?**
* `options.progressive` **[boolean][7]** use progressive (interlace) scan (optional, default `false`)
* `options.compressionLevel` **[number][9]** zlib compression level, 0 (fastest, largest) to 9 (slowest, smallest) (optional, default `6`)
* `options.adaptiveFiltering` **[boolean][7]** use adaptive row filtering (optional, default `false`)
* `options.palette` **[boolean][7]** quantise to a palette-based image with alpha transparency support (optional, default `false`)
* `options.quality` **[number][9]** use the lowest number of colours needed to achieve given quality, sets `palette` to `true` (optional, default `100`)
* `options.colours` **[number][9]** maximum number of palette entries, sets `palette` to `true` (optional, default `256`)
* `options.colors` **[number][9]** alternative spelling of `options.colours`, sets `palette` to `true` (optional, default `256`)
* `options.dither` **[number][9]** level of Floyd-Steinberg error diffusion, sets `palette` to `true` (optional, default `1.0`)
* `options.force` **[boolean][7]** force PNG output, otherwise attempt to use input format (optional, default `true`)
### Examples
@@ -250,7 +264,7 @@ const data = await sharp(input)
.toBuffer();
```
- Throws **[Error][4]** Invalid options
* Throws **[Error][4]** Invalid options
Returns **Sharp**
@@ -260,17 +274,18 @@ Use these WebP options for output image.
### Parameters
- `options` **[Object][6]?** output options
- `options.quality` **[number][9]** quality, integer 1-100 (optional, default `80`)
- `options.alphaQuality` **[number][9]** quality of alpha layer, integer 0-100 (optional, default `100`)
- `options.lossless` **[boolean][7]** use lossless compression mode (optional, default `false`)
- `options.nearLossless` **[boolean][7]** use near_lossless compression mode (optional, default `false`)
- `options.smartSubsample` **[boolean][7]** use high quality chroma subsampling (optional, default `false`)
- `options.reductionEffort` **[number][9]** level of CPU effort to reduce file size, integer 0-6 (optional, default `4`)
- `options.pageHeight` **[number][9]?** page height for animated output
- `options.loop` **[number][9]** number of animation iterations, use 0 for infinite animation (optional, default `0`)
- `options.delay` **[Array][10]&lt;[number][9]>?** list of delays between animation frames (in milliseconds)
- `options.force` **[boolean][7]** force WebP output, otherwise attempt to use input format (optional, default `true`)
* `options` **[Object][6]?** output options
* `options.quality` **[number][9]** quality, integer 1-100 (optional, default `80`)
* `options.alphaQuality` **[number][9]** quality of alpha layer, integer 0-100 (optional, default `100`)
* `options.lossless` **[boolean][7]** use lossless compression mode (optional, default `false`)
* `options.nearLossless` **[boolean][7]** use near_lossless compression mode (optional, default `false`)
* `options.smartSubsample` **[boolean][7]** use high quality chroma subsampling (optional, default `false`)
* `options.reductionEffort` **[number][9]** level of CPU effort to reduce file size, integer 0-6 (optional, default `4`)
* `options.pageHeight` **[number][9]?** page height for animated output
* `options.loop` **[number][9]** number of animation iterations, use 0 for infinite animation (optional, default `0`)
* `options.delay` **[Array][10]<[number][9]>?** list of delays between animation frames (in milliseconds)
* `options.force` **[boolean][7]** force WebP output, otherwise attempt to use input format (optional, default `true`)
### Examples
@@ -288,7 +303,7 @@ const outputWebp = await sharp(inputWebp, { animated: true })
.toBuffer();
```
- Throws **[Error][4]** Invalid options
* Throws **[Error][4]** Invalid options
Returns **Sharp**
@@ -302,35 +317,85 @@ The prebuilt binaries do not include this - see
### Parameters
- `options` **[Object][6]?** output options
- `options.pageHeight` **[number][9]?** page height for animated output
- `options.loop` **[number][9]** number of animation iterations, use 0 for infinite animation (optional, default `0`)
- `options.delay` **[Array][10]&lt;[number][9]>?** list of delays between animation frames (in milliseconds)
- `options.force` **[boolean][7]** force GIF output, otherwise attempt to use input format (optional, default `true`)
* `options` **[Object][6]?** output options
* `options.pageHeight` **[number][9]?** page height for animated output
* `options.loop` **[number][9]** number of animation iterations, use 0 for infinite animation (optional, default `0`)
* `options.delay` **[Array][10]<[number][9]>?** list of delays between animation frames (in milliseconds)
* `options.force` **[boolean][7]** force GIF output, otherwise attempt to use input format (optional, default `true`)
- Throws **[Error][4]** Invalid options
<!---->
* Throws **[Error][4]** Invalid options
Returns **Sharp**
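A minimal sketch, assuming a libvips build with ImageMagick support and `'input.gif'` as a placeholder animated input:

```javascript
// pass an animated GIF through, looping indefinitely
const data = await sharp('input.gif', { animated: true })
  .gif({ loop: 0 })
  .toBuffer();
```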
## jp2
Use these JP2 options for output image.
Requires libvips compiled with support for OpenJPEG.
The prebuilt binaries do not include this - see
[installing a custom libvips][11].
### Parameters
* `options` **[Object][6]?** output options
* `options.quality` **[number][9]** quality, integer 1-100 (optional, default `80`)
* `options.lossless` **[boolean][7]** use lossless compression mode (optional, default `false`)
* `options.tileWidth` **[number][9]** horizontal tile size (optional, default `512`)
* `options.tileHeight` **[number][9]** vertical tile size (optional, default `512`)
* `options.chromaSubsampling` **[string][2]** set to '4:2:0' to use chroma subsampling (optional, default `'4:4:4'`)
### Examples
```javascript
// Convert any input to lossless JP2 output
const data = await sharp(input)
.jp2({ lossless: true })
.toBuffer();
```
```javascript
// Convert any input to very high quality JP2 output
const data = await sharp(input)
.jp2({
quality: 100,
chromaSubsampling: '4:4:4'
})
.toBuffer();
```
* Throws **[Error][4]** Invalid options
Returns **Sharp**
**Meta**
* **since**: 0.29.1
## tiff
Use these TIFF options for output image.
The `density` can be set in pixels/inch via [withMetadata][1] instead of providing `xres` and `yres` in pixels/mm.
### Parameters
- `options` **[Object][6]?** output options
- `options.quality` **[number][9]** quality, integer 1-100 (optional, default `80`)
- `options.force` **[boolean][7]** force TIFF output, otherwise attempt to use input format (optional, default `true`)
- `options.compression` **[string][2]** compression options: lzw, deflate, jpeg, ccittfax4 (optional, default `'jpeg'`)
- `options.predictor` **[string][2]** compression predictor options: none, horizontal, float (optional, default `'horizontal'`)
- `options.pyramid` **[boolean][7]** write an image pyramid (optional, default `false`)
- `options.tile` **[boolean][7]** write a tiled tiff (optional, default `false`)
- `options.tileWidth` **[number][9]** horizontal tile size (optional, default `256`)
- `options.tileHeight` **[number][9]** vertical tile size (optional, default `256`)
- `options.xres` **[number][9]** horizontal resolution in pixels/mm (optional, default `1.0`)
- `options.yres` **[number][9]** vertical resolution in pixels/mm (optional, default `1.0`)
- `options.bitdepth` **[number][9]** reduce bitdepth to 1, 2 or 4 bit (optional, default `8`)
* `options` **[Object][6]?** output options
* `options.quality` **[number][9]** quality, integer 1-100 (optional, default `80`)
* `options.force` **[boolean][7]** force TIFF output, otherwise attempt to use input format (optional, default `true`)
* `options.compression` **[string][2]** compression options: lzw, deflate, jpeg, ccittfax4 (optional, default `'jpeg'`)
* `options.predictor` **[string][2]** compression predictor options: none, horizontal, float (optional, default `'horizontal'`)
* `options.pyramid` **[boolean][7]** write an image pyramid (optional, default `false`)
* `options.tile` **[boolean][7]** write a tiled tiff (optional, default `false`)
* `options.tileWidth` **[number][9]** horizontal tile size (optional, default `256`)
* `options.tileHeight` **[number][9]** vertical tile size (optional, default `256`)
* `options.xres` **[number][9]** horizontal resolution in pixels/mm (optional, default `1.0`)
* `options.yres` **[number][9]** vertical resolution in pixels/mm (optional, default `1.0`)
* `options.bitdepth` **[number][9]** reduce bitdepth to 1, 2 or 4 bit (optional, default `8`)
### Examples
@@ -345,7 +410,7 @@ sharp('input.svg')
.then(info => { ... });
```
- Throws **[Error][4]** Invalid options
* Throws **[Error][4]** Invalid options
Returns **Sharp**
@@ -356,22 +421,26 @@ Use these AVIF options for output image.
Whilst it is possible to create AVIF images smaller than 16x16 pixels,
most web browsers do not display these properly.
AVIF image sequences are not supported.
### Parameters
- `options` **[Object][6]?** output options
- `options.quality` **[number][9]** quality, integer 1-100 (optional, default `50`)
- `options.lossless` **[boolean][7]** use lossless compression (optional, default `false`)
- `options.speed` **[number][9]** CPU effort vs file size, 0 (slowest/smallest) to 8 (fastest/largest) (optional, default `5`)
- `options.chromaSubsampling` **[string][2]** set to '4:4:4' to prevent chroma subsampling otherwise defaults to '4:2:0' chroma subsampling, requires libvips v8.11.0 (optional, default `'4:2:0'`)
* `options` **[Object][6]?** output options
* `options.quality` **[number][9]** quality, integer 1-100 (optional, default `50`)
* `options.lossless` **[boolean][7]** use lossless compression (optional, default `false`)
* `options.speed` **[number][9]** CPU effort vs file size, 0 (slowest/smallest) to 9 (fastest/largest) (optional, default `5`)
* `options.chromaSubsampling` **[string][2]** set to '4:2:0' to use chroma subsampling (optional, default `'4:4:4'`)
- Throws **[Error][4]** Invalid options
<!---->
* Throws **[Error][4]** Invalid options
Returns **Sharp**
**Meta**
- **since**: 0.27.0
* **since**: 0.27.0
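For example, with `'input.jpg'` as a placeholder path:

```javascript
// trade encoding time for a smaller file
const smaller = await sharp('input.jpg')
  .avif({ quality: 50, speed: 3 })
  .toBuffer();

// lossless AVIF output
const lossless = await sharp('input.jpg')
  .avif({ lossless: true })
  .toBuffer();
```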
## heif
@@ -382,48 +451,56 @@ globally-installed libvips compiled with support for libheif, libde265 and x265.
### Parameters
- `options` **[Object][6]?** output options
- `options.quality` **[number][9]** quality, integer 1-100 (optional, default `50`)
- `options.compression` **[string][2]** compression format: av1, hevc (optional, default `'av1'`)
- `options.lossless` **[boolean][7]** use lossless compression (optional, default `false`)
- `options.speed` **[number][9]** CPU effort vs file size, 0 (slowest/smallest) to 8 (fastest/largest) (optional, default `5`)
- `options.chromaSubsampling` **[string][2]** set to '4:4:4' to prevent chroma subsampling otherwise defaults to '4:2:0' chroma subsampling, requires libvips v8.11.0 (optional, default `'4:2:0'`)
* `options` **[Object][6]?** output options
* `options.quality` **[number][9]** quality, integer 1-100 (optional, default `50`)
* `options.compression` **[string][2]** compression format: av1, hevc (optional, default `'av1'`)
* `options.lossless` **[boolean][7]** use lossless compression (optional, default `false`)
* `options.speed` **[number][9]** CPU effort vs file size, 0 (slowest/smallest) to 9 (fastest/largest) (optional, default `5`)
* `options.chromaSubsampling` **[string][2]** set to '4:2:0' to use chroma subsampling (optional, default `'4:4:4'`)
- Throws **[Error][4]** Invalid options
<!---->
* Throws **[Error][4]** Invalid options
Returns **Sharp**
**Meta**
- **since**: 0.23.0
* **since**: 0.23.0
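A minimal sketch, assuming a globally-installed libvips with the required HEIF dependencies and `'input.jpg'` as a placeholder path:

```javascript
// HEIC output using the hevc compression format
const data = await sharp('input.jpg')
  .heif({ compression: 'hevc', quality: 60 })
  .toBuffer();
```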
## raw
Force output to be raw, uncompressed, 8-bit unsigned integer (unit8) pixel data.
Force output to be raw, uncompressed pixel data.
Pixel ordering is left-to-right, top-to-bottom, without padding.
Channel ordering will be RGB or RGBA for non-greyscale colourspaces.
### Parameters
* `options` **[Object][6]?** output options
* `options.depth` **[string][2]** bit depth, one of: char, uchar (default), short, ushort, int, uint, float, complex, double, dpcomplex (optional, default `'uchar'`)
### Examples
```javascript
// Extract raw RGB pixel data from JPEG input
// Extract raw, unsigned 8-bit RGB pixel data from JPEG input
const { data, info } = await sharp('input.jpg')
.raw()
.toBuffer({ resolveWithObject: true });
```
```javascript
// Extract alpha channel as raw pixel data from PNG input
// Extract alpha channel as raw, unsigned 16-bit pixel data from PNG input
const data = await sharp('input.png')
.ensureAlpha()
.extractChannel(3)
.toColourspace('b-w')
.raw()
.raw({ depth: 'ushort' })
.toBuffer();
```
Returns **Sharp**
* Throws **[Error][4]** Invalid options
## tile
@@ -435,18 +512,19 @@ Warning: multiple sharp instances concurrently producing tile output can expose
### Parameters
- `options` **[Object][6]?**
- `options.size` **[number][9]** tile size in pixels, a value between 1 and 8192. (optional, default `256`)
- `options.overlap` **[number][9]** tile overlap in pixels, a value between 0 and 8192. (optional, default `0`)
- `options.angle` **[number][9]** tile angle of rotation, must be a multiple of 90. (optional, default `0`)
- `options.background` **([string][2] \| [Object][6])** background colour, parsed by the [color][12] module, defaults to white without transparency. (optional, default `{r:255,g:255,b:255,alpha:1}`)
- `options.depth` **[string][2]?** how deep to make the pyramid, possible values are `onepixel`, `onetile` or `one`, default based on layout.
- `options.skipBlanks` **[number][9]** threshold to skip tile generation, a value 0 - 255 for 8-bit images or 0 - 65535 for 16-bit images (optional, default `-1`)
- `options.container` **[string][2]** tile container, with value `fs` (filesystem) or `zip` (compressed file). (optional, default `'fs'`)
- `options.layout` **[string][2]** filesystem layout, possible values are `dz`, `iiif`, `zoomify` or `google`. (optional, default `'dz'`)
- `options.centre` **[boolean][7]** centre image in tile. (optional, default `false`)
- `options.center` **[boolean][7]** alternative spelling of centre. (optional, default `false`)
- `options.id` **[string][2]** when `layout` is `iiif`, sets the `@id` attribute of `info.json` (optional, default `'https://example.com/iiif'`)
* `options` **[Object][6]?**
* `options.size` **[number][9]** tile size in pixels, a value between 1 and 8192. (optional, default `256`)
* `options.overlap` **[number][9]** tile overlap in pixels, a value between 0 and 8192. (optional, default `0`)
* `options.angle` **[number][9]** tile angle of rotation, must be a multiple of 90. (optional, default `0`)
* `options.background` **([string][2] | [Object][6])** background colour, parsed by the [color][12] module, defaults to white without transparency. (optional, default `{r:255,g:255,b:255,alpha:1}`)
* `options.depth` **[string][2]?** how deep to make the pyramid, possible values are `onepixel`, `onetile` or `one`, default based on layout.
* `options.skipBlanks` **[number][9]** threshold to skip tile generation, a value 0 - 255 for 8-bit images or 0 - 65535 for 16-bit images (optional, default `-1`)
* `options.container` **[string][2]** tile container, with value `fs` (filesystem) or `zip` (compressed file). (optional, default `'fs'`)
* `options.layout` **[string][2]** filesystem layout, possible values are `dz`, `iiif`, `zoomify` or `google`. (optional, default `'dz'`)
* `options.centre` **[boolean][7]** centre image in tile. (optional, default `false`)
* `options.center` **[boolean][7]** alternative spelling of centre. (optional, default `false`)
* `options.id` **[string][2]** when `layout` is `iiif`, sets the `@id` attribute of `info.json` (optional, default `'https://example.com/iiif'`)
### Examples
@@ -462,10 +540,30 @@ sharp('input.tiff')
});
```
- Throws **[Error][4]** Invalid parameters
* Throws **[Error][4]** Invalid parameters
Returns **Sharp**
## timeout
Set a timeout for processing, in seconds.
Use a value of zero to continue processing indefinitely, the default behaviour.
The clock starts when libvips opens an input image for processing.
Time spent waiting for a libuv thread to become available is not included.
### Parameters
* `options` **[Object][6]**
* `options.seconds` **[number][9]** Number of seconds after which processing will be stopped
Returns **Sharp**
**Meta**
* **since**: 0.29.2
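For example, with `'input.jpg'` as a placeholder path:

```javascript
// stop processing and reject if it takes longer than 3 seconds
try {
  const data = await sharp('input.jpg')
    .resize(2000)
    .timeout({ seconds: 3 })
    .toBuffer();
} catch (err) {
  console.error('processing stopped:', err.message);
}
```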
[1]: #withmetadata
[2]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String


@@ -6,49 +6,50 @@ Resize image to `width`, `height` or `width x height`.
When both a `width` and `height` are provided, the possible methods by which the image should **fit** these are:
- `cover`: (default) Preserving aspect ratio, ensure the image covers both provided dimensions by cropping/clipping to fit.
- `contain`: Preserving aspect ratio, contain within both provided dimensions using "letterboxing" where necessary.
- `fill`: Ignore the aspect ratio of the input and stretch to both provided dimensions.
- `inside`: Preserving aspect ratio, resize the image to be as large as possible while ensuring its dimensions are less than or equal to both those specified.
- `outside`: Preserving aspect ratio, resize the image to be as small as possible while ensuring its dimensions are greater than or equal to both those specified.
* `cover`: (default) Preserving aspect ratio, ensure the image covers both provided dimensions by cropping/clipping to fit.
* `contain`: Preserving aspect ratio, contain within both provided dimensions using "letterboxing" where necessary.
* `fill`: Ignore the aspect ratio of the input and stretch to both provided dimensions.
* `inside`: Preserving aspect ratio, resize the image to be as large as possible while ensuring its dimensions are less than or equal to both those specified.
* `outside`: Preserving aspect ratio, resize the image to be as small as possible while ensuring its dimensions are greater than or equal to both those specified.
Some of these values are based on the [object-fit][1] CSS property.
When using a `fit` of `cover` or `contain`, the default **position** is `centre`. Other options are:
- `sharp.position`: `top`, `right top`, `right`, `right bottom`, `bottom`, `left bottom`, `left`, `left top`.
- `sharp.gravity`: `north`, `northeast`, `east`, `southeast`, `south`, `southwest`, `west`, `northwest`, `center` or `centre`.
- `sharp.strategy`: `cover` only, dynamically crop using either the `entropy` or `attention` strategy.
* `sharp.position`: `top`, `right top`, `right`, `right bottom`, `bottom`, `left bottom`, `left`, `left top`.
* `sharp.gravity`: `north`, `northeast`, `east`, `southeast`, `south`, `southwest`, `west`, `northwest`, `center` or `centre`.
* `sharp.strategy`: `cover` only, dynamically crop using either the `entropy` or `attention` strategy.
Some of these values are based on the [object-position][2] CSS property.
The experimental strategy-based approach resizes so one dimension is at its target length
then repeatedly ranks edge regions, discarding the edge with the lowest score based on the selected strategy.
- `entropy`: focus on the region with the highest [Shannon entropy][3].
- `attention`: focus on the region with the highest luminance frequency, colour saturation and presence of skin tones.
* `entropy`: focus on the region with the highest [Shannon entropy][3].
* `attention`: focus on the region with the highest luminance frequency, colour saturation and presence of skin tones.
Possible interpolation kernels are:
- `nearest`: Use [nearest neighbour interpolation][4].
- `cubic`: Use a [Catmull-Rom spline][5].
- `mitchell`: Use a [Mitchell-Netravali spline][6].
- `lanczos2`: Use a [Lanczos kernel][7] with `a=2`.
- `lanczos3`: Use a Lanczos kernel with `a=3` (the default).
* `nearest`: Use [nearest neighbour interpolation][4].
* `cubic`: Use a [Catmull-Rom spline][5].
* `mitchell`: Use a [Mitchell-Netravali spline][6].
* `lanczos2`: Use a [Lanczos kernel][7] with `a=2`.
* `lanczos3`: Use a Lanczos kernel with `a=3` (the default).
### Parameters
- `width` **[number][8]?** pixels wide the resultant image should be. Use `null` or `undefined` to auto-scale the width to match the height.
- `height` **[number][8]?** pixels high the resultant image should be. Use `null` or `undefined` to auto-scale the height to match the width.
- `options` **[Object][9]?**
- `options.width` **[String][10]?** alternative means of specifying `width`. If both are present this take priority.
- `options.height` **[String][10]?** alternative means of specifying `height`. If both are present this take priority.
- `options.fit` **[String][10]** how the image should be resized to fit both provided dimensions, one of `cover`, `contain`, `fill`, `inside` or `outside`. (optional, default `'cover'`)
- `options.position` **[String][10]** position, gravity or strategy to use when `fit` is `cover` or `contain`. (optional, default `'centre'`)
- `options.background` **([String][10] \| [Object][9])** background colour when using a `fit` of `contain`, parsed by the [color][11] module, defaults to black without transparency. (optional, default `{r:0,g:0,b:0,alpha:1}`)
- `options.kernel` **[String][10]** the kernel to use for image reduction. (optional, default `'lanczos3'`)
- `options.withoutEnlargement` **[Boolean][12]** do not enlarge if the width _or_ height are already less than the specified dimensions, equivalent to GraphicsMagick's `>` geometry option. (optional, default `false`)
- `options.fastShrinkOnLoad` **[Boolean][12]** take greater advantage of the JPEG and WebP shrink-on-load feature, which can lead to a slight moiré pattern on some images. (optional, default `true`)
* `width` **[number][8]?** pixels wide the resultant image should be. Use `null` or `undefined` to auto-scale the width to match the height.
* `height` **[number][8]?** pixels high the resultant image should be. Use `null` or `undefined` to auto-scale the height to match the width.
* `options` **[Object][9]?**
* `options.width` **[String][10]?** alternative means of specifying `width`. If both are present this takes priority.
* `options.height` **[String][10]?** alternative means of specifying `height`. If both are present this takes priority.
* `options.fit` **[String][10]** how the image should be resized to fit both provided dimensions, one of `cover`, `contain`, `fill`, `inside` or `outside`. (optional, default `'cover'`)
* `options.position` **[String][10]** position, gravity or strategy to use when `fit` is `cover` or `contain`. (optional, default `'centre'`)
* `options.background` **([String][10] | [Object][9])** background colour when using a `fit` of `contain`, parsed by the [color][11] module, defaults to black without transparency. (optional, default `{r:0,g:0,b:0,alpha:1}`)
* `options.kernel` **[String][10]** the kernel to use for image reduction. (optional, default `'lanczos3'`)
* `options.withoutEnlargement` **[Boolean][12]** do not enlarge if the width *or* height are already less than the specified dimensions, equivalent to GraphicsMagick's `>` geometry option. (optional, default `false`)
* `options.fastShrinkOnLoad` **[Boolean][12]** take greater advantage of the JPEG and WebP shrink-on-load feature, which can lead to a slight moiré pattern on some images. (optional, default `true`)
### Examples
@@ -125,7 +126,7 @@ const scaleByHalf = await sharp(input)
);
```
- Throws **[Error][13]** Invalid parameters
* Throws **[Error][13]** Invalid parameters
Returns **Sharp**
@@ -136,12 +137,13 @@ This operation will always occur after resizing and extraction, if any.
### Parameters
- `extend` **([number][8] \| [Object][9])** single pixel count to add to all edges or an Object with per-edge counts
- `extend.top` **[number][8]** (optional, default `0`)
- `extend.left` **[number][8]** (optional, default `0`)
- `extend.bottom` **[number][8]** (optional, default `0`)
- `extend.right` **[number][8]** (optional, default `0`)
- `extend.background` **([String][10] \| [Object][9])** background colour, parsed by the [color][11] module, defaults to black without transparency. (optional, default `{r:0,g:0,b:0,alpha:1}`)
* `extend` **([number][8] | [Object][9])** single pixel count to add to all edges or an Object with per-edge counts
* `extend.top` **[number][8]** (optional, default `0`)
* `extend.left` **[number][8]** (optional, default `0`)
* `extend.bottom` **[number][8]** (optional, default `0`)
* `extend.right` **[number][8]** (optional, default `0`)
* `extend.background` **([String][10] | [Object][9])** background colour, parsed by the [color][11] module, defaults to black without transparency. (optional, default `{r:0,g:0,b:0,alpha:1}`)
### Examples
@@ -170,7 +172,7 @@ sharp(input)
...
```
- Throws **[Error][13]** Invalid parameters
* Throws **[Error][13]** Invalid parameters
Returns **Sharp**
@@ -178,17 +180,18 @@ Returns **Sharp**
Extract/crop a region of the image.
- Use `extract` before `resize` for pre-resize extraction.
- Use `extract` after `resize` for post-resize extraction.
- Use `extract` before and after for both.
* Use `extract` before `resize` for pre-resize extraction.
* Use `extract` after `resize` for post-resize extraction.
* Use `extract` before and after for both.
### Parameters
- `options` **[Object][9]** describes the region to extract using integral pixel values
- `options.left` **[number][8]** zero-indexed offset from left edge
- `options.top` **[number][8]** zero-indexed offset from top edge
- `options.width` **[number][8]** width of region to extract
- `options.height` **[number][8]** height of region to extract
* `options` **[Object][9]** describes the region to extract using integral pixel values
* `options.left` **[number][8]** zero-indexed offset from left edge
* `options.top` **[number][8]** zero-indexed offset from top edge
* `options.width` **[number][8]** width of region to extract
* `options.height` **[number][8]** height of region to extract
### Examples
@@ -210,7 +213,7 @@ sharp(input)
});
```
- Throws **[Error][13]** Invalid parameters
* Throws **[Error][13]** Invalid parameters
Returns **Sharp**
@@ -224,10 +227,11 @@ will contain `trimOffsetLeft` and `trimOffsetTop` properties.
### Parameters
- `threshold` **[number][8]** the allowed difference from the top-left pixel, a number greater than zero. (optional, default `10`)
* `threshold` **[number][8]** the allowed difference from the top-left pixel, a number greater than zero. (optional, default `10`)
<!---->
- Throws **[Error][13]** Invalid parameters
* Throws **[Error][13]** Invalid parameters
Returns **Sharp**
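A minimal sketch, assuming `'input.jpg'` has a border matching its top-left pixel:

```javascript
// remove the border and report how much was trimmed from the left and top
const { data, info } = await sharp('input.jpg')
  .trim(10)
  .toBuffer({ resolveWithObject: true });
console.log(info.trimOffsetLeft, info.trimOffsetTop);
```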


@@ -54,17 +54,18 @@ console.log(sharp.versions);
## cache
Gets or, when options are provided, sets the limits of _libvips'_ operation cache.
Gets or, when options are provided, sets the limits of *libvips'* operation cache.
Existing entries in the cache will be trimmed after any change in limits.
This method always returns cache statistics,
useful for determining how much working memory is required for a particular task.
### Parameters
- `options` **([Object][1] \| [boolean][10])** Object with the following attributes, or boolean where true uses default cache settings and false removes all caching (optional, default `true`)
- `options.memory` **[number][11]** is the maximum memory in MB to use for this cache (optional, default `50`)
- `options.files` **[number][11]** is the maximum number of files to hold open (optional, default `20`)
- `options.items` **[number][11]** is the maximum number of operations to cache (optional, default `100`)
* `options` **([Object][1] | [boolean][10])** Object with the following attributes, or boolean where true uses default cache settings and false removes all caching (optional, default `true`)
* `options.memory` **[number][11]** is the maximum memory in MB to use for this cache (optional, default `50`)
* `options.files` **[number][11]** is the maximum number of files to hold open (optional, default `20`)
* `options.items` **[number][11]** is the maximum number of operations to cache (optional, default `100`)
### Examples
@@ -83,7 +84,7 @@ Returns **[Object][1]**
## concurrency
Gets or, when a concurrency is provided, sets
the number of threads _libvips'_ should create to process each image.
the number of threads *libvips* should create to process each image.
The default value is the number of CPU cores,
except when using glibc-based Linux without jemalloc,
@@ -98,7 +99,7 @@ This method always returns the current concurrency.
### Parameters
- `concurrency` **[number][11]?**
* `concurrency` **[number][11]?**
### Examples
@@ -114,8 +115,8 @@ Returns **[number][11]** concurrency
An EventEmitter that emits a `change` event when a task is either:
- queued, waiting for _libuv_ to provide a worker thread
- complete
* queued, waiting for *libuv* to provide a worker thread
* complete
### Examples
@@ -129,8 +130,8 @@ sharp.queue.on('change', function(queueLength) {
Provides access to internal task counters.
- queue is the number of tasks this module has queued waiting for _libuv_ to provide a worker thread from its pool.
- process is the number of resize tasks currently being processed.
* queue is the number of tasks this module has queued waiting for *libuv* to provide a worker thread from its pool.
* process is the number of resize tasks currently being processed.
### Examples
@@ -150,7 +151,7 @@ by taking advantage of the SIMD vector unit of the CPU, e.g. Intel SSE and ARM N
### Parameters
- `simd` **[boolean][10]** (optional, default `true`)
* `simd` **[boolean][10]** (optional, default `true`)
### Examples


@@ -1,9 +1,138 @@
# Changelog
## v0.29 - *circle*
Requires libvips v8.11.3
### v0.29.3 - 14th November 2021
* Ensure correct dimensions when containing an image resized to 1px.
[#2951](https://github.com/lovell/sharp/issues/2951)
* Impute TIFF `xres`/`yres` from `density` provided to `withMetadata`.
[#2952](https://github.com/lovell/sharp/pull/2952)
[@mbklein](https://github.com/mbklein)
### v0.29.2 - 21st October 2021
* Add `timeout` function to limit processing time.
* Ensure `sharp.versions` is populated from vendored libvips.
* Remove animation properties from single page images.
[#2890](https://github.com/lovell/sharp/issues/2890)
* Allow use of 'tif' to select TIFF output.
[#2893](https://github.com/lovell/sharp/pull/2893)
[@erf](https://github.com/erf)
* Improve error message on Windows for version conflict.
[#2918](https://github.com/lovell/sharp/pull/2918)
[@dkrnl](https://github.com/dkrnl)
* Throw error rather than exit when invalid binaries detected.
[#2931](https://github.com/lovell/sharp/issues/2931)
### v0.29.1 - 7th September 2021
* Add `lightness` option to `modulate` operation.
[#2846](https://github.com/lovell/sharp/pull/2846)
* Ensure correct PNG bitdepth is set based on number of colours.
[#2855](https://github.com/lovell/sharp/issues/2855)
* Ensure background is always premultiplied when compositing.
[#2858](https://github.com/lovell/sharp/issues/2858)
* Ensure images with P3 profiles retain full gamut.
[#2862](https://github.com/lovell/sharp/issues/2862)
* Add support for libvips compiled with OpenJPEG.
[#2868](https://github.com/lovell/sharp/pull/2868)
* Remove unsupported animation properties from AVIF output.
[#2870](https://github.com/lovell/sharp/issues/2870)
* Resolve paths before comparing input/output filenames.
[#2878](https://github.com/lovell/sharp/pull/2878)
[@rexxars](https://github.com/rexxars)
* Allow use of speed 9 (fastest) for HEIF encoding.
[#2879](https://github.com/lovell/sharp/pull/2879)
[@rexxars](https://github.com/rexxars)
### v0.29.0 - 17th August 2021
* Drop support for Node.js 10, now requires Node.js >= 12.13.0.
* Add `background` property to PNG and GIF image metadata.
* Add `compression` property to HEIF image metadata.
[#2504](https://github.com/lovell/sharp/issues/2504)
* AVIF encoding now defaults to `4:4:4` chroma subsampling.
[#2562](https://github.com/lovell/sharp/issues/2562)
* Allow multiple platform-arch binaries in same `node_modules` installation tree.
[#2575](https://github.com/lovell/sharp/issues/2575)
* Default to single-channel `b-w` space when `extractChannel` is used.
[#2658](https://github.com/lovell/sharp/issues/2658)
* Allow installation directory to contain spaces (regression in v0.26.0).
[#2777](https://github.com/lovell/sharp/issues/2777)
* Add `pipelineColourspace` operator to set the processing space.
[#2704](https://github.com/lovell/sharp/pull/2704)
[@Daiz](https://github.com/Daiz)
* Allow bit depth to be set when using raw input and output.
[#2762](https://github.com/lovell/sharp/pull/2762)
[@mart-jansink](https://github.com/mart-jansink)
* Allow `negate` to act only on non-alpha channels.
[#2808](https://github.com/lovell/sharp/pull/2808)
[@rexxars](https://github.com/rexxars)
## v0.28 - *bijou*
Requires libvips v8.10.6
### v0.28.3 - 24th May 2021
* Ensure presence of libvips, vendored or global, before invoking node-gyp.
* Skip shrink-on-load for multi-page WebP.
[#2714](https://github.com/lovell/sharp/issues/2714)
* Add contrast limiting adaptive histogram equalization (CLAHE) operator.
[#2726](https://github.com/lovell/sharp/pull/2726)
[@baparham](https://github.com/baparham)
### v0.28.2 - 10th May 2021
* Allow `withMetadata` to set `density`.
[#967](https://github.com/lovell/sharp/issues/967)
* Skip shrink-on-load where one dimension <4px.
[#2653](https://github.com/lovell/sharp/issues/2653)
* Allow escaped proxy credentials.
[#2664](https://github.com/lovell/sharp/pull/2664)
[@msalettes](https://github.com/msalettes)
* Add `premultiplied` flag for raw pixel data input.
[#2685](https://github.com/lovell/sharp/pull/2685)
[@mnutt](https://github.com/mnutt)
* Detect empty input and throw a helpful error.
[#2687](https://github.com/lovell/sharp/pull/2687)
[@JakobJingleheimer](https://github.com/JakobJingleheimer)
* Add install-time flag to skip version compatibility checks.
[#2692](https://github.com/lovell/sharp/pull/2692)
[@xemle](https://github.com/xemle)
### v0.28.1 - 5th April 2021
* Ensure all installation errors are logged with a more obvious prefix.

docs/docute.min.js (vendored)

File diff suppressed because one or more lines are too long


@@ -6,7 +6,6 @@
".*",
"build.js",
"firebase.json",
"*.md",
"image/**",
"search-index/**"
],
@@ -14,10 +13,9 @@
{
"source": "**",
"headers": [
{
"key": "Cache-Control",
"value": "max-age=86400"
}
{ "key": "Cache-Control", "value": "max-age=86400" },
{ "key": "X-Content-Type-Options", "value": "nosniff" },
{ "key": "X-Frame-Options", "value": "SAMEORIGIN" }
]
}
],


@@ -209,3 +209,21 @@ GitHub: https://github.com/beig
Name: Florian Busch
GitHub: https://github.com/florian-busch
Name: Matthieu Salettes
GitHub: https://github.com/msalettes
Name: Taneli Vatanen
GitHub: https://github.com/Daiz
Name: Mart Jansink
GitHub: https://github.com/mart-jansink
Name: Tenpi
GitHub: https://github.com/Tenpi
Name: Zaruike
GitHub: https://github.com/Zaruike
Name: Erlend F
GitHub: https://github.com/erf

File diff suppressed because one or more lines are too long


@@ -10,20 +10,20 @@ yarn add sharp
## Prerequisites
* Node.js v10+
* Node.js >= 12.13.0
## Prebuilt binaries
Ready-compiled sharp and libvips binaries are provided for use with
Node.js v10+ on the most common platforms:
Ready-compiled sharp and libvips binaries are provided for use on the most common platforms:
* macOS x64 (>= 10.13)
* macOS ARM64
* Linux x64 (glibc >= 2.17, musl >= 1.1.24)
* Linux ARM64 (glibc >= 2.29, musl >= 1.1.24)
* Windows x64
* Windows x86
An ~7.5MB tarball containing libvips and its most commonly used dependencies
An ~7MB tarball containing libvips and its most commonly used dependencies
is downloaded via HTTPS and stored within `node_modules/sharp/vendor` during `npm install`.
This provides support for the
@@ -31,9 +31,8 @@ JPEG, PNG, WebP, AVIF, TIFF, GIF (input) and SVG (input) image formats.
The following platforms have prebuilt libvips but not sharp:
* macOS ARM64
* Linux ARMv6
* Linux ARMv7 (glibc >= 2.28)
* Linux ARMv6 (glibc >= 2.28)
* Windows ARM64
The following platforms require compilation of both libvips and sharp from source:
@@ -41,6 +40,8 @@ The following platforms require compilation of both libvips and sharp from sourc
* Linux x86
* Linux x64 (glibc <= 2.16, includes RHEL/CentOS 6)
* Linux ARM64 (glibc <= 2.28)
* Linux ARMv7 (glibc <= 2.27, musl)
* Linux ARMv6 (glibc <= 2.27, musl)
* Linux PowerPC
* FreeBSD
* OpenBSD
@@ -49,6 +50,7 @@ The following platforms require compilation of both libvips and sharp from sourc
The architecture and platform of Node.js used for `npm install`
must be the same as the architecture and platform of Node.js used at runtime.
See the [cross-platform](#cross-platform) section if this is not the case.
When using npm v6 or earlier, the `npm install --unsafe-perm` flag must be used when installing as `root` or a `sudo` user.
@@ -60,15 +62,36 @@ Check the output of running `npm install --verbose sharp` for useful error messa
## Apple M1
Prebuilt libvips binaries are provided for macOS on ARM64 (since sharp v0.28.0).
Prebuilt sharp and libvips binaries are provided for macOS on ARM64 from sharp v0.29.0.
During `npm install` sharp will be built locally,
which requires Xcode and Python - see
[building from source](#building-from-source).
Prebuilt libvips binaries were provided for macOS on ARM64 from sharp v0.28.0.
When this new ARM64 CPU is made freely available
to open source projects via a CI service
then prebuilt sharp binaries can also be provided.
## Cross-platform
At `npm install` time, prebuilt binaries are automatically selected for the
current OS platform and CPU architecture, where available.
The target platform and/or architecture can be manually selected using the following flags.
```sh
npm install --platform=... --arch=... --arm-version=... sharp
```
* `--platform`: one of `linux`, `linuxmusl`, `darwin` or `win32`.
* `--arch`: one of `x64`, `ia32`, `arm` or `arm64`.
* `--arm-version`: one of `6`, `7` or `8` (`arm` defaults to `6`, `arm64` defaults to `8`).
* `--sharp-install-force`: skip version compatibility checks.
These values can also be set via environment variables,
`npm_config_platform`, `npm_config_arch`, `npm_config_arm_version`
and `SHARP_INSTALL_FORCE` respectively.
For example, if the target machine has a 64-bit ARM CPU and is running Alpine Linux,
use the following flags:
```sh
npm install --arch=arm64 --platform=linuxmusl sharp
```
## Custom libvips
@@ -110,20 +133,16 @@ To install the prebuilt libvips binaries from a custom URL,
set the `sharp_libvips_binary_host` npm config option
or the `npm_config_sharp_libvips_binary_host` environment variable.
The version subpath and file name are appended to these. There should be tarballs available
that are compressed with both gzip and Brotli, as the format downloaded will vary depending
on whether the user's version of Node supports Brotli decompression (Node.js v10.16.0+)
The version subpath and file name are appended to these.
For example, if `sharp_libvips_binary_host` is set to `https://hostname/path`
and the libvips version is `1.2.3` then the resultant URL will be
`https://hostname/path/v1.2.3/libvips-1.2.3-platform-arch.tar.br` or
`https://hostname/path/v1.2.3/libvips-1.2.3-platform-arch.tar.gz`.
`https://hostname/path/v1.2.3/libvips-1.2.3-platform-arch.tar.br`.
See the Chinese mirror below for a further example.
## Chinese mirror
Alibaba provide a mirror site based in China containing binaries for both sharp and libvips.
A mirror site based in China, provided by Alibaba, contains binaries for both sharp and libvips.
To use this either set the following configuration:
@@ -182,32 +201,18 @@ to `false` when using the `yarn` package manager.
## AWS Lambda
The binaries in the `node_modules` directory of the
The `node_modules` directory of the
[deployment package](https://docs.aws.amazon.com/lambda/latest/dg/nodejs-package.html)
must be for the Linux x64 platform.
must include binaries for the Linux x64 platform.
When building your deployment package on machines other than Linux x64 (glibc),
run the following commands:
run the following additional command after `npm install`:
macOS:
```sh
rm -rf node_modules/sharp
npm install
SHARP_IGNORE_GLOBAL_LIBVIPS=1 npm install --arch=x64 --platform=linux sharp
```
Windows:
```sh
rmdir /s /q node_modules/sharp
npm install --arch=x64 --platform=linux sharp
```
Alternatively a Docker container closely matching the Lambda runtime can be used:
```sh
rm -rf node_modules/sharp
docker run -v "$PWD":/var/task lambci/lambda:build-nodejs12.x npm install sharp
```
To get the best performance select the largest memory available.
A 1536 MB function provides ~12x more CPU time than a 128 MB function.


@@ -5,9 +5,11 @@ A test to benchmark the performance of this module relative to alternatives.
## The contenders
* [jimp](https://www.npmjs.com/package/jimp) v0.16.1 - Image processing in pure JavaScript. Provides bicubic interpolation.
* [mapnik](https://www.npmjs.org/package/mapnik) v4.5.6 - Whilst primarily a map renderer, Mapnik contains bitmap image utilities.
* [mapnik](https://www.npmjs.org/package/mapnik) v4.5.8 - Whilst primarily a map renderer, Mapnik contains bitmap image utilities.
* [imagemagick](https://www.npmjs.com/package/imagemagick) v0.1.3 - Supports filesystem only and "*has been unmaintained for a long time*".
* [gm](https://www.npmjs.com/package/gm) v1.23.1 - Fully featured wrapper around GraphicsMagick's `gm` command line utility.
* [@squoosh/lib](https://www.npmjs.com/package/@squoosh/lib) v0.4.0 - Image libraries transpiled to WebAssembly, includes GPLv3 code.
* [@squoosh/cli](https://www.npmjs.com/package/@squoosh/cli) v0.7.2 - Command line wrapper around `@squoosh/lib`, avoids GPLv3 by spawning process.
* sharp v0.28.0 / libvips v8.10.6 - Caching within libvips disabled to ensure a fair comparison.
## The task
@@ -19,21 +21,23 @@ then compress to JPEG at a "quality" setting of 80.
## Test environment
* AWS EC2 eu-west-1 [c5ad.xlarge](https://aws.amazon.com/ec2/instance-types/c5/) (4x AMD EPYC 7R32)
* Ubuntu 20.10 (ami-03f10415e8b0bfb86)
* Node.js v14.16.0
* Ubuntu 21.04 (ami-0d7626a9c2ceab1ac)
* Node.js 16.6.2
## Results
| Module | Input | Output | Ops/sec | Speed-up |
| :----------------- | :----- | :----- | ------: | -------: |
| jimp | buffer | buffer | 0.78 | 1.0 |
| mapnik | buffer | buffer | 3.39 | 4.3 |
| gm | buffer | buffer | 7.84 | 10.1 |
| gm | file | file | 9.24 | 11.8 |
| imagemagick | file | file | 9.37 | 12.0 |
| sharp | stream | stream | 26.84 | 34.4 |
| sharp | file | file | 29.76 | 38.2 |
| sharp | buffer | buffer | 31.60 | 40.5 |
| jimp | buffer | buffer | 0.83 | 1.0 |
| squoosh-cli | file | file | 1.09 | 1.3 |
| squoosh-lib | buffer | buffer | 1.83 | 2.2 |
| mapnik | buffer | buffer | 3.41 | 4.1 |
| gm | buffer | buffer | 8.34 | 10.0 |
| imagemagick | file | file | 8.67 | 10.4 |
| gm | file | file | 8.82 | 10.6 |
| sharp | stream | stream | 29.44 | 35.5 |
| sharp | file | file | 29.64 | 35.7 |
| sharp | buffer | buffer | 31.09 | 37.5 |
Greater libvips performance can be expected with caching enabled (default)
and using 8+ core machines, especially those with larger L1/L2 CPU caches.

File diff suppressed because one or more lines are too long


@@ -2,7 +2,7 @@
const fs = require('fs');
const path = require('path');
const { extractDescription, extractKeywords } = require('./extract');
const { extractDescription, extractKeywords, extractParameters } = require('./extract');
const searchIndex = [];
@@ -37,18 +37,19 @@ for (const match of matches) {
].forEach((section) => {
const contents = fs.readFileSync(path.join(__dirname, '..', `api-${section}.md`), 'utf8');
const matches = contents.matchAll(
/\n## (?<title>[A-Za-z]+)\n\n(?<firstparagraph>.+?)\n\n/gs
/\n## (?<title>[A-Za-z]+)\n\n(?<firstparagraph>.+?)\n\n(?<parameters>### Parameters.+?Returns)?/gs
);
for (const match of matches) {
const { title, firstparagraph } = match.groups;
const { title, firstparagraph, parameters } = match.groups;
const description = firstparagraph.startsWith('###')
? 'Constructor'
: extractDescription(firstparagraph);
const parameterNames = parameters ? extractParameters(parameters) : '';
searchIndex.push({
t: title,
d: description,
k: extractKeywords(`${title} ${description}`),
k: extractKeywords(`${title} ${description} ${parameterNames}`),
l: `/api-${section}#${title.toLowerCase()}`
});
}


@@ -4,6 +4,7 @@ const stopWords = require('./stop-words');
const extractDescription = (str) =>
str
.replace(/### Examples.*/sg, '')
.replace(/\(http[^)]+/g, '')
.replace(/\s+/g, ' ')
.replace(/[^A-Za-z0-9_/\-,. ]/g, '')
@@ -11,6 +12,11 @@ const extractDescription = (str) =>
.substr(0, 180)
.trim();
const extractParameters = (str) =>
[...str.matchAll(/options\.(?<name>[^.`]+)/gs)]
.map((match) => match.groups.name)
.join(' ');
const extractKeywords = (str) =>
[
...new Set(
@@ -21,4 +27,4 @@ const extractKeywords = (str) =>
)
].join(' ');
module.exports = { extractDescription, extractKeywords };
module.exports = { extractDescription, extractKeywords, extractParameters };
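The new `extractParameters` helper above pulls the `options.*` names out of each "### Parameters" block so they become searchable keywords. A minimal sketch of its behaviour, assuming it is run from the repository root and using the internal module path `docs/search-index/extract.js` (not a public API):

```js
// Illustration only: feed a typical "### Parameters" fragment from an api-*.md page.
const { extractParameters } = require('./docs/search-index/extract');

const parameters = [
  '### Parameters',
  '*   `options.width` **number** integer width of the region in pixels.',
  '*   `options.maxSlope` **number** maximum slope of the cumulative histogram.',
  'Returns'
].join('\n');

console.log(extractParameters(parameters));
// => 'width maxSlope'
```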


@@ -17,6 +17,7 @@ module.exports = [
'before',
'both',
'call',
'callback',
'can',
'containing',
'default',
@@ -26,6 +27,7 @@ module.exports = [
'ensure',
'etc',
'every',
'except',
'for',
'from',
'get',
@@ -49,12 +51,16 @@ module.exports = [
'occur',
'occurs',
'options',
'other',
'out',
'over',
'perform',
'performs',
'provide',
'provided',
'ready',
'requires',
'returned',
'same',
'see',
'set',
@@ -67,9 +73,11 @@ module.exports = [
'supported',
'sure',
'take',
'than',
'that',
'the',
'their',
'then',
'there',
'therefore',
'these',

install/can-compile.js Normal file

@@ -0,0 +1,11 @@
'use strict';
const libvips = require('../lib/libvips');
try {
if (!(libvips.useGlobalLibvips() || libvips.hasVendoredLibvips())) {
process.exitCode = 1;
}
} catch (err) {
process.exitCode = 1;
}


@@ -4,19 +4,19 @@ const fs = require('fs');
const path = require('path');
const libvips = require('../lib/libvips');
const platform = require('../lib/platform');
const minimumLibvipsVersion = libvips.minimumLibvipsVersion;
const platform = process.env.npm_config_platform || process.platform;
if (platform === 'win32') {
const buildDir = path.join(__dirname, '..', 'build');
const buildReleaseDir = path.join(buildDir, 'Release');
const platformAndArch = platform();
if (platformAndArch.startsWith('win32')) {
const buildReleaseDir = path.join(__dirname, '..', 'build', 'Release');
libvips.log(`Creating ${buildReleaseDir}`);
try {
libvips.mkdirSync(buildDir);
libvips.mkdirSync(buildReleaseDir);
} catch (err) {}
const vendorLibDir = path.join(__dirname, '..', 'vendor', minimumLibvipsVersion, 'lib');
const vendorLibDir = path.join(__dirname, '..', 'vendor', minimumLibvipsVersion, platformAndArch, 'lib');
libvips.log(`Copying DLLs from ${vendorLibDir} to ${buildReleaseDir}`);
try {
fs


@@ -7,7 +7,8 @@ const stream = require('stream');
const zlib = require('zlib');
const detectLibc = require('detect-libc');
const semver = require('semver');
const semverLessThan = require('semver/functions/lt');
const semverSatisfies = require('semver/functions/satisfies');
const simpleGet = require('simple-get');
const tarFs = require('tar-fs');
@@ -23,6 +24,7 @@ const minimumGlibcVersionByArch = {
const hasSharpPrebuild = [
'darwin-x64',
'darwin-arm64',
'linux-arm64',
'linux-x64',
'linuxmusl-x64',
@@ -34,22 +36,27 @@ const hasSharpPrebuild = [
const { minimumLibvipsVersion, minimumLibvipsVersionLabelled } = libvips;
const distHost = process.env.npm_config_sharp_libvips_binary_host || 'https://github.com/lovell/sharp-libvips/releases/download';
const distBaseUrl = process.env.npm_config_sharp_dist_base_url || process.env.SHARP_DIST_BASE_URL || `${distHost}/v${minimumLibvipsVersionLabelled}/`;
const supportsBrotli = ('BrotliDecompress' in zlib);
const installationForced = !!(process.env.npm_config_sharp_install_force || process.env.SHARP_INSTALL_FORCE);
const fail = function (err) {
libvips.log(err);
if (err.code === 'EACCES') {
libvips.log('Are you trying to install as a root or sudo user? Try again with the --unsafe-perm flag');
}
libvips.log('Attempting to build from source via node-gyp but this may fail due to the above error');
libvips.log('Please see https://sharp.pixelplumbing.com/install for required dependencies');
process.exit(1);
};
const handleError = function (err) {
if (installationForced) {
libvips.log(`Installation warning: ${err.message}`);
} else {
throw err;
}
};
const extractTarball = function (tarPath, platformAndArch) {
const vendorPath = path.join(__dirname, '..', 'vendor');
libvips.mkdirSync(vendorPath);
const versionedVendorPath = path.join(vendorPath, minimumLibvipsVersion);
const versionedVendorPath = path.join(__dirname, '..', 'vendor', minimumLibvipsVersion, platformAndArch);
libvips.mkdirSync(versionedVendorPath);
const ignoreVendorInclude = hasSharpPrebuild.includes(platformAndArch) && !process.env.npm_config_build_from_source;
@@ -59,7 +66,7 @@ const extractTarball = function (tarPath, platformAndArch) {
stream.pipeline(
fs.createReadStream(tarPath),
supportsBrotli ? new zlib.BrotliDecompress() : new zlib.Gunzip(),
new zlib.BrotliDecompress(),
tarFs.extract(versionedVendorPath, { ignore }),
function (err) {
if (err) {
@@ -95,26 +102,25 @@ try {
if (platformAndArch === 'freebsd-x64' || platformAndArch === 'openbsd-x64' || platformAndArch === 'sunos-x64') {
throw new Error(`BSD/SunOS systems require manual installation of libvips >= ${minimumLibvipsVersion}`);
}
if (detectLibc.family === detectLibc.GLIBC && detectLibc.version) {
if (semver.lt(`${detectLibc.version}.0`, `${minimumGlibcVersionByArch[arch]}.0`)) {
throw new Error(`Use with glibc ${detectLibc.version} requires manual installation of libvips >= ${minimumLibvipsVersion}`);
// Linux libc version check
if (detectLibc.family === detectLibc.GLIBC && detectLibc.version && minimumGlibcVersionByArch[arch]) {
if (semverLessThan(`${detectLibc.version}.0`, `${minimumGlibcVersionByArch[arch]}.0`)) {
handleError(new Error(`Use with glibc ${detectLibc.version} requires manual installation of libvips >= ${minimumLibvipsVersion}`));
}
}
if (detectLibc.family === detectLibc.MUSL && detectLibc.version) {
if (semver.lt(detectLibc.version, '1.1.24')) {
throw new Error(`Use with musl ${detectLibc.version} requires manual installation of libvips >= ${minimumLibvipsVersion}`);
if (semverLessThan(detectLibc.version, '1.1.24')) {
handleError(new Error(`Use with musl ${detectLibc.version} requires manual installation of libvips >= ${minimumLibvipsVersion}`));
}
}
// Node.js minimum version check
const supportedNodeVersion = process.env.npm_package_engines_node || require('../package.json').engines.node;
if (!semver.satisfies(process.versions.node, supportedNodeVersion)) {
throw new Error(`Expected Node.js version ${supportedNodeVersion} but found ${process.versions.node}`);
if (!semverSatisfies(process.versions.node, supportedNodeVersion)) {
handleError(new Error(`Expected Node.js version ${supportedNodeVersion} but found ${process.versions.node}`));
}
const extension = supportsBrotli ? 'br' : 'gz';
// Download to per-process temporary file
const tarFilename = ['libvips', minimumLibvipsVersion, platformAndArch].join('-') + '.tar.' + extension;
const tarFilename = ['libvips', minimumLibvipsVersion, platformAndArch].join('-') + '.tar.br';
const tarPathCache = path.join(libvips.cachePath(), tarFilename);
if (fs.existsSync(tarPathCache)) {
libvips.log(`Using cached ${tarPathCache}`);
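The install script now requires only the individual semver functions it uses and, when the new force flag is set, downgrades failed compatibility checks from hard errors to logged warnings. A minimal sketch of the same deep-require pattern (the version strings are illustrative, not taken from the diff):

```js
// Deep requires load a single function rather than the whole semver module.
const semverLessThan = require('semver/functions/lt');
const semverSatisfies = require('semver/functions/satisfies');

console.log(semverLessThan('2.17.0', '2.28.0'));                   // true - libc older than the minimum
console.log(semverSatisfies(process.versions.node, '>=12.13.0'));  // the engine check used above
```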


@@ -25,7 +25,7 @@ module.exports = function () {
? tunnelAgent.httpsOverHttps
: tunnelAgent.httpsOverHttp;
const proxyAuth = proxy.username && proxy.password
? `${proxy.username}:${proxy.password}`
? `${decodeURIComponent(proxy.username)}:${decodeURIComponent(proxy.password)}`
: null;
return tunnel({
proxy: {
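The change above decodes percent-encoded characters in proxy credentials before handing them to `tunnel-agent`, so reserved characters in a username or password can be escaped inside the proxy URL. A hedged illustration (the proxy host and password are made up):

```js
// A password containing ':' or '@' must be percent-encoded inside the proxy URL.
const proxy = new URL('http://user:p%40ss%3Aword@proxy.example.com:8080');

console.log(proxy.password);                     // 'p%40ss%3Aword' - still encoded
console.log(decodeURIComponent(proxy.password)); // 'p@ss:word' - what the proxy expects
```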


@@ -15,6 +15,8 @@ const bool = {
/**
* Remove alpha channel, if any. This is a no-op if the image does not have an alpha channel.
*
* See also {@link /api-operation#flatten|flatten}.
*
* @example
* sharp('rgba.png')
* .removeAlpha()
@@ -70,13 +72,17 @@ function ensureAlpha (alpha) {
* Extract a single channel from a multi-channel image.
*
* @example
* sharp(input)
* // green.jpg is a greyscale image containing the green channel of the input
* await sharp(input)
* .extractChannel('green')
* .toColourspace('b-w')
* .toFile('green.jpg', function(err, info) {
* // info.channels === 1
* // green.jpg is a greyscale image containing the green channel of the input
* });
* .toFile('green.jpg');
*
* @example
* // red1 is the red value of the first pixel, red2 the second pixel etc.
* const [red1, red2, ...] = await sharp(input)
* .extractChannel(0)
* .raw()
* .toBuffer();
*
* @param {number|string} channel - zero-indexed channel/band number to extract, or `red`, `green`, `blue` or `alpha`.
* @returns {Sharp}
@@ -92,7 +98,7 @@ function extractChannel (channel) {
} else {
throw is.invalidParameterError('channel', 'integer or one of: red, green, blue, alpha', channel);
}
return this;
return this.toColourspace('b-w');
}
/**


@@ -54,6 +54,45 @@ function grayscale (grayscale) {
return this.greyscale(grayscale);
}
/**
* Set the pipeline colourspace.
*
* The input image will be converted to the provided colourspace at the start of the pipeline.
* All operations will use this colourspace before converting to the output colourspace, as defined by {@link toColourspace}.
*
* This feature is experimental and has not yet been fully-tested with all operations.
*
* @since 0.29.0
*
* @example
* // Run pipeline in 16 bits per channel RGB while converting final result to 8 bits per channel sRGB.
* await sharp(input)
* .pipelineColourspace('rgb16')
* .toColourspace('srgb')
* .toFile('16bpc-pipeline-to-8bpc-output.png')
*
* @param {string} [colourspace] - pipeline colourspace e.g. `rgb16`, `scrgb`, `lab`, `grey16` [...](https://github.com/libvips/libvips/blob/41cff4e9d0838498487a00623462204eb10ee5b8/libvips/iofuncs/enumtypes.c#L774)
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function pipelineColourspace (colourspace) {
if (!is.string(colourspace)) {
throw is.invalidParameterError('colourspace', 'string', colourspace);
}
this.options.colourspaceInput = colourspace;
return this;
}
/**
* Alternative spelling of `pipelineColourspace`.
* @param {string} [colorspace] - pipeline colorspace.
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function pipelineColorspace (colorspace) {
return this.pipelineColourspace(colorspace);
}
/**
* Set the output colourspace.
* By default output image will be web-friendly sRGB, with additional channels interpreted as alpha channels.
@@ -64,7 +103,7 @@ function grayscale (grayscale) {
* .toColourspace('rgb16')
* .toFile('16-bpp.png')
*
* @param {string} [colourspace] - output colourspace e.g. `srgb`, `rgb`, `cmyk`, `lab`, `b-w` [...](https://github.com/libvips/libvips/blob/master/libvips/iofuncs/enumtypes.c#L568)
* @param {string} [colourspace] - output colourspace e.g. `srgb`, `rgb`, `cmyk`, `lab`, `b-w` [...](https://github.com/libvips/libvips/blob/3c0bfdf74ce1dc37a6429bed47fa76f16e2cd70a/libvips/iofuncs/enumtypes.c#L777-L794)
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
@@ -119,6 +158,8 @@ module.exports = function (Sharp) {
tint,
greyscale,
grayscale,
pipelineColourspace,
pipelineColorspace,
toColourspace,
toColorspace,
// Private
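The `pipelineColourspace` addition above lets the whole pipeline run in a different colourspace from the output. A minimal sketch of a typical use, assuming a 16-bit intermediate suits the input (filenames are placeholders):

```js
const sharp = require('sharp');

async function resizeWithoutBanding() {
  // Process in 16 bits per channel, then emit a conventional 8-bit sRGB PNG.
  await sharp('gradient.png')
    .pipelineColourspace('rgb16')
    .resize(1200)
    .toColourspace('srgb')
    .toFile('gradient-resized.png');
}
```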


@@ -89,6 +89,8 @@ const blend = {
* @param {Number} [images[].raw.width]
* @param {Number} [images[].raw.height]
* @param {Number} [images[].raw.channels]
* @param {boolean} [images[].failOnError=true] - @see {@link /api-constructor#parameters|constructor parameters}
* @param {number|boolean} [images[].limitInputPixels=268402689] - @see {@link /api-constructor#parameters|constructor parameters}
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
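The two per-image options documented above mirror the constructor parameters of the same names, so a single oversized or slightly corrupt overlay can be accepted without relaxing the defaults for the base image. A hedged sketch (file names are placeholders):

```js
const sharp = require('sharp');

async function watermark() {
  return sharp('base.jpg')
    .composite([{
      input: 'huge-watermark.png',
      limitInputPixels: false, // or an explicit pixel count
      failOnError: false,      // tolerate minor errors in this overlay only
      gravity: 'southeast'
    }])
    .jpeg()
    .toBuffer();
}
```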


@@ -5,32 +5,7 @@ const stream = require('stream');
const is = require('./is');
require('./libvips').hasVendoredLibvips();
/* istanbul ignore next */
try {
require('../build/Release/sharp.node');
} catch (err) {
// Bail early if bindings aren't available
const help = ['', 'Something went wrong installing the "sharp" module', '', err.message, ''];
if (/NODE_MODULE_VERSION/.test(err.message)) {
help.push('- Ensure the version of Node.js used at install time matches that used at runtime');
} else if (/invalid ELF header/.test(err.message)) {
help.push(`- Ensure "${process.platform}" is used at install time as well as runtime`);
} else if (/dylib/.test(err.message) && /Incompatible library version/.test(err.message)) {
help.push('- Run "brew update && brew upgrade vips"');
} else {
help.push(
'- Remove the "node_modules/sharp" directory then run',
' "npm install --ignore-scripts=false --verbose sharp" and look for errors'
);
}
help.push(
'- Consult the installation documentation at https://sharp.pixelplumbing.com/install',
'- Search for this error at https://github.com/lovell/sharp/issues', ''
);
const error = help.join('\n');
throw new Error(error);
}
require('./sharp');
// Use NODE_DEBUG=sharp to enable libvips warnings
const debuglog = util.debuglog('sharp');
@@ -117,8 +92,9 @@ const debuglog = util.debuglog('sharp');
* }
* }).toFile('noise.png');
*
* @param {(Buffer|Uint8Array|Uint8ClampedArray|string)} [input] - if present, can be
* a Buffer / Uint8Array / Uint8ClampedArray containing JPEG, PNG, WebP, AVIF, GIF, SVG, TIFF or raw pixel image data, or
* @param {(Buffer|Uint8Array|Uint8ClampedArray|Int8Array|Uint16Array|Int16Array|Uint32Array|Int32Array|Float32Array|Float64Array|string)} [input] - if present, can be
* a Buffer / Uint8Array / Uint8ClampedArray containing JPEG, PNG, WebP, AVIF, GIF, SVG or TIFF image data, or
* a TypedArray containing raw pixel image data, or
* a String containing the filesystem path to an JPEG, PNG, WebP, AVIF, GIF, SVG or TIFF image file.
* JPEG, PNG, WebP, AVIF, GIF, SVG, TIFF or raw pixel image data can be streamed into the object when not present.
* @param {Object} [options] - if present, is an Object with optional attributes.
@@ -139,6 +115,8 @@ const debuglog = util.debuglog('sharp');
* @param {number} [options.raw.width] - integral number of pixels wide.
* @param {number} [options.raw.height] - integral number of pixels high.
* @param {number} [options.raw.channels] - integral number of channels, between 1 and 4.
* @param {boolean} [options.raw.premultiplied] - specifies that the raw input has already been premultiplied, set to `true`
* to avoid sharp premultiplying the image. (optional, default `false`)
* @param {Object} [options.create] - describes a new image to be created.
* @param {number} [options.create.width] - integral number of pixels wide.
* @param {number} [options.create.height] - integral number of pixels high.
@@ -202,6 +180,7 @@ const Sharp = function (input, options) {
flatten: false,
flattenBackground: [0, 0, 0],
negate: false,
negateAlpha: true,
medianSize: 0,
blurSigma: 0,
sharpenSigma: 0,
@@ -214,9 +193,13 @@ const Sharp = function (input, options) {
gammaOut: 0,
greyscale: false,
normalise: false,
claheWidth: 0,
claheHeight: 0,
claheMaxSlope: 3,
brightness: 1,
saturation: 1,
hue: 0,
lightness: 0,
booleanBufferIn: null,
booleanFileIn: '',
joinChannelIn: [],
@@ -224,6 +207,7 @@ const Sharp = function (input, options) {
removeAlpha: false,
ensureAlpha: -1,
colourspace: 'srgb',
colourspaceInput: 'last',
composite: [],
// output
fileOut: '',
@@ -231,6 +215,7 @@ const Sharp = function (input, options) {
streamOut: false,
withMetadata: false,
withMetadataOrientation: -1,
withMetadataDensity: 0,
withMetadataIcc: '',
withMetadataStrs: {},
resolveWithObject: false,
@@ -248,8 +233,13 @@ const Sharp = function (input, options) {
pngAdaptiveFiltering: false,
pngPalette: false,
pngQuality: 100,
pngColours: 256,
pngBitdepth: 8,
pngDither: 1,
jp2Quality: 80,
jp2TileHeight: 512,
jp2TileWidth: 512,
jp2Lossless: false,
jp2ChromaSubsampling: '4:4:4',
webpQuality: 80,
webpAlphaQuality: 100,
webpLossless: false,
@@ -270,7 +260,8 @@ const Sharp = function (input, options) {
heifLossless: false,
heifCompression: 'av1',
heifSpeed: 5,
heifChromaSubsampling: '4:2:0',
heifChromaSubsampling: '4:4:4',
rawDepth: 'uchar',
tileSize: 256,
tileOverlap: 0,
tileContainer: 'fs',
@@ -282,6 +273,7 @@ const Sharp = function (input, options) {
tileBackground: [255, 255, 255, 255],
tileCentre: false,
tileId: 'https://example.com/iiif',
timeoutSeconds: 0,
linearA: 1,
linearB: 0,
// Function to notify of libvips warnings
@@ -297,7 +289,8 @@ const Sharp = function (input, options) {
this.options.input = this._createInputDescriptor(input, options, { allowStream: true });
return this;
};
util.inherits(Sharp, stream.Duplex);
Object.setPrototypeOf(Sharp.prototype, stream.Duplex.prototype);
Object.setPrototypeOf(Sharp, stream.Duplex);
/**
* Take a "snapshot" of the Sharp instance, returning a new instance.
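The two `Object.setPrototypeOf` calls above replace the deprecated `util.inherits` helper, wiring up both the instance prototype chain and the static one. A rough, generic sketch of the pattern (not sharp-specific):

```js
const stream = require('stream');

function MyDuplex(options) {
  stream.Duplex.call(this, options);
}
// Roughly what util.inherits(MyDuplex, stream.Duplex) provided, plus static inheritance:
Object.setPrototypeOf(MyDuplex.prototype, stream.Duplex.prototype); // instance methods
Object.setPrototypeOf(MyDuplex, stream.Duplex);                     // static members
```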


@@ -2,7 +2,7 @@
const color = require('color');
const is = require('./is');
const sharp = require('../build/Release/sharp.node');
const sharp = require('./sharp');
/**
* Extract input options, if any, from an object.
@@ -30,9 +30,14 @@ function _createInputDescriptor (input, inputOptions, containerOptions) {
inputDescriptor.file = input;
} else if (is.buffer(input)) {
// Buffer
if (input.length === 0) {
throw Error('Input Buffer is empty');
}
inputDescriptor.buffer = input;
} else if (is.uint8Array(input)) {
// Uint8Array or Uint8ClampedArray
} else if (is.typedArray(input)) {
if (input.length === 0) {
throw Error('Input Bit Array is empty');
}
inputDescriptor.buffer = Buffer.from(input.buffer);
} else if (is.plainObject(input) && !is.defined(inputOptions)) {
// Plain Object descriptor, e.g. create
@@ -97,6 +102,38 @@ function _createInputDescriptor (input, inputOptions, containerOptions) {
inputDescriptor.rawWidth = inputOptions.raw.width;
inputDescriptor.rawHeight = inputOptions.raw.height;
inputDescriptor.rawChannels = inputOptions.raw.channels;
inputDescriptor.rawPremultiplied = !!inputOptions.raw.premultiplied;
switch (input.constructor) {
case Uint8Array:
case Uint8ClampedArray:
inputDescriptor.rawDepth = 'uchar';
break;
case Int8Array:
inputDescriptor.rawDepth = 'char';
break;
case Uint16Array:
inputDescriptor.rawDepth = 'ushort';
break;
case Int16Array:
inputDescriptor.rawDepth = 'short';
break;
case Uint32Array:
inputDescriptor.rawDepth = 'uint';
break;
case Int32Array:
inputDescriptor.rawDepth = 'int';
break;
case Float32Array:
inputDescriptor.rawDepth = 'float';
break;
case Float64Array:
inputDescriptor.rawDepth = 'double';
break;
default:
inputDescriptor.rawDepth = 'uchar';
break;
}
} else {
throw new Error('Expected width, height and channels for raw pixel input');
}
@@ -264,6 +301,8 @@ function _isStreamInput () {
* - `pagePrimary`: Number of the primary page in a HEIF image
* - `levels`: Details of each level in a multi-level image provided as an array of objects, requires libvips compiled with support for OpenSlide
* - `subifds`: Number of Sub Image File Directories in an OME-TIFF image
* - `background`: Default background colour, if present, for PNG (bKGD) and GIF images, either an RGB Object or a single greyscale value
* - `compression`: The encoder used to compress an HEIF file, `av1` (AVIF) or `hevc` (HEIC)
* - `hasProfile`: Boolean indicating the presence of an embedded ICC profile
* - `hasAlpha`: Boolean indicating the presence of an alpha transparency channel
* - `orientation`: Number value of the EXIF Orientation header, if present
@@ -349,6 +388,9 @@ function metadata (callback) {
* - `sharpness`: Estimation of greyscale sharpness based on the standard deviation of a Laplacian convolution, discarding alpha channel if any (experimental)
* - `dominant`: Object containing most dominant sRGB colour based on a 4096-bin 3D histogram (experimental)
*
* **Note**: Statistics are derived from the original input image. Any operations performed on the image must first be
* written to a buffer in order to run `stats` on the result (see third example).
*
* @example
* const image = sharp(inputJpg);
* image
@@ -361,6 +403,13 @@ function metadata (callback) {
* const { entropy, sharpness, dominant } = await sharp(input).stats();
* const { r, g, b } = dominant;
*
* @example
* const image = sharp(input);
* // store intermediate result
* const part = await image.extract(region).toBuffer();
* // create new instance to obtain statistics of extracted region
* const stats = await sharp(part).stats();
*
* @param {Function} [callback] - called with the arguments `(err, stats)`
* @returns {Promise<Object>}
*/
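The `switch` on the input's constructor above picks a matching raw bit depth automatically, so 16-bit raw data can now be supplied directly. A minimal hedged sketch (dimensions and values are illustrative):

```js
const sharp = require('sharp');

async function rawUshortToPng() {
  // 2x2 single-channel 16-bit image; the Uint16Array constructor implies depth 'ushort'.
  const pixels = new Uint16Array([0, 16384, 32768, 65535]);
  return sharp(pixels, { raw: { width: 2, height: 2, channels: 1 } })
    .png()
    .toBuffer();
}
```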


@@ -49,12 +49,26 @@ const buffer = function (val) {
};
/**
* Is this value a Uint8Array or Uint8ClampedArray object?
* Is this value a typed array object, e.g. Uint8Array or Uint8ClampedArray?
* @private
*/
const uint8Array = function (val) {
// allow both since Uint8ClampedArray simply clamps the values between 0-255
return val instanceof Uint8Array || val instanceof Uint8ClampedArray;
const typedArray = function (val) {
if (defined(val)) {
switch (val.constructor) {
case Uint8Array:
case Uint8ClampedArray:
case Int8Array:
case Uint16Array:
case Int16Array:
case Uint32Array:
case Int32Array:
case Float32Array:
case Float64Array:
return true;
}
}
return false;
};
/**
@@ -119,7 +133,7 @@ module.exports = {
fn: fn,
bool: bool,
buffer: buffer,
uint8Array: uint8Array,
typedArray: typedArray,
string: string,
number: number,
integer: integer,
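The broadened predicate above accepts every standard numeric TypedArray rather than just the two unsigned 8-bit kinds. A quick illustration; note that `sharp/lib/is` is an internal module, shown here only to demonstrate the check:

```js
// Internal helper, not part of sharp's public API - for illustration only.
const is = require('sharp/lib/is');

console.log(is.typedArray(new Uint8Array(4)));   // true (previously the only accepted family)
console.log(is.typedArray(new Float64Array(4))); // true (newly accepted)
console.log(is.typedArray([1, 2, 3]));           // false - plain arrays are rejected
```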


@@ -4,13 +4,15 @@ const fs = require('fs');
const os = require('os');
const path = require('path');
const spawnSync = require('child_process').spawnSync;
const semver = require('semver');
const semverCoerce = require('semver/functions/coerce');
const semverGreaterThanOrEqualTo = require('semver/functions/gte');
const platform = require('./platform');
const env = process.env;
const minimumLibvipsVersionLabelled = env.npm_package_config_libvips || /* istanbul ignore next */
require('../package.json').config.libvips;
const minimumLibvipsVersion = semver.coerce(minimumLibvipsVersionLabelled).version;
const minimumLibvipsVersion = semverCoerce(minimumLibvipsVersionLabelled).version;
const spawnSyncOptions = {
encoding: 'utf8',
@@ -19,9 +21,9 @@ const spawnSyncOptions = {
const mkdirSync = function (dirPath) {
try {
fs.mkdirSync(dirPath);
fs.mkdirSync(dirPath, { recursive: true });
} catch (err) {
/* istanbul ignore if */
/* istanbul ignore next */
if (err.code !== 'EEXIST') {
throw err;
}
@@ -65,23 +67,8 @@ const globalLibvipsVersion = function () {
};
const hasVendoredLibvips = function () {
const currentPlatformId = platform();
const vendorPath = path.join(__dirname, '..', 'vendor', minimumLibvipsVersion);
let vendorPlatformId;
try {
vendorPlatformId = require(path.join(vendorPath, 'platform.json'));
} catch (err) {}
/* istanbul ignore else */
if (vendorPlatformId) {
/* istanbul ignore else */
if (currentPlatformId === vendorPlatformId) {
return true;
} else {
throw new Error(`'${vendorPlatformId}' binaries cannot be used on the '${currentPlatformId}' platform. Please remove the 'node_modules/sharp' directory and run 'npm install' on the '${currentPlatformId}' platform.`);
}
} else {
return false;
}
const vendorPath = path.join(__dirname, '..', 'vendor', minimumLibvipsVersion, platform());
return fs.existsSync(vendorPath);
};
const pkgConfigPath = function () {
@@ -105,7 +92,7 @@ const useGlobalLibvips = function () {
}
const globalVipsVersion = globalLibvipsVersion();
return !!globalVipsVersion && /* istanbul ignore next */
semver.gte(globalVipsVersion, minimumLibvipsVersion);
semverGreaterThanOrEqualTo(globalVipsVersion, minimumLibvipsVersion);
};
module.exports = {


@@ -270,9 +270,11 @@ function blur (sigma) {
/**
* Merge alpha transparency channel, if any, with a background, then remove the alpha channel.
*
* See also {@link /api-channel#removealpha|removeAlpha}.
*
* @example
* await sharp(rgbaInput)
* .flatten('#F0A703')
* .flatten({ background: '#F0A703' })
* .toBuffer();
*
* @param {Object} [options]
@@ -323,11 +325,19 @@ function gamma (gamma, gammaOut) {
/**
* Produce the "negative" of the image.
* @param {Boolean} [negate=true]
* @param {Object} [options]
* @param {Boolean} [options.alpha=true] Whether or not to negate any alpha channel
* @returns {Sharp}
*/
function negate (negate) {
this.options.negate = is.bool(negate) ? negate : true;
function negate (options) {
this.options.negate = is.bool(options) ? options : true;
if (is.plainObject(options) && 'alpha' in options) {
if (!is.bool(options.alpha)) {
throw is.invalidParameterError('alpha', 'should be boolean value', options.alpha);
} else {
this.options.negateAlpha = options.alpha;
}
}
return this;
}
@@ -350,6 +360,47 @@ function normalize (normalize) {
return this.normalise(normalize);
}
/**
* Perform contrast limiting adaptive histogram equalization
* {@link https://en.wikipedia.org/wiki/Adaptive_histogram_equalization#Contrast_Limited_AHE|CLAHE}.
*
* This will, in general, enhance the clarity of the image by bringing out darker details.
*
* @since 0.28.3
*
* @param {Object} options
* @param {number} options.width - integer width of the region in pixels.
* @param {number} options.height - integer height of the region in pixels.
* @param {number} [options.maxSlope=3] - maximum value for the slope of the
* cumulative histogram. A value of 0 disables contrast limiting. Valid values
* are integers in the range 0-100 (inclusive)
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function clahe (options) {
if (!is.plainObject(options)) {
throw is.invalidParameterError('options', 'plain object', options);
}
if (!('width' in options) || !is.integer(options.width) || options.width <= 0) {
throw is.invalidParameterError('width', 'integer above zero', options.width);
} else {
this.options.claheWidth = options.width;
}
if (!('height' in options) || !is.integer(options.height) || options.height <= 0) {
throw is.invalidParameterError('height', 'integer above zero', options.height);
} else {
this.options.claheHeight = options.height;
}
if (!is.defined(options.maxSlope)) {
this.options.claheMaxSlope = 3;
} else if (!is.integer(options.maxSlope) || options.maxSlope < 0 || options.maxSlope > 100) {
throw is.invalidParameterError('maxSlope', 'integer 0-100', options.maxSlope);
} else {
this.options.claheMaxSlope = options.maxSlope;
}
return this;
}
/**
* Convolve the image with the specified kernel.
*
@@ -368,7 +419,7 @@ function normalize (normalize) {
*
* @param {Object} kernel
* @param {number} kernel.width - width of the kernel in pixels.
* @param {number} kernel.height - width of the kernel in pixels.
* @param {number} kernel.height - height of the kernel in pixels.
* @param {Array<number>} kernel.kernel - Array of length `width*height` containing the kernel values.
* @param {number} [kernel.scale=sum] - the scale of the kernel in pixels.
* @param {number} [kernel.offset=0] - the offset of the kernel in pixels.
@@ -519,14 +570,16 @@ function recomb (inputMatrix) {
}
/**
* Transforms the image using brightness, saturation and hue rotation.
* Transforms the image using brightness, saturation, hue rotation, and lightness.
* Brightness and lightness both operate on luminance, with the difference being that
* brightness is multiplicative whereas lightness is additive.
*
* @since 0.22.1
*
* @example
* sharp(input)
* .modulate({
* brightness: 2 // increase lightness by a factor of 2
* brightness: 2 // increase brightness by a factor of 2
* });
*
* sharp(input)
@@ -534,6 +587,11 @@ function recomb (inputMatrix) {
* hue: 180 // hue-rotate by 180 degrees
* });
*
* sharp(input)
* .modulate({
* lightness: 50 // increase lightness by +50
* });
*
* // decrease brightness and saturation while also hue-rotating by 90 degrees
* sharp(input)
* .modulate({
@@ -546,6 +604,7 @@ function recomb (inputMatrix) {
* @param {number} [options.brightness] Brightness multiplier
* @param {number} [options.saturation] Saturation multiplier
* @param {number} [options.hue] Degrees for hue rotation
* @param {number} [options.lightness] Lightness addend
* @returns {Sharp}
*/
function modulate (options) {
@@ -573,6 +632,13 @@ function modulate (options) {
throw is.invalidParameterError('hue', 'number', options.hue);
}
}
if ('lightness' in options) {
if (is.number(options.lightness)) {
this.options.lightness = options.lightness;
} else {
throw is.invalidParameterError('lightness', 'number', options.lightness);
}
}
return this;
}
@@ -594,6 +660,7 @@ module.exports = function (Sharp) {
negate,
normalise,
normalize,
clahe,
convolve,
threshold,
boolean,
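The operations added or extended above (`clahe`, the `alpha` flag on `negate`, and `lightness` in `modulate`) compose with the rest of the pipeline as usual. A hedged sketch with illustrative values and filenames:

```js
const sharp = require('sharp');

async function enhance() {
  return sharp('photo.jpg')
    .clahe({ width: 64, height: 64, maxSlope: 3 }) // local contrast enhancement
    .modulate({ lightness: 10 })                   // additive lightness boost
    .negate({ alpha: false })                      // invert colours but leave alpha untouched
    .toBuffer();
}
```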


@@ -1,7 +1,8 @@
'use strict';
const path = require('path');
const is = require('./is');
const sharp = require('../build/Release/sharp.node');
const sharp = require('./sharp');
const formats = new Map([
['heic', 'heif'],
@@ -12,11 +13,17 @@ const formats = new Map([
['png', 'png'],
['raw', 'raw'],
['tiff', 'tiff'],
['tif', 'tiff'],
['webp', 'webp'],
['gif', 'gif']
['gif', 'gif'],
['jp2', 'jp2'],
['jpx', 'jp2'],
['j2k', 'jp2'],
['j2c', 'jp2']
]);
const errMagickSave = new Error('GIF output requires libvips with support for ImageMagick');
const errJp2Save = new Error('JP2 output requires libvips with support for OpenJPEG');
/**
* Write output image data to a file.
@@ -28,6 +35,8 @@ const errMagickSave = new Error('GIF output requires libvips with support for Im
* By default all metadata will be removed, which includes EXIF-based orientation.
* See {@link withMetadata} for control over this.
*
* The caller is responsible for ensuring directory structures and permissions exist.
*
* A `Promise` is returned when `callback` is not provided.
*
* @example
@@ -52,7 +61,7 @@ function toFile (fileOut, callback) {
let err;
if (!is.string(fileOut)) {
err = new Error('Missing output file path');
} else if (this.options.input.file === fileOut) {
} else if (is.string(this.options.input.file) && path.resolve(this.options.input.file) === path.resolve(fileOut)) {
err = new Error('Cannot use same file for input and output');
} else if (this.options.formatOut === 'input' && fileOut.toLowerCase().endsWith('.gif') && !this.constructor.format.magick.output.file) {
err = errMagickSave;
@@ -150,7 +159,7 @@ function toBuffer (options, callback) {
*
* @example
* // Set "IFD0-Copyright" in output EXIF metadata
* await sharp(input)
* const data = await sharp(input)
* .withMetadata({
* exif: {
* IFD0: {
@@ -160,10 +169,17 @@ function toBuffer (options, callback) {
* })
* .toBuffer();
*
* @example
* // Set output metadata to 96 DPI
* const data = await sharp(input)
* .withMetadata({ density: 96 })
* .toBuffer();
*
* @param {Object} [options]
* @param {number} [options.orientation] value between 1 and 8, used to update the EXIF `Orientation` tag.
* @param {string} [options.icc] filesystem path to output ICC profile, defaults to sRGB.
* @param {Object<Object>} [options.exif={}] Object keyed by IFD0, IFD1 etc. of key/value string pairs to write as EXIF data.
* @param {number} [options.density] Number of pixels per inch (DPI).
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
@@ -177,6 +193,13 @@ function withMetadata (options) {
throw is.invalidParameterError('orientation', 'integer between 1 and 8', options.orientation);
}
}
if (is.defined(options.density)) {
if (is.number(options.density) && options.density > 0) {
this.options.withMetadataDensity = options.density;
} else {
throw is.invalidParameterError('density', 'positive number', options.density);
}
}
if (is.defined(options.icc)) {
if (is.string(options.icc)) {
this.options.withMetadataIcc = options.icc;
@@ -389,7 +412,7 @@ function png (options) {
const colours = options.colours || options.colors;
if (is.defined(colours)) {
if (is.integer(colours) && is.inRange(colours, 2, 256)) {
this.options.pngColours = colours;
this.options.pngBitdepth = 1 << 31 - Math.clz32(Math.ceil(Math.log2(colours)));
} else {
throw is.invalidParameterError('colours', 'integer between 2 and 256', colours);
}
@@ -495,6 +518,84 @@ function gif (options) {
return this._updateFormatOut('gif', options);
}
/**
* Use these JP2 options for output image.
*
* Requires libvips compiled with support for OpenJPEG.
* The prebuilt binaries do not include this - see
* {@link https://sharp.pixelplumbing.com/install#custom-libvips installing a custom libvips}.
*
* @example
* // Convert any input to lossless JP2 output
* const data = await sharp(input)
* .jp2({ lossless: true })
* .toBuffer();
*
* @example
* // Convert any input to very high quality JP2 output
* const data = await sharp(input)
* .jp2({
* quality: 100,
* chromaSubsampling: '4:4:4'
* })
* .toBuffer();
*
* @since 0.29.1
*
* @param {Object} [options] - output options
* @param {number} [options.quality=80] - quality, integer 1-100
* @param {boolean} [options.lossless=false] - use lossless compression mode
* @param {number} [options.tileWidth=512] - horizontal tile size
* @param {number} [options.tileHeight=512] - vertical tile size
* @param {string} [options.chromaSubsampling='4:4:4'] - set to '4:2:0' to use chroma subsampling
* @returns {Sharp}
* @throws {Error} Invalid options
*/
/* istanbul ignore next */
function jp2 (options) {
if (!this.constructor.format.jp2k.output.buffer) {
throw errJp2Save;
}
if (is.object(options)) {
if (is.defined(options.quality)) {
if (is.integer(options.quality) && is.inRange(options.quality, 1, 100)) {
this.options.jp2Quality = options.quality;
} else {
throw is.invalidParameterError('quality', 'integer between 1 and 100', options.quality);
}
}
if (is.defined(options.lossless)) {
if (is.bool(options.lossless)) {
this.options.jp2Lossless = options.lossless;
} else {
throw is.invalidParameterError('lossless', 'boolean', options.lossless);
}
}
if (is.defined(options.tileWidth)) {
if (is.integer(options.tileWidth) && is.inRange(options.tileWidth, 1, 32768)) {
this.options.jp2TileWidth = options.tileWidth;
} else {
throw is.invalidParameterError('tileWidth', 'integer between 1 and 32768', options.tileWidth);
}
}
if (is.defined(options.tileHeight)) {
if (is.integer(options.tileHeight) && is.inRange(options.tileHeight, 1, 32768)) {
this.options.jp2TileHeight = options.tileHeight;
} else {
throw is.invalidParameterError('tileHeight', 'integer between 1 and 32768', options.tileHeight);
}
}
if (is.defined(options.chromaSubsampling)) {
if (is.string(options.chromaSubsampling) && is.inArray(options.chromaSubsampling, ['4:2:0', '4:4:4'])) {
this.options.heifChromaSubsampling = options.chromaSubsampling;
} else {
throw is.invalidParameterError('chromaSubsampling', 'one of: 4:2:0, 4:4:4', options.chromaSubsampling);
}
}
}
return this._updateFormatOut('jp2', options);
}
/**
* Set animation options if available.
* @private
@@ -536,6 +637,8 @@ function trySetAnimationOptions (source, target) {
/**
* Use these TIFF options for output image.
*
* The `density` can be set in pixels/inch via {@link withMetadata} instead of providing `xres` and `yres` in pixels/mm.
*
* @example
* // Convert SVG input to LZW-compressed, 1 bit per pixel TIFF output
* sharp('input.svg')
@@ -640,13 +743,15 @@ function tiff (options) {
* Whilst it is possible to create AVIF images smaller than 16x16 pixels,
* most web browsers do not display these properly.
*
* AVIF image sequences are not supported.
*
* @since 0.27.0
*
* @param {Object} [options] - output options
* @param {number} [options.quality=50] - quality, integer 1-100
* @param {boolean} [options.lossless=false] - use lossless compression
* @param {number} [options.speed=5] - CPU effort vs file size, 0 (slowest/smallest) to 8 (fastest/largest)
* @param {string} [options.chromaSubsampling='4:2:0'] - set to '4:4:4' to prevent chroma subsampling otherwise defaults to '4:2:0' chroma subsampling, requires libvips v8.11.0
* @param {number} [options.speed=5] - CPU effort vs file size, 0 (slowest/smallest) to 9 (fastest/largest)
* @param {string} [options.chromaSubsampling='4:4:4'] - set to '4:2:0' to use chroma subsampling
* @returns {Sharp}
* @throws {Error} Invalid options
*/
@@ -666,8 +771,8 @@ function avif (options) {
* @param {number} [options.quality=50] - quality, integer 1-100
* @param {string} [options.compression='av1'] - compression format: av1, hevc
* @param {boolean} [options.lossless=false] - use lossless compression
* @param {number} [options.speed=5] - CPU effort vs file size, 0 (slowest/smallest) to 8 (fastest/largest)
* @param {string} [options.chromaSubsampling='4:2:0'] - set to '4:4:4' to prevent chroma subsampling otherwise defaults to '4:2:0' chroma subsampling, requires libvips v8.11.0
* @param {number} [options.speed=5] - CPU effort vs file size, 0 (slowest/smallest) to 9 (fastest/largest)
* @param {string} [options.chromaSubsampling='4:4:4'] - set to '4:2:0' to use chroma subsampling
* @returns {Sharp}
* @throws {Error} Invalid options
*/
@@ -695,10 +800,10 @@ function heif (options) {
}
}
if (is.defined(options.speed)) {
if (is.integer(options.speed) && is.inRange(options.speed, 0, 8)) {
if (is.integer(options.speed) && is.inRange(options.speed, 0, 9)) {
this.options.heifSpeed = options.speed;
} else {
throw is.invalidParameterError('speed', 'integer between 0 and 8', options.speed);
throw is.invalidParameterError('speed', 'integer between 0 and 9', options.speed);
}
}
if (is.defined(options.chromaSubsampling)) {
@@ -713,28 +818,41 @@ function heif (options) {
}
/**
* Force output to be raw, uncompressed, 8-bit unsigned integer (unit8) pixel data.
* Force output to be raw, uncompressed pixel data.
* Pixel ordering is left-to-right, top-to-bottom, without padding.
* Channel ordering will be RGB or RGBA for non-greyscale colourspaces.
*
* @example
* // Extract raw RGB pixel data from JPEG input
* // Extract raw, unsigned 8-bit RGB pixel data from JPEG input
* const { data, info } = await sharp('input.jpg')
* .raw()
* .toBuffer({ resolveWithObject: true });
*
* @example
* // Extract alpha channel as raw pixel data from PNG input
* // Extract alpha channel as raw, unsigned 16-bit pixel data from PNG input
* const data = await sharp('input.png')
* .ensureAlpha()
* .extractChannel(3)
* .toColourspace('b-w')
* .raw()
* .raw({ depth: 'ushort' })
* .toBuffer();
*
* @returns {Sharp}
* @param {Object} [options] - output options
* @param {string} [options.depth='uchar'] - bit depth, one of: char, uchar (default), short, ushort, int, uint, float, complex, double, dpcomplex
* @throws {Error} Invalid options
*/
function raw () {
function raw (options) {
if (is.object(options)) {
if (is.defined(options.depth)) {
if (is.string(options.depth) && is.inArray(options.depth,
['char', 'uchar', 'short', 'ushort', 'int', 'uint', 'float', 'complex', 'double', 'dpcomplex']
)) {
this.options.rawDepth = options.depth;
} else {
throw is.invalidParameterError('depth', 'one of: char, uchar, short, ushort, int, uint, float, complex, double, dpcomplex', options.depth);
}
}
}
return this._updateFormatOut('raw');
}
@@ -859,6 +977,31 @@ function tile (options) {
return this._updateFormatOut('dz');
}
/**
* Set a timeout for processing, in seconds.
* Use a value of zero to continue processing indefinitely, the default behaviour.
*
* The clock starts when libvips opens an input image for processing.
* Time spent waiting for a libuv thread to become available is not included.
*
* @since 0.29.2
*
* @param {Object} options
* @param {number} options.seconds - Number of seconds after which processing will be stopped
* @returns {Sharp}
*/
function timeout (options) {
if (!is.plainObject(options)) {
throw is.invalidParameterError('options', 'object', options);
}
if (is.integer(options.seconds) && is.inRange(options.seconds, 0, 3600)) {
this.options.timeoutSeconds = options.seconds;
} else {
throw is.invalidParameterError('seconds', 'integer between 0 and 3600', options.seconds);
}
return this;
}
/**
* Update the output format unless options.force is false,
* in which case revert to input format.
@@ -1004,6 +1147,7 @@ module.exports = function (Sharp) {
withMetadata,
toFormat,
jpeg,
jp2,
png,
webp,
tiff,
@@ -1012,6 +1156,7 @@ module.exports = function (Sharp) {
gif,
raw,
tile,
timeout,
// Private
_updateFormatOut,
_setBooleanOption,
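Two of the output changes above, the processing timeout and the extended HEIF/AVIF `speed` range, can be combined in one pipeline. A minimal sketch with illustrative values and filenames:

```js
const sharp = require('sharp');

async function quickAvif() {
  // Abandon processing after 5 seconds; use the fastest AVIF effort setting.
  return sharp('large-input.tiff')
    .resize(1600)
    .timeout({ seconds: 5 })
    .avif({ speed: 9, chromaSubsampling: '4:2:0' })
    .toBuffer();
}
```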

lib/sharp.js Normal file

@@ -0,0 +1,31 @@
'use strict';
const platformAndArch = require('./platform')();
/* istanbul ignore next */
try {
module.exports = require(`../build/Release/sharp-${platformAndArch}.node`);
} catch (err) {
// Bail early if bindings aren't available
const help = ['', 'Something went wrong installing the "sharp" module', '', err.message, '', 'Possible solutions:'];
if (/dylib/.test(err.message) && /Incompatible library version/.test(err.message)) {
help.push('- Update Homebrew: "brew update && brew upgrade vips"');
} else {
help.push(
'- Install with the --verbose flag and look for errors: "npm install --ignore-scripts=false --verbose sharp"',
`- Install for the current runtime: "npm install --platform=${process.platform} --arch=${process.arch} sharp"`
);
}
help.push(
'- Consult the installation documentation: https://sharp.pixelplumbing.com/install'
);
// Check loaded
if (process.platform === 'win32') {
const loadedModule = Object.keys(require.cache).find((i) => /[\\/]build[\\/]Release[\\/]sharp(.*)\.node$/.test(i));
if (loadedModule) {
const [, loadedPackage] = loadedModule.match(/node_modules[\\/]([^\\/]+)[\\/]/);
help.push(`- Ensure the version of sharp aligns with the ${loadedPackage} package: "npm ls sharp"`);
}
}
throw new Error(help.join('\n'));
}


@@ -4,7 +4,8 @@ const events = require('events');
const detectLibc = require('detect-libc');
const is = require('./is');
const sharp = require('../build/Release/sharp.node');
const platformAndArch = require('./platform')();
const sharp = require('./sharp');
/**
* An Object containing nested boolean values representing the available input and output formats/methods.
@@ -45,7 +46,7 @@ let versions = {
vips: sharp.libvipsVersion()
};
try {
versions = require(`../vendor/${versions.vips}/versions.json`);
versions = require(`../vendor/${versions.vips}/${platformAndArch}/versions.json`);
} catch (err) {}
/**


@@ -1,7 +1,7 @@
{
"name": "sharp",
"description": "High performance Node.js image processing, the fastest module to resize JPEG, PNG, WebP, AVIF and TIFF images",
"version": "0.28.1",
"version": "0.29.3",
"author": "Lovell Fuller <npm@lovell.info>",
"homepage": "https://github.com/lovell/sharp",
"contributors": [
@@ -74,12 +74,18 @@
"Christian Flintrup <chr@gigahost.dk>",
"Manan Jadhav <manan@motionden.com>",
"Leon Radley <leon@radley.se>",
"alza54 <alza54@thiocod.in>"
"alza54 <alza54@thiocod.in>",
"Jacob Smith <jacob@frende.me>",
"Michael Nutt <michael@nutt.im>",
"Brad Parham <baparham@gmail.com>",
"Taneli Vatanen <taneli.vatanen@gmail.com>",
"Joris Dugué <zaruike10@gmail.com>"
],
"scripts": {
"install": "(node install/libvips && node install/dll-copy && prebuild-install) || (node-gyp rebuild && node install/dll-copy)",
"install": "(node install/libvips && node install/dll-copy && prebuild-install) || (node install/can-compile && node-gyp rebuild && node install/dll-copy)",
"clean": "rm -rf node_modules/ build/ vendor/ .nyc_output/ coverage/ test/fixtures/output.*",
"test": "semistandard && cpplint && npm run test-unit && npm run test-licensing",
"test": "npm run test-lint && npm run test-unit && npm run test-licensing",
"test-lint": "semistandard && cpplint",
"test-unit": "nyc --reporter=lcov --branches=99 mocha --slow=1000 --timeout=60000 ./test/unit/*.js",
"test-licensing": "license-checker --production --summary --onlyAllow=\"Apache-2.0;BSD;ISC;MIT\"",
"test-coverage": "./test/coverage/report.sh",
@@ -107,6 +113,7 @@
"tiff",
"gif",
"svg",
"jp2",
"dzi",
"image",
"resize",
@@ -117,45 +124,45 @@
"vips"
],
"dependencies": {
"color": "^3.1.3",
"color": "^4.0.1",
"detect-libc": "^1.0.3",
"node-addon-api": "^3.1.0",
"prebuild-install": "^6.1.1",
"node-addon-api": "^4.2.0",
"prebuild-install": "^7.0.0",
"semver": "^7.3.5",
"simple-get": "^3.1.0",
"simple-get": "^4.0.0",
"tar-fs": "^2.1.1",
"tunnel-agent": "^0.6.0"
},
"devDependencies": {
"async": "^3.2.0",
"async": "^3.2.2",
"cc": "^3.0.1",
"decompress-zip": "^0.3.3",
"documentation": "^13.2.0",
"documentation": "^13.2.5",
"exif-reader": "^1.0.3",
"icc": "^2.0.0",
"license-checker": "^25.0.1",
"mocha": "^8.3.2",
"mock-fs": "^4.13.0",
"mocha": "^9.1.3",
"mock-fs": "^5.1.2",
"nyc": "^15.1.0",
"prebuild": "^10.0.1",
"prebuild": "^11.0.0",
"rimraf": "^3.0.2",
"semistandard": "^16.0.0"
"semistandard": "^16.0.1"
},
"license": "Apache-2.0",
"config": {
"libvips": "8.10.6",
"libvips": "8.11.3",
"runtime": "napi",
"target": 3
"target": 5
},
"engines": {
"node": ">=10"
"node": ">=12.13.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
},
"binary": {
"napi_versions": [
3
5
]
},
"semistandard": {


@@ -92,9 +92,13 @@ namespace sharp {
}
// Raw pixel input
if (HasAttr(input, "rawChannels")) {
descriptor->rawDepth = static_cast<VipsBandFormat>(
vips_enum_from_nick(nullptr, VIPS_TYPE_BAND_FORMAT,
AttrAsStr(input, "rawDepth").data()));
descriptor->rawChannels = AttrAsUint32(input, "rawChannels");
descriptor->rawWidth = AttrAsUint32(input, "rawWidth");
descriptor->rawHeight = AttrAsUint32(input, "rawHeight");
descriptor->rawPremultiplied = AttrAsBool(input, "rawPremultiplied");
}
// Multi-page input (GIF, TIFF, PDF)
if (HasAttr(input, "pages")) {
@@ -153,6 +157,10 @@ namespace sharp {
bool IsGif(std::string const &str) {
return EndsWith(str, ".gif") || EndsWith(str, ".GIF");
}
bool IsJp2(std::string const &str) {
return EndsWith(str, ".jp2") || EndsWith(str, ".jpx") || EndsWith(str, ".j2k") || EndsWith(str, ".j2c")
|| EndsWith(str, ".JP2") || EndsWith(str, ".JPX") || EndsWith(str, ".J2K") || EndsWith(str, ".J2C");
}
bool IsTiff(std::string const &str) {
return EndsWith(str, ".tif") || EndsWith(str, ".tiff") || EndsWith(str, ".TIF") || EndsWith(str, ".TIFF");
}
@@ -186,6 +194,7 @@ namespace sharp {
case ImageType::WEBP: id = "webp"; break;
case ImageType::TIFF: id = "tiff"; break;
case ImageType::GIF: id = "gif"; break;
case ImageType::JP2: id = "jp2"; break;
case ImageType::SVG: id = "svg"; break;
case ImageType::HEIF: id = "heif"; break;
case ImageType::PDF: id = "pdf"; break;
@@ -222,6 +231,8 @@ namespace sharp {
{ "VipsForeignLoadGifBuffer", ImageType::GIF },
{ "VipsForeignLoadNsgifFile", ImageType::GIF },
{ "VipsForeignLoadNsgifBuffer", ImageType::GIF },
{ "VipsForeignLoadJp2kBuffer", ImageType::JP2 },
{ "VipsForeignLoadJp2kFile", ImageType::JP2 },
{ "VipsForeignLoadSvgFile", ImageType::SVG },
{ "VipsForeignLoadSvgBuffer", ImageType::SVG },
{ "VipsForeignLoadHeifFile", ImageType::HEIF },
@@ -230,11 +241,14 @@ namespace sharp {
{ "VipsForeignLoadPdfBuffer", ImageType::PDF },
{ "VipsForeignLoadMagickFile", ImageType::MAGICK },
{ "VipsForeignLoadMagickBuffer", ImageType::MAGICK },
{ "VipsForeignLoadMagick7File", ImageType::MAGICK },
{ "VipsForeignLoadMagick7Buffer", ImageType::MAGICK },
{ "VipsForeignLoadOpenslide", ImageType::OPENSLIDE },
{ "VipsForeignLoadPpmFile", ImageType::PPM },
{ "VipsForeignLoadFits", ImageType::FITS },
{ "VipsForeignLoadOpenexr", ImageType::EXR },
{ "VipsForeignLoadVips", ImageType::VIPS },
{ "VipsForeignLoadVipsFile", ImageType::VIPS },
{ "VipsForeignLoadRaw", ImageType::RAW }
};
@@ -280,6 +294,7 @@ namespace sharp {
imageType == ImageType::WEBP ||
imageType == ImageType::MAGICK ||
imageType == ImageType::GIF ||
imageType == ImageType::JP2 ||
imageType == ImageType::TIFF ||
imageType == ImageType::HEIF ||
imageType == ImageType::PDF;
@@ -295,12 +310,15 @@ namespace sharp {
if (descriptor->rawChannels > 0) {
// Raw, uncompressed pixel data
image = VImage::new_from_memory(descriptor->buffer, descriptor->bufferLength,
descriptor->rawWidth, descriptor->rawHeight, descriptor->rawChannels, VIPS_FORMAT_UCHAR);
descriptor->rawWidth, descriptor->rawHeight, descriptor->rawChannels, descriptor->rawDepth);
if (descriptor->rawChannels < 3) {
image.get_image()->Type = VIPS_INTERPRETATION_B_W;
} else {
image.get_image()->Type = VIPS_INTERPRETATION_sRGB;
}
if (descriptor->rawPremultiplied) {
image = image.unpremultiply();
}
imageType = ImageType::RAW;
} else {
// Compressed data
@@ -500,6 +518,17 @@ namespace sharp {
return copy;
}
/*
Remove animation properties from image.
*/
VImage RemoveAnimationProperties(VImage image) {
VImage copy = image.copy();
copy.remove(VIPS_META_PAGE_HEIGHT);
copy.remove("delay");
copy.remove("loop");
return copy;
}
/*
Does this image have a non-default density?
*/
@@ -520,9 +549,8 @@ namespace sharp {
VImage SetDensity(VImage image, const double density) {
const double pixelsPerMm = density / 25.4;
VImage copy = image.copy();
copy.set("Xres", pixelsPerMm);
copy.set("Yres", pixelsPerMm);
copy.set(VIPS_META_RESOLUTION_UNIT, "in");
copy.get_image()->Xres = pixelsPerMm;
copy.get_image()->Yres = pixelsPerMm;
return copy;
}
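For context, a minimal sketch of the JavaScript call this density handling backs, assuming the option surfaces as withMetadata({ density }); file paths are placeholders:
const sharp = require('sharp');
sharp('input.png')                    // placeholder input path
  .withMetadata({ density: 300 })     // 300 DPI; converted above to 300 / 25.4 pixels per mm
  .toFile('output.tiff')              // placeholder output path
  .catch(console.error);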
@@ -582,6 +610,33 @@ namespace sharp {
return warning;
}
/*
Attach an event listener for progress updates, used to detect timeout
*/
void SetTimeout(VImage image, int const seconds) {
if (seconds > 0) {
VipsImage *im = image.get_image();
if (im->progress_signal == NULL) {
int *timeout = VIPS_NEW(im, int);
*timeout = seconds;
g_signal_connect(im, "eval", G_CALLBACK(VipsProgressCallBack), timeout);
vips_image_set_progress(im, TRUE);
}
}
}
/*
Event listener for progress updates, used to detect timeout
*/
void VipsProgressCallBack(VipsImage *im, VipsProgress *progress, int *timeout) {
// printf("VipsProgressCallBack progress=%d run=%d timeout=%d\n", progress->percent, progress->run, *timeout);
if (*timeout > 0 && progress->run >= *timeout) {
vips_image_set_kill(im, TRUE);
vips_error("timeout", "%d%% complete", progress->percent);
*timeout = 0;
}
}
/*
Calculate the (left, top) coordinates of the output image
within the input image, applying the given gravity during an embed.
@@ -750,23 +805,27 @@ namespace sharp {
/*
Convert RGBA value to another colourspace
*/
std::vector<double> GetRgbaAsColourspace(std::vector<double> const rgba, VipsInterpretation const interpretation) {
std::vector<double> GetRgbaAsColourspace(std::vector<double> const rgba,
VipsInterpretation const interpretation, bool premultiply) {
int const bands = static_cast<int>(rgba.size());
if (bands < 3 || interpretation == VIPS_INTERPRETATION_sRGB || interpretation == VIPS_INTERPRETATION_RGB) {
if (bands < 3) {
return rgba;
} else {
VImage pixel = VImage::new_matrix(1, 1);
pixel.set("bands", bands);
pixel = pixel.new_from_image(rgba);
pixel = pixel.colourspace(interpretation, VImage::option()->set("source_space", VIPS_INTERPRETATION_sRGB));
return pixel(0, 0);
}
VImage pixel = VImage::new_matrix(1, 1);
pixel.set("bands", bands);
pixel = pixel
.new_from_image(rgba)
.colourspace(interpretation, VImage::option()->set("source_space", VIPS_INTERPRETATION_sRGB));
if (premultiply) {
pixel = pixel.premultiply();
}
return pixel(0, 0);
}
/*
Apply the alpha channel to a given colour
*/
std::tuple<VImage, std::vector<double>> ApplyAlpha(VImage image, std::vector<double> colour) {
std::tuple<VImage, std::vector<double>> ApplyAlpha(VImage image, std::vector<double> colour, bool premultiply) {
// Scale up 8-bit values to match 16-bit input image
double const multiplier = sharp::Is16Bit(image.interpretation()) ? 256.0 : 1.0;
// Create alphaColour colour
@@ -790,7 +849,7 @@ namespace sharp {
alphaColour.push_back(colour[3] * multiplier);
}
// Ensure alphaColour colour uses correct colourspace
alphaColour = sharp::GetRgbaAsColourspace(alphaColour, image.interpretation());
alphaColour = sharp::GetRgbaAsColourspace(alphaColour, image.interpretation(), premultiply);
// Add non-transparent alpha channel, if required
if (colour[3] < 255.0 && !HasAlpha(image)) {
image = image.bandjoin(
@@ -820,4 +879,5 @@ namespace sharp {
}
return image;
}
} // namespace sharp
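The SetTimeout and VipsProgressCallBack pair above enforces the processing time limit. A minimal sketch of the assumed JavaScript surface, timeout({ seconds }), with a placeholder input path:
const sharp = require('sharp');
sharp('large.jpg')               // placeholder input path
  .timeout({ seconds: 3 })       // abort libvips evaluation after roughly 3 seconds
  .resize(1200)
  .toBuffer()
  .catch(err => {
    // when the limit is hit, the vips error raised above surfaces here, e.g. "timeout: 72% complete"
    console.error(err.message);
  });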

View File

@@ -25,9 +25,9 @@
// Verify platform and compiler compatibility
#if (VIPS_MAJOR_VERSION < 8) || \
(VIPS_MAJOR_VERSION == 8 && VIPS_MINOR_VERSION < 10) || \
(VIPS_MAJOR_VERSION == 8 && VIPS_MINOR_VERSION == 10 && VIPS_MICRO_VERSION < 6)
#error "libvips version 8.10.6+ is required - please see https://sharp.pixelplumbing.com/install"
(VIPS_MAJOR_VERSION == 8 && VIPS_MINOR_VERSION < 11) || \
(VIPS_MAJOR_VERSION == 8 && VIPS_MINOR_VERSION == 11 && VIPS_MICRO_VERSION < 3)
#error "libvips version 8.11.3+ is required - please see https://sharp.pixelplumbing.com/install"
#endif
#if ((!defined(__clang__)) && defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 6)))
@@ -54,9 +54,11 @@ namespace sharp {
size_t bufferLength;
bool isBuffer;
double density;
VipsBandFormat rawDepth;
int rawChannels;
int rawWidth;
int rawHeight;
bool rawPremultiplied;
int pages;
int page;
int level;
@@ -77,9 +79,11 @@ namespace sharp {
bufferLength(0),
isBuffer(FALSE),
density(72.0),
rawDepth(VIPS_FORMAT_UCHAR),
rawChannels(0),
rawWidth(0),
rawHeight(0),
rawPremultiplied(false),
pages(1),
page(0),
level(0),
@@ -112,6 +116,7 @@ namespace sharp {
JPEG,
PNG,
WEBP,
JP2,
TIFF,
GIF,
SVG,
@@ -138,6 +143,7 @@ namespace sharp {
bool IsJpeg(std::string const &str);
bool IsPng(std::string const &str);
bool IsWebp(std::string const &str);
bool IsJp2(std::string const &str);
bool IsGif(std::string const &str);
bool IsTiff(std::string const &str);
bool IsHeic(std::string const &str);
@@ -204,6 +210,11 @@ namespace sharp {
*/
VImage SetAnimationProperties(VImage image, int pageHeight, std::vector<int> delay, int loop);
/*
Remove animation properties from image.
*/
VImage RemoveAnimationProperties(VImage image);
/*
Does this image have a non-default density?
*/
@@ -239,6 +250,16 @@ namespace sharp {
*/
std::string VipsWarningPop();
/*
Attach an event listener for progress updates, used to detect timeout
*/
void SetTimeout(VImage image, int const timeoutSeconds);
/*
Event listener for progress updates, used to detect timeout
*/
void VipsProgressCallBack(VipsImage *image, VipsProgress *progress, int *timeoutSeconds);
/*
Calculate the (left, top) coordinates of the output image
within the input image, applying the given gravity during an embed.
@@ -284,12 +305,13 @@ namespace sharp {
/*
Convert RGBA value to another colourspace
*/
std::vector<double> GetRgbaAsColourspace(std::vector<double> const rgba, VipsInterpretation const interpretation);
std::vector<double> GetRgbaAsColourspace(std::vector<double> const rgba,
VipsInterpretation const interpretation, bool premultiply);
/*
Apply the alpha channel to a given colour
*/
std::tuple<VImage, std::vector<double>> ApplyAlpha(VImage image, std::vector<double> colour);
std::tuple<VImage, std::vector<double>> ApplyAlpha(VImage image, std::vector<double> colour, bool premultiply);
/*
Removes alpha channel, if any.
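The new rawDepth and rawPremultiplied descriptor fields map to raw-input options on the JavaScript side. A sketch assuming the input option is named raw.premultiplied; the pixel data is synthetic:
const sharp = require('sharp');
const width = 2;
const height = 2;
const channels = 4;
// placeholder premultiplied RGBA data: four opaque white pixels
const pixels = Buffer.alloc(width * height * channels, 255);
sharp(pixels, { raw: { width, height, channels, premultiplied: true } })
  .png()       // the unpremultiply() added earlier in this diff runs on input before encoding
  .toBuffer()
  .then(buffer => console.log(buffer.length))
  .catch(console.error);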

View File

@@ -110,19 +110,6 @@ VSource::new_from_options( const char *options )
return( out );
}
VOption *
VOption::set( const char *name, const VSource value )
{
Pair *pair = new Pair( name );
pair->input = true;
g_value_init( &pair->value, VIPS_TYPE_SOURCE );
g_value_set_object( &pair->value, value.get_source() );
options.push_back( pair );
return( this );
}
VTarget
VTarget::new_to_descriptor( int descriptor )
{
@@ -162,17 +149,4 @@ VTarget::new_to_memory()
return( out );
}
VOption *
VOption::set( const char *name, const VTarget value )
{
Pair *pair = new Pair( name );
pair->input = true;
g_value_init( &pair->value, VIPS_TYPE_TARGET );
g_value_set_object( &pair->value, value.get_target() );
options.push_back( pair );
return( this );
}
VIPS_NAMESPACE_END

View File

@@ -51,6 +51,12 @@
VIPS_NAMESPACE_START
/**
* \namespace vips
*
* General docs for the vips namespace.
*/
std::vector<double>
to_vectorv( int n, ... )
{
@@ -140,6 +146,20 @@ VOption::set( const char *name, int value )
return( this );
}
// input guint64
VOption *
VOption::set( const char *name, guint64 value )
{
Pair *pair = new Pair( name );
pair->input = true;
g_value_init( &pair->value, G_TYPE_UINT64 );
g_value_set_uint64( &pair->value, value );
options.push_back( pair );
return( this );
}
// input double
VOption *
VOption::set( const char *name, double value )
@@ -167,39 +187,17 @@ VOption::set( const char *name, const char *value )
return( this );
}
// input image
// input vips object (image, source, target, etc. etc.)
VOption *
VOption::set( const char *name, const VImage value )
VOption::set( const char *name, const VObject value )
{
Pair *pair = new Pair( name );
VipsObject *object = value.get_object();
GType type = G_OBJECT_TYPE( object );
pair->input = true;
g_value_init( &pair->value, VIPS_TYPE_IMAGE );
g_value_set_object( &pair->value, value.get_image() );
options.push_back( pair );
return( this );
}
// input double array
VOption *
VOption::set( const char *name, std::vector<double> value )
{
Pair *pair = new Pair( name );
double *array;
unsigned int i;
pair->input = true;
g_value_init( &pair->value, VIPS_TYPE_ARRAY_DOUBLE );
vips_value_set_array_double( &pair->value, NULL,
static_cast< int >( value.size() ) );
array = vips_value_get_array_double( &pair->value, NULL );
for( i = 0; i < value.size(); i++ )
array[i] = value[i];
g_value_init( &pair->value, type );
g_value_set_object( &pair->value, object );
options.push_back( pair );
return( this );
@@ -229,6 +227,30 @@ VOption::set( const char *name, std::vector<int> value )
return( this );
}
// input double array
VOption *
VOption::set( const char *name, std::vector<double> value )
{
Pair *pair = new Pair( name );
double *array;
unsigned int i;
pair->input = true;
g_value_init( &pair->value, VIPS_TYPE_ARRAY_DOUBLE );
vips_value_set_array_double( &pair->value, NULL,
static_cast< int >( value.size() ) );
array = vips_value_get_array_double( &pair->value, NULL );
for( i = 0; i < value.size(); i++ )
array[i] = value[i];
options.push_back( pair );
return( this );
}
// input image array
VOption *
VOption::set( const char *name, std::vector<VImage> value )
@@ -619,6 +641,22 @@ VImage::new_from_source( VSource source, const char *option_string,
return( out );
}
VImage
VImage::new_from_memory_steal( void *data, size_t size,
int width, int height, int bands, VipsBandFormat format )
{
VipsImage *image;
if( !(image = vips_image_new_from_memory( data, size,
width, height, bands, format )) )
throw( VError() );
g_signal_connect( image, "postclose",
G_CALLBACK( vips_image_free_buffer ), data);
return( VImage( image ) );
}
VImage
VImage::new_matrix( int width, int height )
{

View File

@@ -60,17 +60,4 @@ VInterpolate::new_from_name( const char *name, VOption *options )
return( out );
}
VOption *
VOption::set( const char *name, const VInterpolate value )
{
Pair *pair = new Pair( name );
pair->input = true;
g_value_init( &pair->value, VIPS_TYPE_INTERPOLATE );
g_value_set_object( &pair->value, value.get_interpolate() );
options.push_back( pair );
return( this );
}
VIPS_NAMESPACE_END

View File

@@ -1,5 +1,5 @@
// bodies for vips operations
// Sun 5 Jul 22:36:37 BST 2020
// Wed May 12 11:30:00 AM CEST 2021
// this file is generated automatically, do not edit!
VImage VImage::CMC2LCh( VOption *options ) const
@@ -1065,6 +1065,18 @@ VImage VImage::fitsload( const char *filename, VOption *options )
return( out );
}
VImage VImage::fitsload_source( VSource source, VOption *options )
{
VImage out;
call( "fitsload_source",
(options ? options : VImage::option())->
set( "out", &out )->
set( "source", source ) );
return( out );
}
void VImage::fitssave( const char *filename, VOption *options ) const
{
call( "fitssave",
@@ -1656,6 +1668,70 @@ VImage VImage::join( VImage in2, VipsDirection direction, VOption *options ) con
return( out );
}
VImage VImage::jp2kload( const char *filename, VOption *options )
{
VImage out;
call( "jp2kload",
(options ? options : VImage::option())->
set( "out", &out )->
set( "filename", filename ) );
return( out );
}
VImage VImage::jp2kload_buffer( VipsBlob *buffer, VOption *options )
{
VImage out;
call( "jp2kload_buffer",
(options ? options : VImage::option())->
set( "out", &out )->
set( "buffer", buffer ) );
return( out );
}
VImage VImage::jp2kload_source( VSource source, VOption *options )
{
VImage out;
call( "jp2kload_source",
(options ? options : VImage::option())->
set( "out", &out )->
set( "source", source ) );
return( out );
}
void VImage::jp2ksave( const char *filename, VOption *options ) const
{
call( "jp2ksave",
(options ? options : VImage::option())->
set( "in", *this )->
set( "filename", filename ) );
}
VipsBlob *VImage::jp2ksave_buffer( VOption *options ) const
{
VipsBlob *buffer;
call( "jp2ksave_buffer",
(options ? options : VImage::option())->
set( "in", *this )->
set( "buffer", &buffer ) );
return( buffer );
}
void VImage::jp2ksave_target( VTarget target, VOption *options ) const
{
call( "jp2ksave_target",
(options ? options : VImage::option())->
set( "in", *this )->
set( "target", target ) );
}
VImage VImage::jpegload( const char *filename, VOption *options )
{
VImage out;
@@ -1727,6 +1803,70 @@ void VImage::jpegsave_target( VTarget target, VOption *options ) const
set( "target", target ) );
}
VImage VImage::jxlload( const char *filename, VOption *options )
{
VImage out;
call( "jxlload",
(options ? options : VImage::option())->
set( "out", &out )->
set( "filename", filename ) );
return( out );
}
VImage VImage::jxlload_buffer( VipsBlob *buffer, VOption *options )
{
VImage out;
call( "jxlload_buffer",
(options ? options : VImage::option())->
set( "out", &out )->
set( "buffer", buffer ) );
return( out );
}
VImage VImage::jxlload_source( VSource source, VOption *options )
{
VImage out;
call( "jxlload_source",
(options ? options : VImage::option())->
set( "out", &out )->
set( "source", source ) );
return( out );
}
void VImage::jxlsave( const char *filename, VOption *options ) const
{
call( "jxlsave",
(options ? options : VImage::option())->
set( "in", *this )->
set( "filename", filename ) );
}
VipsBlob *VImage::jxlsave_buffer( VOption *options ) const
{
VipsBlob *buffer;
call( "jxlsave_buffer",
(options ? options : VImage::option())->
set( "in", *this )->
set( "buffer", &buffer ) );
return( buffer );
}
void VImage::jxlsave_target( VTarget target, VOption *options ) const
{
call( "jxlsave_target",
(options ? options : VImage::option())->
set( "in", *this )->
set( "target", target ) );
}
VImage VImage::labelregions( VOption *options ) const
{
VImage mask;
@@ -2284,6 +2424,18 @@ VImage VImage::niftiload( const char *filename, VOption *options )
return( out );
}
VImage VImage::niftiload_source( VSource source, VOption *options )
{
VImage out;
call( "niftiload_source",
(options ? options : VImage::option())->
set( "out", &out )->
set( "source", source ) );
return( out );
}
void VImage::niftisave( const char *filename, VOption *options ) const
{
call( "niftisave",
@@ -2316,6 +2468,18 @@ VImage VImage::openslideload( const char *filename, VOption *options )
return( out );
}
VImage VImage::openslideload_source( VSource source, VOption *options )
{
VImage out;
call( "openslideload_source",
(options ? options : VImage::option())->
set( "out", &out )->
set( "source", source ) );
return( out );
}
VImage VImage::pdfload( const char *filename, VOption *options )
{
VImage out;
@@ -3388,6 +3552,18 @@ VImage VImage::vipsload( const char *filename, VOption *options )
return( out );
}
VImage VImage::vipsload_source( VSource source, VOption *options )
{
VImage out;
call( "vipsload_source",
(options ? options : VImage::option())->
set( "out", &out )->
set( "source", source ) );
return( out );
}
void VImage::vipssave( const char *filename, VOption *options ) const
{
call( "vipssave",
@@ -3396,6 +3572,14 @@ void VImage::vipssave( const char *filename, VOption *options ) const
set( "filename", filename ) );
}
void VImage::vipssave_target( VTarget target, VOption *options ) const
{
call( "vipssave_target",
(options ? options : VImage::option())->
set( "in", *this )->
set( "target", target ) );
}
VImage VImage::webpload( const char *filename, VOption *options )
{
VImage out;

View File

@@ -90,6 +90,9 @@ class MetadataWorker : public Napi::AsyncWorker {
baton->subifds = image.get_int(VIPS_META_N_SUBIFDS);
}
baton->hasProfile = sharp::HasProfile(image);
if (image.get_typeof("background") == VIPS_TYPE_ARRAY_DOUBLE) {
baton->background = image.get_array_double("background");
}
// Derived attributes
baton->hasAlpha = sharp::HasAlpha(image);
baton->orientation = sharp::ExifOrientation(image);
@@ -209,6 +212,17 @@ class MetadataWorker : public Napi::AsyncWorker {
if (baton->subifds > 0) {
info.Set("subifds", baton->subifds);
}
if (!baton->background.empty()) {
if (baton->background.size() == 3) {
Napi::Object background = Napi::Object::New(env);
background.Set("r", baton->background[0]);
background.Set("g", baton->background[1]);
background.Set("b", baton->background[2]);
info.Set("background", background);
} else {
info.Set("background", baton->background[0]);
}
}
info.Set("hasProfile", baton->hasProfile);
info.Set("hasAlpha", baton->hasAlpha);
if (baton->orientation > 0) {
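A sketch of reading the new background field from JavaScript, assuming metadata() returns it exactly as constructed above; the input path is a placeholder:
const sharp = require('sharp');
sharp('with-background.png')      // placeholder: an image carrying background metadata, e.g. a PNG bKGD chunk
  .metadata()
  .then(({ background }) => {
    // { r, g, b } when three values are present, a single number otherwise,
    // undefined when the input has no background entry
    console.log(background);
  })
  .catch(console.error);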

View File

@@ -42,6 +42,7 @@ struct MetadataBaton {
std::string compression;
std::vector<std::pair<int, int>> levels;
int subifds;
std::vector<double> background;
bool hasProfile;
bool hasAlpha;
int orientation;

View File

@@ -92,6 +92,13 @@ namespace sharp {
return image;
}
/*
* Contrast limiting adaptive histogram equalization (CLAHE)
*/
VImage Clahe(VImage image, int const width, int const height, int const maxSlope) {
return image.hist_local(width, height, VImage::option()->set("max_slope", maxSlope));
}
/*
* Gamma encoding/decoding
*/
@@ -105,6 +112,19 @@ namespace sharp {
}
}
/**
* Produce the "negative" of the image.
*/
VImage Negate(VImage image, bool const negateAlpha) {
if (HasAlpha(image) && !negateAlpha) {
// Separate alpha channel
VImage alpha = image[image.bands() - 1];
return RemoveAlpha(image).invert().bandjoin(alpha);
} else {
return image.invert();
}
}
/*
* Gaussian blur. Use sigma of -1.0 for fast blur.
*/
@@ -162,7 +182,8 @@ namespace sharp {
0.0, 0.0, 0.0, 1.0));
}
VImage Modulate(VImage image, double const brightness, double const saturation, int const hue) {
VImage Modulate(VImage image, double const brightness, double const saturation,
int const hue, double const lightness) {
if (HasAlpha(image)) {
// Separate alpha channel
VImage alpha = image[image.bands() - 1];
@@ -170,7 +191,7 @@ namespace sharp {
.colourspace(VIPS_INTERPRETATION_LCH)
.linear(
{ brightness, saturation, 1},
{ 0.0, 0.0, static_cast<double>(hue) }
{ lightness, 0.0, static_cast<double>(hue) }
)
.colourspace(VIPS_INTERPRETATION_sRGB)
.bandjoin(alpha);
@@ -179,7 +200,7 @@ namespace sharp {
.colourspace(VIPS_INTERPRETATION_LCH)
.linear(
{ brightness, saturation, 1 },
{ 0.0, 0.0, static_cast<double>(hue) }
{ lightness, 0.0, static_cast<double>(hue) }
)
.colourspace(VIPS_INTERPRETATION_sRGB);
}
@@ -275,4 +296,16 @@ namespace sharp {
return image.linear(a, b);
}
}
/*
* Ensure the image is in a given colourspace
*/
VImage EnsureColourspace(VImage image, VipsInterpretation colourspace) {
if (colourspace != VIPS_INTERPRETATION_LAST && image.interpretation() != colourspace) {
image = image.colourspace(colourspace,
VImage::option()->set("source_space", image.interpretation()));
}
return image;
}
} // namespace sharp
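These helpers back several user-facing additions exercised later in this diff: the clahe() tests, modulate()'s lightness option and negate()'s alpha flag. A combined usage sketch with placeholder paths:
const sharp = require('sharp');
sharp('input.jpg')                                   // placeholder input path
  .clahe({ width: 100, height: 50, maxSlope: 3 })    // hist_local with a max_slope clamp
  .modulate({ lightness: 10 })                       // additive offset on L in LCh space
  .negate({ alpha: false })                          // invert colours, leave any alpha channel untouched
  .toFile('output.jpg')                              // placeholder output path
  .catch(console.error);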

View File

@@ -35,11 +35,21 @@ namespace sharp {
*/
VImage Normalise(VImage image);
/*
* Contrast limiting adaptive histogram equalization (CLAHE)
*/
VImage Clahe(VImage image, int const width, int const height, int const maxSlope);
/*
* Gamma encoding/decoding
*/
VImage Gamma(VImage image, double const exponent);
/*
* Produce the "negative" of the image.
*/
VImage Negate(VImage image, bool const negateAlpha);
/*
* Gaussian blur. Use sigma of -1.0 for fast blur.
*/
@@ -88,9 +98,15 @@ namespace sharp {
VImage Recomb(VImage image, std::unique_ptr<double[]> const &matrix);
/*
* Modulate brightness, saturation and hue
* Modulate brightness, saturation, hue and lightness
*/
VImage Modulate(VImage image, double const brightness, double const saturation, int const hue);
VImage Modulate(VImage image, double const brightness, double const saturation,
int const hue, double const lightness);
/*
* Ensure the image is in a given colourspace
*/
VImage EnsureColourspace(VImage image, VipsInterpretation colourspace);
} // namespace sharp
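EnsureColourspace converts each opened input into an explicitly requested processing colourspace. A sketch of the assumed JavaScript pairing, pipelineColourspace() with toColourspace(); the path is a placeholder:
const sharp = require('sharp');
sharp('wide-gamut.png')             // placeholder input path
  .pipelineColourspace('rgb16')     // keep the pipeline in 16 bits per channel
  .resize(640)
  .gamma()
  .toColourspace('srgb')            // convert back to 8-bit sRGB for output
  .png()
  .toBuffer()
  .catch(console.error);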

View File

@@ -67,6 +67,7 @@ class PipelineWorker : public Napi::AsyncWorker {
vips::VImage image;
sharp::ImageType inputImageType;
std::tie(image, inputImageType) = sharp::OpenInput(baton->input);
image = sharp::EnsureColourspace(image, baton->colourspaceInput);
// Calculate angle of rotation
VipsAngle rotation;
@@ -89,7 +90,7 @@ class PipelineWorker : public Napi::AsyncWorker {
}
if (baton->rotationAngle != 0.0) {
std::vector<double> background;
std::tie(image, background) = sharp::ApplyAlpha(image, baton->rotationBackground);
std::tie(image, background) = sharp::ApplyAlpha(image, baton->rotationBackground, FALSE);
image = image.rotate(baton->rotationAngle, VImage::option()->set("background", background));
}
}
@@ -214,7 +215,7 @@ class PipelineWorker : public Napi::AsyncWorker {
double yresidual = static_cast<double>(yshrink) / yfactor;
// If integral x and y shrink are equal, try to use shrink-on-load for JPEG and WebP,
// but not when applying gamma correction, pre-resize extract or trim
// but not when applying gamma correction, pre-resize extract, trim or input colourspace
int shrink_on_load = 1;
int shrink_on_load_factor = 1;
@@ -226,7 +227,9 @@ class PipelineWorker : public Napi::AsyncWorker {
if (
xshrink == yshrink && xshrink >= 2 * shrink_on_load_factor &&
(inputImageType == sharp::ImageType::JPEG || inputImageType == sharp::ImageType::WEBP) &&
baton->gamma == 0 && baton->topOffsetPre == -1 && baton->trimThreshold == 0.0
baton->gamma == 0 && baton->topOffsetPre == -1 && baton->trimThreshold == 0.0 &&
baton->colourspaceInput == VIPS_INTERPRETATION_LAST &&
image.width() > 3 && image.height() > 3 && baton->input->pages == 1
) {
if (xshrink >= 8 * shrink_on_load_factor) {
xfactor = xfactor / 8;
@@ -286,16 +289,21 @@ class PipelineWorker : public Napi::AsyncWorker {
yfactor = static_cast<double>(shrunkOnLoadHeight) / static_cast<double>(targetResizeHeight);
}
}
// Remove animation properties from single page images
if (baton->input->pages == 1) {
image = sharp::RemoveAnimationProperties(image);
}
// Ensure we're using a device-independent colour space
char const *processingProfile = image.interpretation() == VIPS_INTERPRETATION_RGB16 ? "p3" : "srgb";
if (
sharp::HasProfile(image) &&
image.interpretation() != VIPS_INTERPRETATION_LABS &&
image.interpretation() != VIPS_INTERPRETATION_GREY16
) {
// Convert to sRGB using embedded profile
// Convert to sRGB/P3 using embedded profile
try {
image = image.icc_transform("srgb", VImage::option()
image = image.icc_transform(processingProfile, VImage::option()
->set("embedded", TRUE)
->set("depth", image.interpretation() == VIPS_INTERPRETATION_RGB16 ? 16 : 8)
->set("intent", VIPS_INTENT_PERCEPTUAL));
@@ -303,7 +311,7 @@ class PipelineWorker : public Napi::AsyncWorker {
// Ignore failure of embedded profile
}
} else if (image.interpretation() == VIPS_INTERPRETATION_CMYK) {
image = image.icc_transform("srgb", VImage::option()
image = image.icc_transform(processingProfile, VImage::option()
->set("input_profile", "cmyk")
->set("intent", VIPS_INTENT_PERCEPTUAL));
}
@@ -324,7 +332,7 @@ class PipelineWorker : public Napi::AsyncWorker {
// Negate the colours in the image
if (baton->negate) {
image = image.invert();
image = sharp::Negate(image, baton->negateAlpha);
}
// Gamma encoding (darken)
@@ -343,7 +351,9 @@ class PipelineWorker : public Napi::AsyncWorker {
bool const shouldSharpen = baton->sharpenSigma != 0.0;
bool const shouldApplyMedian = baton->medianSize > 0;
bool const shouldComposite = !baton->composite.empty();
bool const shouldModulate = baton->brightness != 1.0 || baton->saturation != 1.0 || baton->hue != 0.0;
bool const shouldModulate = baton->brightness != 1.0 || baton->saturation != 1.0 ||
baton->hue != 0.0 || baton->lightness != 0.0;
bool const shouldApplyClahe = baton->claheWidth != 0 && baton->claheHeight != 0;
if (shouldComposite && !sharp::HasAlpha(image)) {
image = sharp::EnsureAlpha(image, 1);
@@ -372,11 +382,15 @@ class PipelineWorker : public Napi::AsyncWorker {
// Ensure shortest edge is at least 1 pixel
if (image.width() / xfactor < 0.5) {
xfactor = 2 * image.width();
baton->width = 1;
if (baton->canvas != Canvas::EMBED) {
baton->width = 1;
}
}
if (image.height() / yfactor < 0.5) {
yfactor = 2 * image.height();
baton->height = 1;
if (baton->canvas != Canvas::EMBED) {
baton->height = 1;
}
}
image = image.resize(1.0 / xfactor, VImage::option()
->set("vscale", 1.0 / yfactor)
@@ -409,6 +423,7 @@ class PipelineWorker : public Napi::AsyncWorker {
for (unsigned int i = 0; i < baton->joinChannelIn.size(); i++) {
std::tie(joinImage, joinImageType) = sharp::OpenInput(baton->joinChannelIn[i]);
joinImage = sharp::EnsureColourspace(joinImage, baton->colourspaceInput);
image = image.bandjoin(joinImage);
}
image = image.copy(VImage::option()->set("interpretation", baton->colourspace));
@@ -418,7 +433,7 @@ class PipelineWorker : public Napi::AsyncWorker {
if (image.width() != baton->width || image.height() != baton->height) {
if (baton->canvas == Canvas::EMBED) {
std::vector<double> background;
std::tie(image, background) = sharp::ApplyAlpha(image, baton->resizeBackground);
std::tie(image, background) = sharp::ApplyAlpha(image, baton->resizeBackground, shouldPremultiplyAlpha);
// Embed
@@ -475,7 +490,7 @@ class PipelineWorker : public Napi::AsyncWorker {
// Rotate post-extract non-90 angle
if (!baton->rotateBeforePreExtract && baton->rotationAngle != 0.0) {
std::vector<double> background;
std::tie(image, background) = sharp::ApplyAlpha(image, baton->rotationBackground);
std::tie(image, background) = sharp::ApplyAlpha(image, baton->rotationBackground, shouldPremultiplyAlpha);
image = image.rotate(baton->rotationAngle, VImage::option()->set("background", background));
}
@@ -488,7 +503,7 @@ class PipelineWorker : public Napi::AsyncWorker {
// Affine transform
if (baton->affineMatrix.size() > 0) {
std::vector<double> background;
std::tie(image, background) = sharp::ApplyAlpha(image, baton->affineBackground);
std::tie(image, background) = sharp::ApplyAlpha(image, baton->affineBackground, shouldPremultiplyAlpha);
image = image.affine(baton->affineMatrix, VImage::option()->set("background", background)
->set("idx", baton->affineIdx)
->set("idy", baton->affineIdy)
@@ -500,7 +515,7 @@ class PipelineWorker : public Napi::AsyncWorker {
// Extend edges
if (baton->extendTop > 0 || baton->extendBottom > 0 || baton->extendLeft > 0 || baton->extendRight > 0) {
std::vector<double> background;
std::tie(image, background) = sharp::ApplyAlpha(image, baton->extendBackground);
std::tie(image, background) = sharp::ApplyAlpha(image, baton->extendBackground, shouldPremultiplyAlpha);
// Embed
baton->width = image.width() + baton->extendLeft + baton->extendRight;
@@ -537,7 +552,7 @@ class PipelineWorker : public Napi::AsyncWorker {
}
if (shouldModulate) {
image = sharp::Modulate(image, baton->brightness, baton->saturation, baton->hue);
image = sharp::Modulate(image, baton->brightness, baton->saturation, baton->hue, baton->lightness);
}
// Sharpen
@@ -550,7 +565,8 @@ class PipelineWorker : public Napi::AsyncWorker {
for (Composite *composite : baton->composite) {
VImage compositeImage;
sharp::ImageType compositeImageType = sharp::ImageType::UNKNOWN;
std::tie(compositeImage, compositeImageType) = OpenInput(composite->input);
std::tie(compositeImage, compositeImageType) = sharp::OpenInput(composite->input);
compositeImage = sharp::EnsureColourspace(compositeImage, baton->colourspaceInput);
// Verify within current dimensions
if (compositeImage.width() > image.width() || compositeImage.height() > image.height()) {
throw vips::VError("Image to composite must have same dimensions or smaller");
@@ -649,11 +665,17 @@ class PipelineWorker : public Napi::AsyncWorker {
image = sharp::Normalise(image);
}
// Apply contrast limiting adaptive histogram equalization (CLAHE)
if (shouldApplyClahe) {
image = sharp::Clahe(image, baton->claheWidth, baton->claheHeight, baton->claheMaxSlope);
}
// Apply bitwise boolean operation between images
if (baton->boolean != nullptr) {
VImage booleanImage;
sharp::ImageType booleanImageType = sharp::ImageType::UNKNOWN;
std::tie(booleanImage, booleanImageType) = sharp::OpenInput(baton->boolean);
booleanImage = sharp::EnsureColourspace(booleanImage, baton->colourspaceInput);
image = sharp::Boolean(image, booleanImage, baton->booleanOp);
}
@@ -703,9 +725,10 @@ class PipelineWorker : public Napi::AsyncWorker {
// Convert colourspace, pass the current known interpretation so libvips doesn't have to guess
image = image.colourspace(baton->colourspace, VImage::option()->set("source_space", image.interpretation()));
// Transform colours from embedded profile to output profile
if (baton->withMetadata && sharp::HasProfile(image)) {
image = image.icc_transform(vips_enum_nick(VIPS_TYPE_INTERPRETATION, baton->colourspace),
VImage::option()->set("embedded", TRUE));
if (baton->withMetadata && sharp::HasProfile(image) && baton->withMetadataIcc.empty()) {
image = image.icc_transform("srgb", VImage::option()
->set("embedded", TRUE)
->set("intent", VIPS_INTENT_PERCEPTUAL));
}
}
@@ -714,13 +737,18 @@ class PipelineWorker : public Napi::AsyncWorker {
image = image.icc_transform(
const_cast<char*>(baton->withMetadataIcc.data()),
VImage::option()
->set("input_profile", "srgb")
->set("input_profile", processingProfile)
->set("embedded", TRUE)
->set("intent", VIPS_INTENT_PERCEPTUAL));
}
// Override EXIF Orientation tag
if (baton->withMetadata && baton->withMetadataOrientation != -1) {
image = sharp::SetExifOrientation(image, baton->withMetadataOrientation);
}
// Override pixel density
if (baton->withMetadataDensity > 0) {
image = sharp::SetDensity(image, baton->withMetadataDensity);
}
// Metadata key/value pairs, e.g. EXIF
if (!baton->withMetadataStrs.empty()) {
image = image.copy();
@@ -744,6 +772,7 @@ class PipelineWorker : public Napi::AsyncWorker {
baton->loop);
// Output
sharp::SetTimeout(image, baton->timeoutSeconds);
if (baton->fileOut.empty()) {
// Buffer output
if (baton->formatOut == "jpeg" || (baton->formatOut == "input" && inputImageType == sharp::ImageType::JPEG)) {
@@ -754,8 +783,8 @@ class PipelineWorker : public Napi::AsyncWorker {
->set("Q", baton->jpegQuality)
->set("interlace", baton->jpegProgressive)
->set("subsample_mode", baton->jpegChromaSubsampling == "4:4:4"
? VIPS_FOREIGN_JPEG_SUBSAMPLE_OFF
: VIPS_FOREIGN_JPEG_SUBSAMPLE_ON)
? VIPS_FOREIGN_SUBSAMPLE_OFF
: VIPS_FOREIGN_SUBSAMPLE_ON)
->set("trellis_quant", baton->jpegTrellisQuantisation)
->set("quant_table", baton->jpegQuantisationTable)
->set("overshoot_deringing", baton->jpegOvershootDeringing)
@@ -771,6 +800,22 @@ class PipelineWorker : public Napi::AsyncWorker {
} else {
baton->channels = std::min(baton->channels, 3);
}
} else if (baton->formatOut == "jp2" || (baton->formatOut == "input"
&& inputImageType == sharp::ImageType::JP2)) {
// Write JP2 to Buffer
sharp::AssertImageTypeDimensions(image, sharp::ImageType::JP2);
VipsArea *area = reinterpret_cast<VipsArea*>(image.jp2ksave_buffer(VImage::option()
->set("Q", baton->jp2Quality)
->set("lossless", baton->jp2Lossless)
->set("subsample_mode", baton->jp2ChromaSubsampling == "4:4:4"
? VIPS_FOREIGN_SUBSAMPLE_OFF : VIPS_FOREIGN_SUBSAMPLE_ON)
->set("tile_height", baton->jp2TileHeight)
->set("tile_width", baton->jp2TileWidth)));
baton->bufferOut = static_cast<char*>(area->data);
baton->bufferOutLength = area->length;
area->free_fn = nullptr;
vips_area_unref(area);
baton->formatOut = "jp2";
} else if (baton->formatOut == "png" || (baton->formatOut == "input" &&
(inputImageType == sharp::ImageType::PNG || (inputImageType == sharp::ImageType::GIF && !supportsGifOutput) ||
inputImageType == sharp::ImageType::SVG))) {
@@ -783,7 +828,7 @@ class PipelineWorker : public Napi::AsyncWorker {
->set("filter", baton->pngAdaptiveFiltering ? VIPS_FOREIGN_PNG_FILTER_ALL : VIPS_FOREIGN_PNG_FILTER_NONE)
->set("palette", baton->pngPalette)
->set("Q", baton->pngQuality)
->set("colours", baton->pngColours)
->set("bitdepth", baton->pngBitdepth)
->set("dither", baton->pngDither)));
baton->bufferOut = static_cast<char*>(area->data);
baton->bufferOutLength = area->length;
@@ -852,15 +897,14 @@ class PipelineWorker : public Napi::AsyncWorker {
} else if (baton->formatOut == "heif" ||
(baton->formatOut == "input" && inputImageType == sharp::ImageType::HEIF)) {
// Write HEIF to buffer
image = sharp::RemoveAnimationProperties(image);
VipsArea *area = reinterpret_cast<VipsArea*>(image.heifsave_buffer(VImage::option()
->set("strip", !baton->withMetadata)
->set("compression", baton->heifCompression)
->set("Q", baton->heifQuality)
->set("compression", baton->heifCompression)
->set("speed", baton->heifSpeed)
#if defined(VIPS_TYPE_FOREIGN_SUBSAMPLE)
->set("subsample_mode", baton->heifChromaSubsampling == "4:4:4"
? VIPS_FOREIGN_SUBSAMPLE_OFF : VIPS_FOREIGN_SUBSAMPLE_ON)
#endif
->set("lossless", baton->heifLossless)));
baton->bufferOut = static_cast<char*>(area->data);
baton->bufferOutLength = area->length;
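With the compile-time guard around subsample_mode removed, chroma subsampling is always passed through for HEIF/AVIF output. A usage sketch of the assumed avif() options, with placeholder paths:
const sharp = require('sharp');
sharp('input.png')                  // placeholder input path
  .avif({
    quality: 50,
    speed: 8,                       // AV1 encoder speed/effort
    chromaSubsampling: '4:2:0'      // overrides the '4:4:4' default set elsewhere in this diff
  })
  .toFile('output.avif')            // placeholder output path
  .catch(console.error);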
@@ -875,9 +919,9 @@ class PipelineWorker : public Napi::AsyncWorker {
image = image[0];
baton->channels = 1;
}
if (image.format() != VIPS_FORMAT_UCHAR) {
// Cast pixels to uint8 (unsigned char)
image = image.cast(VIPS_FORMAT_UCHAR);
if (image.format() != baton->rawDepth) {
// Cast pixels to requested format
image = image.cast(baton->rawDepth);
}
// Get raw image data
baton->bufferOut = static_cast<char*>(image.write_to_memory(&baton->bufferOutLength));
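The cast above means raw output is no longer forced to 8-bit. A sketch of the assumed JavaScript surface, a depth option on raw() echoed back via info.depth; the path is a placeholder:
const sharp = require('sharp');
sharp('input.png')                       // placeholder input path
  .pipelineColourspace('rgb16')          // keep 16-bit data through the pipeline
  .raw({ depth: 'ushort' })              // assumed option name feeding rawDepth
  .toBuffer({ resolveWithObject: true })
  .then(({ data, info }) => {
    // info.depth reports the band format; data holds uint16 samples in native byte order
    console.log(info.width, info.height, info.channels, info.depth, data.length);
  })
  .catch(console.error);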
@@ -903,13 +947,14 @@ class PipelineWorker : public Napi::AsyncWorker {
bool const isWebp = sharp::IsWebp(baton->fileOut);
bool const isGif = sharp::IsGif(baton->fileOut);
bool const isTiff = sharp::IsTiff(baton->fileOut);
bool const isJp2 = sharp::IsJp2(baton->fileOut);
bool const isHeif = sharp::IsHeif(baton->fileOut);
bool const isDz = sharp::IsDz(baton->fileOut);
bool const isDzZip = sharp::IsDzZip(baton->fileOut);
bool const isV = sharp::IsV(baton->fileOut);
bool const mightMatchInput = baton->formatOut == "input";
bool const willMatchInput = mightMatchInput &&
!(isJpeg || isPng || isWebp || isGif || isTiff || isHeif || isDz || isDzZip || isV);
!(isJpeg || isPng || isWebp || isGif || isTiff || isJp2 || isHeif || isDz || isDzZip || isV);
if (baton->formatOut == "jpeg" || (mightMatchInput && isJpeg) ||
(willMatchInput && inputImageType == sharp::ImageType::JPEG)) {
@@ -920,8 +965,8 @@ class PipelineWorker : public Napi::AsyncWorker {
->set("Q", baton->jpegQuality)
->set("interlace", baton->jpegProgressive)
->set("subsample_mode", baton->jpegChromaSubsampling == "4:4:4"
? VIPS_FOREIGN_JPEG_SUBSAMPLE_OFF
: VIPS_FOREIGN_JPEG_SUBSAMPLE_ON)
? VIPS_FOREIGN_SUBSAMPLE_OFF
: VIPS_FOREIGN_SUBSAMPLE_ON)
->set("trellis_quant", baton->jpegTrellisQuantisation)
->set("quant_table", baton->jpegQuantisationTable)
->set("overshoot_deringing", baton->jpegOvershootDeringing)
@@ -929,6 +974,18 @@ class PipelineWorker : public Napi::AsyncWorker {
->set("optimize_coding", baton->jpegOptimiseCoding));
baton->formatOut = "jpeg";
baton->channels = std::min(baton->channels, 3);
} else if (baton->formatOut == "jp2" || (mightMatchInput && isJp2) ||
(willMatchInput && (inputImageType == sharp::ImageType::JP2))) {
// Write JP2 to file
sharp::AssertImageTypeDimensions(image, sharp::ImageType::JP2);
image.jp2ksave(const_cast<char*>(baton->fileOut.data()), VImage::option()
->set("Q", baton->jp2Quality)
->set("lossless", baton->jp2Lossless)
->set("subsample_mode", baton->jp2ChromaSubsampling == "4:4:4"
? VIPS_FOREIGN_SUBSAMPLE_OFF : VIPS_FOREIGN_SUBSAMPLE_ON)
->set("tile_height", baton->jp2TileHeight)
->set("tile_width", baton->jp2TileWidth));
baton->formatOut = "jp2";
} else if (baton->formatOut == "png" || (mightMatchInput && isPng) || (willMatchInput &&
(inputImageType == sharp::ImageType::PNG || (inputImageType == sharp::ImageType::GIF && !supportsGifOutput) ||
inputImageType == sharp::ImageType::SVG))) {
@@ -941,7 +998,7 @@ class PipelineWorker : public Napi::AsyncWorker {
->set("filter", baton->pngAdaptiveFiltering ? VIPS_FOREIGN_PNG_FILTER_ALL : VIPS_FOREIGN_PNG_FILTER_NONE)
->set("palette", baton->pngPalette)
->set("Q", baton->pngQuality)
->set("colours", baton->pngColours)
->set("bitdepth", baton->pngBitdepth)
->set("dither", baton->pngDither));
baton->formatOut = "png";
} else if (baton->formatOut == "webp" || (mightMatchInput && isWebp) ||
@@ -994,15 +1051,14 @@ class PipelineWorker : public Napi::AsyncWorker {
} else if (baton->formatOut == "heif" || (mightMatchInput && isHeif) ||
(willMatchInput && inputImageType == sharp::ImageType::HEIF)) {
// Write HEIF to file
image = sharp::RemoveAnimationProperties(image);
image.heifsave(const_cast<char*>(baton->fileOut.data()), VImage::option()
->set("strip", !baton->withMetadata)
->set("Q", baton->heifQuality)
->set("compression", baton->heifCompression)
->set("speed", baton->heifSpeed)
#if defined(VIPS_TYPE_FOREIGN_SUBSAMPLE)
->set("subsample_mode", baton->heifChromaSubsampling == "4:4:4"
? VIPS_FOREIGN_SUBSAMPLE_OFF : VIPS_FOREIGN_SUBSAMPLE_ON)
#endif
->set("lossless", baton->heifLossless));
baton->formatOut = "heif";
} else if (baton->formatOut == "dz" || isDz || isDzZip) {
@@ -1119,6 +1175,9 @@ class PipelineWorker : public Napi::AsyncWorker {
info.Set("width", static_cast<uint32_t>(width));
info.Set("height", static_cast<uint32_t>(height));
info.Set("channels", static_cast<uint32_t>(baton->channels));
if (baton->formatOut == "raw") {
info.Set("depth", vips_enum_nick(VIPS_TYPE_BAND_FORMAT, baton->rawDepth));
}
info.Set("premultiplied", baton->premultiplied);
if (baton->hasCropOffset) {
info.Set("cropOffsetLeft", static_cast<int32_t>(baton->cropOffsetLeft));
@@ -1308,10 +1367,12 @@ Napi::Value pipeline(const Napi::CallbackInfo& info) {
baton->flatten = sharp::AttrAsBool(options, "flatten");
baton->flattenBackground = sharp::AttrAsVectorOfDouble(options, "flattenBackground");
baton->negate = sharp::AttrAsBool(options, "negate");
baton->negateAlpha = sharp::AttrAsBool(options, "negateAlpha");
baton->blurSigma = sharp::AttrAsDouble(options, "blurSigma");
baton->brightness = sharp::AttrAsDouble(options, "brightness");
baton->saturation = sharp::AttrAsDouble(options, "saturation");
baton->hue = sharp::AttrAsInt32(options, "hue");
baton->lightness = sharp::AttrAsDouble(options, "lightness");
baton->medianSize = sharp::AttrAsUint32(options, "medianSize");
baton->sharpenSigma = sharp::AttrAsDouble(options, "sharpenSigma");
baton->sharpenFlat = sharp::AttrAsDouble(options, "sharpenFlat");
@@ -1325,6 +1386,9 @@ Napi::Value pipeline(const Napi::CallbackInfo& info) {
baton->linearB = sharp::AttrAsDouble(options, "linearB");
baton->greyscale = sharp::AttrAsBool(options, "greyscale");
baton->normalise = sharp::AttrAsBool(options, "normalise");
baton->claheWidth = sharp::AttrAsUint32(options, "claheWidth");
baton->claheHeight = sharp::AttrAsUint32(options, "claheHeight");
baton->claheMaxSlope = sharp::AttrAsUint32(options, "claheMaxSlope");
baton->useExifOrientation = sharp::AttrAsBool(options, "useExifOrientation");
baton->angle = sharp::AttrAsInt32(options, "angle");
baton->rotationAngle = sharp::AttrAsDouble(options, "rotationAngle");
@@ -1375,6 +1439,10 @@ Napi::Value pipeline(const Napi::CallbackInfo& info) {
baton->recombMatrix[i] = sharp::AttrAsDouble(recombMatrix, i);
}
}
baton->colourspaceInput = sharp::GetInterpretation(sharp::AttrAsStr(options, "colourspaceInput"));
if (baton->colourspaceInput == VIPS_INTERPRETATION_ERROR) {
baton->colourspaceInput = VIPS_INTERPRETATION_LAST;
}
baton->colourspace = sharp::GetInterpretation(sharp::AttrAsStr(options, "colourspace"));
if (baton->colourspace == VIPS_INTERPRETATION_ERROR) {
baton->colourspace = VIPS_INTERPRETATION_sRGB;
@@ -1384,6 +1452,7 @@ Napi::Value pipeline(const Napi::CallbackInfo& info) {
baton->fileOut = sharp::AttrAsStr(options, "fileOut");
baton->withMetadata = sharp::AttrAsBool(options, "withMetadata");
baton->withMetadataOrientation = sharp::AttrAsUint32(options, "withMetadataOrientation");
baton->withMetadataDensity = sharp::AttrAsDouble(options, "withMetadataDensity");
baton->withMetadataIcc = sharp::AttrAsStr(options, "withMetadataIcc");
Napi::Object mdStrs = options.Get("withMetadataStrs").As<Napi::Object>();
Napi::Array mdStrKeys = mdStrs.GetPropertyNames();
@@ -1391,6 +1460,7 @@ Napi::Value pipeline(const Napi::CallbackInfo& info) {
std::string k = sharp::AttrAsStr(mdStrKeys, i);
baton->withMetadataStrs.insert(std::make_pair(k, sharp::AttrAsStr(mdStrs, k)));
}
baton->timeoutSeconds = sharp::AttrAsUint32(options, "timeoutSeconds");
// Format-specific
baton->jpegQuality = sharp::AttrAsUint32(options, "jpegQuality");
baton->jpegProgressive = sharp::AttrAsBool(options, "jpegProgressive");
@@ -1405,8 +1475,13 @@ Napi::Value pipeline(const Napi::CallbackInfo& info) {
baton->pngAdaptiveFiltering = sharp::AttrAsBool(options, "pngAdaptiveFiltering");
baton->pngPalette = sharp::AttrAsBool(options, "pngPalette");
baton->pngQuality = sharp::AttrAsUint32(options, "pngQuality");
baton->pngColours = sharp::AttrAsUint32(options, "pngColours");
baton->pngBitdepth = sharp::AttrAsUint32(options, "pngBitdepth");
baton->pngDither = sharp::AttrAsDouble(options, "pngDither");
baton->jp2Quality = sharp::AttrAsUint32(options, "jp2Quality");
baton->jp2Lossless = sharp::AttrAsBool(options, "jp2Lossless");
baton->jp2TileHeight = sharp::AttrAsUint32(options, "jp2TileHeight");
baton->jp2TileWidth = sharp::AttrAsUint32(options, "jp2TileWidth");
baton->jp2ChromaSubsampling = sharp::AttrAsStr(options, "jp2ChromaSubsampling");
baton->webpQuality = sharp::AttrAsUint32(options, "webpQuality");
baton->webpAlphaQuality = sharp::AttrAsUint32(options, "webpAlphaQuality");
baton->webpLossless = sharp::AttrAsBool(options, "webpLossless");
@@ -1421,6 +1496,9 @@ Napi::Value pipeline(const Napi::CallbackInfo& info) {
baton->tiffTileHeight = sharp::AttrAsUint32(options, "tiffTileHeight");
baton->tiffXres = sharp::AttrAsDouble(options, "tiffXres");
baton->tiffYres = sharp::AttrAsDouble(options, "tiffYres");
if (baton->tiffXres == 1.0 && baton->tiffYres == 1.0 && baton->withMetadataDensity > 0) {
baton->tiffXres = baton->tiffYres = baton->withMetadataDensity / 25.4;
}
// tiff compression options
baton->tiffCompression = static_cast<VipsForeignTiffCompression>(
vips_enum_from_nick(nullptr, VIPS_TYPE_FOREIGN_TIFF_COMPRESSION,
@@ -1436,6 +1514,11 @@ Napi::Value pipeline(const Napi::CallbackInfo& info) {
baton->heifSpeed = sharp::AttrAsUint32(options, "heifSpeed");
baton->heifChromaSubsampling = sharp::AttrAsStr(options, "heifChromaSubsampling");
// Raw output
baton->rawDepth = static_cast<VipsBandFormat>(
vips_enum_from_nick(nullptr, VIPS_TYPE_BAND_FORMAT,
sharp::AttrAsStr(options, "rawDepth").data()));
// Animated output
if (sharp::HasAttr(options, "pageHeight")) {
baton->pageHeight = sharp::AttrAsUint32(options, "pageHeight");
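JPEG 2000 support threads through this file: the IsJp2 check, the jp2ksave buffer and file branches, and the jp2* options read above. A usage sketch assuming the JavaScript wrapper exposes these via jp2(); JP2 is only available when the linked libvips was built with OpenJPEG, and paths are placeholders:
const sharp = require('sharp');
// feature check via the 'jp2k' entry added to the format list later in this diff
if (sharp.format.jp2k && sharp.format.jp2k.output.buffer) {
  sharp('input.jpg')                  // placeholder input path
    .jp2({
      quality: 80,
      lossless: false,
      chromaSubsampling: '4:4:4',     // maps to subsample_mode OFF above
      tileWidth: 512,
      tileHeight: 512
    })
    .toFile('output.jp2')             // placeholder output path
    .catch(console.error);
}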

View File

@@ -90,10 +90,12 @@ struct PipelineBaton {
bool flatten;
std::vector<double> flattenBackground;
bool negate;
bool negateAlpha;
double blurSigma;
double brightness;
double saturation;
int hue;
double lightness;
int medianSize;
double sharpenSigma;
double sharpenFlat;
@@ -109,6 +111,9 @@ struct PipelineBaton {
double gammaOut;
bool greyscale;
bool normalise;
int claheWidth;
int claheHeight;
int claheMaxSlope;
bool useExifOrientation;
int angle;
double rotationAngle;
@@ -142,8 +147,13 @@ struct PipelineBaton {
bool pngAdaptiveFiltering;
bool pngPalette;
int pngQuality;
int pngColours;
int pngBitdepth;
double pngDither;
int jp2Quality;
bool jp2Lossless;
int jp2TileHeight;
int jp2TileWidth;
std::string jp2ChromaSubsampling;
int webpQuality;
int webpAlphaQuality;
bool webpNearLossless;
@@ -165,11 +175,14 @@ struct PipelineBaton {
int heifSpeed;
std::string heifChromaSubsampling;
bool heifLossless;
VipsBandFormat rawDepth;
std::string err;
bool withMetadata;
int withMetadataOrientation;
double withMetadataDensity;
std::string withMetadataIcc;
std::unordered_map<std::string, std::string> withMetadataStrs;
int timeoutSeconds;
std::unique_ptr<double[]> convKernel;
int convKernelWidth;
int convKernelHeight;
@@ -181,6 +194,7 @@ struct PipelineBaton {
int extractChannel;
bool removeAlpha;
double ensureAlpha;
VipsInterpretation colourspaceInput;
VipsInterpretation colourspace;
int pageHeight;
std::vector<int> delay;
@@ -215,10 +229,12 @@ struct PipelineBaton {
flatten(false),
flattenBackground{ 0.0, 0.0, 0.0 },
negate(false),
negateAlpha(true),
blurSigma(0.0),
brightness(1.0),
saturation(1.0),
hue(0),
lightness(0),
medianSize(0),
sharpenSigma(0.0),
sharpenFlat(1.0),
@@ -233,6 +249,9 @@ struct PipelineBaton {
gamma(0.0),
greyscale(false),
normalise(false),
claheWidth(0),
claheHeight(0),
claheMaxSlope(3),
useExifOrientation(false),
angle(0),
rotationAngle(0.0),
@@ -265,8 +284,13 @@ struct PipelineBaton {
pngAdaptiveFiltering(false),
pngPalette(false),
pngQuality(100),
pngColours(256),
pngBitdepth(8),
pngDither(1.0),
jp2Quality(80),
jp2Lossless(false),
jp2TileHeight(512),
jp2TileWidth(512),
jp2ChromaSubsampling("4:4:4"),
webpQuality(80),
webpAlphaQuality(100),
webpNearLossless(false),
@@ -286,10 +310,13 @@ struct PipelineBaton {
heifQuality(50),
heifCompression(VIPS_FOREIGN_HEIF_COMPRESSION_AV1),
heifSpeed(5),
heifChromaSubsampling("4:2:0"),
heifChromaSubsampling("4:4:4"),
heifLossless(false),
rawDepth(VIPS_FORMAT_UCHAR),
withMetadata(false),
withMetadataOrientation(-1),
withMetadataDensity(0.0),
timeoutSeconds(0),
convKernelWidth(0),
convKernelHeight(0),
convKernelScale(0.0),
@@ -300,6 +327,7 @@ struct PipelineBaton {
extractChannel(-1),
removeAlpha(false),
ensureAlpha(-1.0),
colourspaceInput(VIPS_INTERPRETATION_LAST),
colourspace(VIPS_INTERPRETATION_LAST),
pageHeight(0),
delay{-1},

View File

@@ -115,7 +115,7 @@ Napi::Value format(const Napi::CallbackInfo& info) {
Napi::Object format = Napi::Object::New(env);
for (std::string const f : {
"jpeg", "png", "webp", "tiff", "magick", "openslide", "dz",
"ppm", "fits", "gif", "svg", "heif", "pdf", "vips"
"ppm", "fits", "gif", "svg", "heif", "pdf", "vips", "jp2k"
}) {
// Input
Napi::Boolean hasInputFile =

View File

@@ -8,16 +8,18 @@
"test": "node perf && node random && node parallel"
},
"devDependencies": {
"async": "3.2.0",
"@squoosh/cli": "0.7.2",
"@squoosh/lib": "0.4.0",
"async": "3.2.1",
"benchmark": "2.1.4",
"gm": "1.23.1",
"imagemagick": "0.1.3",
"jimp": "0.16.1",
"mapnik": "4.5.6",
"semver": "7.3.4"
"mapnik": "4.5.8",
"semver": "7.3.5"
},
"license": "Apache-2.0",
"engines": {
"node": "14"
"node": "16"
}
}

View File

@@ -2,6 +2,7 @@
const os = require('os');
const fs = require('fs');
const { exec } = require('child_process');
const async = require('async');
const assert = require('assert');
@@ -13,6 +14,7 @@ const gm = require('gm');
const imagemagick = require('imagemagick');
const mapnik = require('mapnik');
const jimp = require('jimp');
const squoosh = require('@squoosh/lib');
const fixtures = require('../fixtures');
@@ -75,6 +77,65 @@ async.series({
});
}
});
// squoosh-cli
jpegSuite.add('squoosh-cli-file-file', {
defer: true,
fn: function (deferred) {
exec(`./node_modules/.bin/squoosh-cli \
--output-dir ${os.tmpdir()} \
--resize '{"enabled":true,"width":${width},"height":${height},"method":"lanczos3","premultiply":false,"linearRGB":false}' \
--mozjpeg '{"quality":80,"progressive":false,"optimize_coding":true,"quant_table":0,"trellis_multipass":false,"chroma_subsample":2,"separate_chroma_quality":false}' \
"${fixtures.inputJpg}"`, function (err) {
if (err) {
throw err;
}
deferred.resolve();
});
}
});
// squoosh-lib (GPLv3)
jpegSuite.add('squoosh-lib-buffer-buffer', {
defer: true,
fn: function (deferred) {
const pool = new squoosh.ImagePool();
const image = pool.ingestImage(inputJpgBuffer);
image.decoded
.then(function () {
return image.preprocess({
resize: {
enabled: true,
width,
height,
method: 'lanczos3',
premultiply: false,
linearRGB: false
}
});
})
.then(function () {
return image.encode({
mozjpeg: {
quality: 80,
progressive: false,
optimize_coding: true,
quant_table: 0,
trellis_multipass: false,
chroma_subsample: 2,
separate_chroma_quality: false
}
});
})
.then(function () {
return pool.close();
})
.then(function () {
return image.encodedWith.mozjpeg;
})
.then(function () {
deferred.resolve();
});
}
});
// mapnik
jpegSuite.add('mapnik-file-file', {
defer: true,

Binary test fixtures added or updated (image previews not shown):
test/fixtures/concert.jpg (new, 24 KiB)
test/fixtures/expected/clahe-5-5-0.jpg (new, 40 KiB)
test/fixtures/expected/clahe-5-5-5.jpg (new, 37 KiB)
test/fixtures/expected/clahe-50-50-0.jpg (new, 28 KiB)
test/fixtures/gradients-rgb8.png (new, 777 KiB)
Ten further expected-output fixtures whose names are not rendered here (24 KiB, 15 KiB, 28 KiB, 21 KiB, 56 KiB, 27 KiB, 5.4 KiB, 46 KiB, 40 KiB, 22 KiB) and one replaced fixture (685 B before, 262 B after).

View File

@@ -2,7 +2,7 @@
const path = require('path');
const sharp = require('../../');
const maxColourDistance = require('../../build/Release/sharp')._maxColourDistance;
const maxColourDistance = require('../../lib/sharp')._maxColourDistance;
// Helpers
const getPath = function (filename) {
@@ -74,6 +74,7 @@ module.exports = {
inputJpgLossless: getPath('testimgl.jpg'), // Lossless JPEG from ftp://ftp.fu-berlin.de/unix/X11/graphics/ImageMagick/delegates/ljpeg-6b.tar.gz
inputPng: getPath('50020484-00001.png'), // http://c.searspartsdirect.com/lis_png/PLDM/50020484-00001.png
inputPngGradients: getPath('gradients-rgb8.png'),
inputPngWithTransparency: getPath('blackbug.png'), // public domain
inputPngCompleteTransparency: getPath('full-transparent.png'),
inputPngWithGreyAlpha: getPath('grey-8bit-alpha.png'),
@@ -90,6 +91,8 @@ module.exports = {
inputPngEmbed: getPath('embedgravitybird.png'), // Released to sharp under a CC BY 4.0
inputPngRGBWithAlpha: getPath('2569067123_aca715a2ee_o.png'), // http://www.flickr.com/photos/grizdave/2569067123/ (same as inputJpg)
inputPngImageInAlpha: getPath('image-in-alpha.png'), // https://github.com/lovell/sharp/issues/1597
inputPngSolidAlpha: getPath('with-alpha.png'), // https://github.com/lovell/sharp/issues/1599
inputPngP3: getPath('p3.png'), // https://github.com/lovell/sharp/issues/2862
inputWebP: getPath('4.webp'), // http://www.gstatic.com/webp/gallery/4.webp
inputWebPWithTransparency: getPath('5_webp_a.webp'), // http://www.gstatic.com/webp/gallery3/5_webp_a.webp
@@ -102,6 +105,8 @@ module.exports = {
inputTiffUncompressed: getPath('uncompressed_tiff.tiff'), // https://code.google.com/archive/p/imagetestsuite/wikis/TIFFTestSuite.wiki file: 0c84d07e1b22b76f24cccc70d8788e4a.tif
inputTiff8BitDepth: getPath('8bit_depth.tiff'),
inputTifftagPhotoshop: getPath('tifftag-photoshop.tiff'), // https://github.com/lovell/sharp/issues/1600
inputJp2: getPath('relax.jp2'), // https://www.fnordware.com/j2k/relax.jp2
inputGif: getPath('Crash_test.gif'), // http://upload.wikimedia.org/wikipedia/commons/e/e3/Crash_test.gif
inputGifGreyPlusAlpha: getPath('grey-plus-alpha.gif'), // http://i.imgur.com/gZ5jlmE.gif
inputGifAnimated: getPath('rotating-squares.gif'), // CC0 https://loading.io/spinner/blocks/-rotating-squares-preloader-gif
@@ -120,6 +125,8 @@ module.exports = {
inputV: getPath('vfile.v'),
inputJpgClahe: getPath('concert.jpg'), // public domain - https://www.flickr.com/photos/mars_/14389236779/
testPattern: getPath('test-pattern.png'),
// Path for tests requiring human inspection

Binary test fixtures added (previews not shown):
test/fixtures/p3.png (new, 610 B)
test/fixtures/relax.jp2 (new)
test/fixtures/with-alpha.png (new, 1.3 KiB)

View File

@@ -444,6 +444,22 @@
...
fun:vips__init
}
{
leak_libvips_thread_pool_new
Memcheck:Leak
match-leak-kinds: possible
fun:calloc
...
fun:g_system_thread_new
}
{
leak_libvips_thread_pool_push
Memcheck:Leak
match-leak-kinds: possible
fun:calloc
...
fun:g_thread_pool_push
}
{
leak_rsvg_static_data
Memcheck:Leak
@@ -480,6 +496,14 @@
...
fun:rsvg_handle_new_from_gfile_sync
}
{
leak_rsvg_rust_280_bytes_static_regex
Memcheck:Leak
match-leak-kinds: possible
fun:malloc
...
fun:rsvg_handle_get_dimensions_sub
}
# libuv warnings
{
@@ -871,3 +895,12 @@
...
fun:_ZN2v88internal18ArrayBufferSweeper10ReleaseAllEv
}
{
addr_v8_ZN2v88internal12_GLOBAL__N_119HandleApiCallHelperILb0EEENS0
Memcheck:Addr8
fun:strncmp
...
fun:_ZZN4node7binding6DLOpenERKN2v820FunctionCallbackInfoINS1_5ValueEEEENKUlPNS0_4DLibEE_clES8_
fun:_ZN4node7binding6DLOpenERKN2v820FunctionCallbackInfoINS1_5ValueEEE
fun:_ZN2v88internal12_GLOBAL__N_119HandleApiCallHelperILb0EEENS0_11MaybeHandleINS0_6ObjectEEEPNS0_7IsolateENS0_6HandleINS0_10HeapObjectEEESA_NS8_INS0_20FunctionTemplateInfoEEENS8_IS4_EENS0_16BuiltinArgumentsE
}

View File

@@ -19,6 +19,17 @@ describe('HTTP agent', function () {
assert.strictEqual(443, proxy.defaultPort);
});
it('HTTPS proxy with auth from HTTPS_PROXY using credentials containing special characters', function () {
process.env.HTTPS_PROXY = 'https://user,:pass=@secure:123';
const proxy = agent();
delete process.env.HTTPS_PROXY;
assert.strictEqual('object', typeof proxy);
assert.strictEqual('secure', proxy.options.proxy.host);
assert.strictEqual(123, proxy.options.proxy.port);
assert.strictEqual('user,:pass=', proxy.options.proxy.proxyAuth);
assert.strictEqual(443, proxy.defaultPort);
});
it('HTTP proxy without auth from npm_config_proxy', function () {
process.env.npm_config_proxy = 'http://plaintext:456';
const proxy = agent();

View File

@@ -3,7 +3,7 @@
const assert = require('assert');
const sharp = require('../../');
const { inputAvif, inputJpg } = require('../fixtures');
const { inputAvif, inputJpg, inputGifAnimated } = require('../fixtures');
describe('AVIF', () => {
it('called without options does not throw an error', () => {
@@ -81,4 +81,29 @@ describe('AVIF', () => {
width: 32
});
});
it('can convert animated GIF to non-animated AVIF', async () => {
const data = await sharp(inputGifAnimated, { animated: true })
.resize(10)
.avif({ speed: 8 })
.toBuffer();
const metadata = await sharp(data)
.metadata();
const { size, ...metadataWithoutSize } = metadata;
assert.deepStrictEqual(metadataWithoutSize, {
channels: 4,
compression: 'av1',
depth: 'uchar',
format: 'heif',
hasAlpha: true,
hasProfile: false,
height: 300,
isProgressive: false,
pageHeight: 300,
pagePrimary: 0,
pages: 1,
space: 'srgb',
width: 10
});
});
});

test/unit/clahe.js (new file, 139 lines)
View File

@@ -0,0 +1,139 @@
'use strict';
const assert = require('assert');
const sharp = require('../../lib');
const fixtures = require('../fixtures');
describe('Clahe', function () {
it('width 5 height 5 maxSlope 0', function (done) {
sharp(fixtures.inputJpgClahe)
.clahe({ width: 5, height: 5, maxSlope: 0 })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('jpeg', info.format);
fixtures.assertSimilar(fixtures.expected('clahe-5-5-0.jpg'), data, { threshold: 10 }, done);
});
});
it('width 5 height 5 maxSlope 5', function (done) {
sharp(fixtures.inputJpgClahe)
.clahe({ width: 5, height: 5, maxSlope: 5 })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('jpeg', info.format);
fixtures.assertSimilar(fixtures.expected('clahe-5-5-5.jpg'), data, done);
});
});
it('width 11 height 25 maxSlope 14', function (done) {
sharp(fixtures.inputJpgClahe)
.clahe({ width: 11, height: 25, maxSlope: 14 })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('jpeg', info.format);
fixtures.assertSimilar(fixtures.expected('clahe-11-25-14.jpg'), data, done);
});
});
it('width 50 height 50 maxSlope 0', function (done) {
sharp(fixtures.inputJpgClahe)
.clahe({ width: 50, height: 50, maxSlope: 0 })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('jpeg', info.format);
fixtures.assertSimilar(fixtures.expected('clahe-50-50-0.jpg'), data, done);
});
});
it('width 50 height 50 maxSlope 14', function (done) {
sharp(fixtures.inputJpgClahe)
.clahe({ width: 50, height: 50, maxSlope: 14 })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('jpeg', info.format);
fixtures.assertSimilar(fixtures.expected('clahe-50-50-14.jpg'), data, done);
});
});
it('width 100 height 50 maxSlope 3', function (done) {
sharp(fixtures.inputJpgClahe)
.clahe({ width: 100, height: 50, maxSlope: 3 })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('jpeg', info.format);
fixtures.assertSimilar(fixtures.expected('clahe-100-50-3.jpg'), data, done);
});
});
it('width 100 height 100 maxSlope 0', function (done) {
sharp(fixtures.inputJpgClahe)
.clahe({ width: 100, height: 100, maxSlope: 0 })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('jpeg', info.format);
fixtures.assertSimilar(fixtures.expected('clahe-100-100-0.jpg'), data, done);
});
});
it('invalid maxSlope', function () {
assert.throws(function () {
sharp(fixtures.inputJpgClahe).clahe({ width: 100, height: 100, maxSlope: -5 });
});
assert.throws(function () {
sharp(fixtures.inputJpgClahe).clahe({ width: 100, height: 100, maxSlope: 110 });
});
assert.throws(function () {
sharp(fixtures.inputJpgClahe).clahe({ width: 100, height: 100, maxSlope: 5.5 });
});
assert.throws(function () {
sharp(fixtures.inputJpgClahe).clahe({ width: 100, height: 100, maxSlope: 'a string' });
});
});
it('invalid width', function () {
assert.throws(function () {
sharp(fixtures.inputJpgClahe).clahe({ width: 100.5, height: 100 });
});
assert.throws(function () {
sharp(fixtures.inputJpgClahe).clahe({ width: -5, height: 100 });
});
assert.throws(function () {
sharp(fixtures.inputJpgClahe).clahe({ width: true, height: 100 });
});
assert.throws(function () {
sharp(fixtures.inputJpgClahe).clahe({ width: 'string test', height: 100 });
});
});
it('invalid height', function () {
assert.throws(function () {
sharp(fixtures.inputJpgClahe).clahe({ width: 100, height: 100.5 });
});
assert.throws(function () {
sharp(fixtures.inputJpgClahe).clahe({ width: 100, height: -5 });
});
assert.throws(function () {
sharp(fixtures.inputJpgClahe).clahe({ width: 100, height: true });
});
assert.throws(function () {
sharp(fixtures.inputJpgClahe).clahe({ width: 100, height: 'string test' });
});
});
it('invalid options object', function () {
assert.throws(function () {
sharp(fixtures.inputJpgClahe).clahe(100, 100, 5);
});
});
it('uses default maxSlope of 3', function (done) {
sharp(fixtures.inputJpgClahe)
.clahe({ width: 100, height: 50 })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('jpeg', info.format);
fixtures.assertSimilar(fixtures.expected('clahe-100-50-3.jpg'), data, done);
});
});
});
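A minimal sketch of the clahe operation exercised by this new test file (input/output paths are hypothetical):

const sharp = require('sharp');

// Contrast Limited Adaptive Histogram Equalisation: width/height set the
// size of the local region in pixels; maxSlope is an integer limiting
// contrast amplification (0 disables the limit, the default is 3).
sharp('photo.jpg')
  .clahe({ width: 50, height: 50, maxSlope: 3 })
  .toFile('photo-clahe.jpg');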


@@ -90,7 +90,48 @@ describe('Colour space conversion', function () {
});
});
it('Invalid input', function () {
it('From sRGB with RGB16 pipeline, resize with gamma, to sRGB', function (done) {
sharp(fixtures.inputPngGradients)
.pipelineColourspace('rgb16')
.resize(320)
.gamma()
.toColourspace('srgb')
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(320, info.width);
fixtures.assertSimilar(fixtures.expected('colourspace-gradients-gamma-resize.png'), data, {
threshold: 0
}, done);
});
});
it('Convert P3 to sRGB', async () => {
const [r, g, b] = await sharp(fixtures.inputPngP3)
.raw()
.toBuffer();
assert.strictEqual(r, 255);
assert.strictEqual(g, 0);
assert.strictEqual(b, 0);
});
it('Passthrough P3', async () => {
const [r, g, b] = await sharp(fixtures.inputPngP3)
.withMetadata({ icc: 'p3' })
.raw()
.toBuffer();
assert.strictEqual(r, 234);
assert.strictEqual(g, 51);
assert.strictEqual(b, 34);
});
it('Invalid pipelineColourspace input', function () {
assert.throws(function () {
sharp(fixtures.inputJpg)
.pipelineColorspace(null);
}, /Expected string for colourspace but received null of type object/);
});
it('Invalid toColourspace input', function () {
assert.throws(function () {
sharp(fixtures.inputJpg)
.toColourspace(null);

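The rgb16 pipeline test above reduces to a sketch like this (file names hypothetical): intermediate processing at 16 bits per channel can reduce banding in the gamma step, and the output is converted back to 8-bit sRGB.

const sharp = require('sharp');

sharp('gradient.png')
  .pipelineColourspace('rgb16') // perform resize/gamma in 16-bit RGB
  .resize(320)
  .gamma()
  .toColourspace('srgb')        // convert back to sRGB for output
  .toFile('gradient-320.png');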

@@ -124,4 +124,30 @@ describe('Extend', function () {
fixtures.assertSimilar(fixtures.expected('extend-2channel.png'), data, done);
});
});
it('Premultiply background when compositing', async () => {
const background = '#bf1942cc';
const data = await sharp({
create: {
width: 1, height: 1, channels: 4, background: '#fff0'
}
})
.composite([{
input: {
create: {
width: 1, height: 1, channels: 4, background
}
}
}])
.extend({
left: 1, background
})
.raw()
.toBuffer();
const [r1, g1, b1, a1, r2, g2, b2, a2] = data;
assert.strictEqual(true, Math.abs(r2 - r1) < 2);
assert.strictEqual(true, Math.abs(g2 - g1) < 2);
assert.strictEqual(true, Math.abs(b2 - b1) < 2);
assert.strictEqual(true, Math.abs(a2 - a1) < 2);
});
});
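A sketch of the behaviour this test pins down: the background passed to extend() is premultiplied, so it matches the same semi-transparent colour when used via composite() (file names hypothetical).

const sharp = require('sharp');

const background = '#bf1942cc'; // semi-transparent red

sharp('input.png')
  .extend({ top: 8, bottom: 8, left: 8, right: 8, background })
  .toFile('extended.png');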


@@ -57,8 +57,8 @@ describe('Image channel extraction', function () {
it('With colorspace conversion', function (done) {
const output = fixtures.path('output.extract-lch.jpg');
sharp(fixtures.inputJpg)
.toColourspace('lch')
.extractChannel(1)
.toColourspace('lch')
.resize(320, 240, { fastShrinkOnLoad: false })
.toFile(output, function (err, info) {
if (err) throw err;
@@ -70,12 +70,13 @@ describe('Image channel extraction', function () {
});
it('Alpha from 16-bit PNG', function (done) {
const output = fixtures.path('output.extract-alpha-16bit.jpg');
const output = fixtures.path('output.extract-alpha-16bit.png');
sharp(fixtures.inputPngWithTransparency16bit)
.resize(16)
.extractChannel(3)
.toFile(output, function (err, info) {
.toFile(output, function (err) {
if (err) throw err;
fixtures.assertMaxColourDistance(output, fixtures.expected('extract-alpha-16bit.jpg'));
fixtures.assertMaxColourDistance(output, fixtures.expected('extract-alpha-16bit.png'));
done();
});
});


@@ -57,7 +57,7 @@ describe('HEIF', () => {
});
it('out of range speed should throw an error', () => {
assert.throws(() => {
sharp().heif({ speed: 9 });
sharp().heif({ speed: 10 });
});
});
it('invalid speed should throw an error', () => {

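The updated range check reflects that speed 9 is now accepted for AVIF/HEIC encoding, while 10 still throws; a hedged sketch of the fastest encode (file names hypothetical):

const sharp = require('sharp');

// speed accepts 0 (slowest) through 9 (fastest); 10 or more throws.
sharp('input.jpg')
  .avif({ speed: 9 })
  .toFile('output.avif');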

@@ -1,6 +1,7 @@
'use strict';
const fs = require('fs');
const path = require('path');
const assert = require('assert');
const rimraf = require('rimraf');
@@ -297,6 +298,21 @@ describe('Input/output', function () {
});
});
it('Support output to tif format', function (done) {
sharp(fixtures.inputTiff)
.resize(320, 240)
.toFormat('tif')
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
assert.strictEqual(data.length, info.size);
assert.strictEqual('tiff', info.format);
assert.strictEqual(320, info.width);
assert.strictEqual(240, info.height);
done();
});
});
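As the test above shows, 'tif' is now accepted as an alias when selecting TIFF output, for example:

const sharp = require('sharp');

sharp('input.tiff')
  .resize(320, 240)
  .toFormat('tif') // equivalent to .toFormat('tiff') or .tiff()
  .toBuffer({ resolveWithObject: true })
  .then(({ info }) => console.log(info.format)); // 'tiff'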
it('Fail when output File is input File', function (done) {
sharp(fixtures.inputJpg).toFile(fixtures.inputJpg, function (err) {
assert(err instanceof Error);
@@ -316,6 +332,48 @@ describe('Input/output', function () {
});
});
it('Fail when output File is input File (relative output, absolute input)', function (done) {
const relativePath = path.relative(process.cwd(), fixtures.inputJpg);
sharp(fixtures.inputJpg).toFile(relativePath, function (err) {
assert(err instanceof Error);
assert.strictEqual('Cannot use same file for input and output', err.message);
done();
});
});
it('Fail when output File is input File via Promise (relative output, absolute input)', function (done) {
const relativePath = path.relative(process.cwd(), fixtures.inputJpg);
sharp(fixtures.inputJpg).toFile(relativePath).then(function (data) {
assert(false);
done();
}).catch(function (err) {
assert(err instanceof Error);
assert.strictEqual('Cannot use same file for input and output', err.message);
done();
});
});
it('Fail when output File is input File (relative input, absolute output)', function (done) {
const relativePath = path.relative(process.cwd(), fixtures.inputJpg);
sharp(relativePath).toFile(fixtures.inputJpg, function (err) {
assert(err instanceof Error);
assert.strictEqual('Cannot use same file for input and output', err.message);
done();
});
});
it('Fail when output File is input File via Promise (relative input, absolute output)', function (done) {
const relativePath = path.relative(process.cwd(), fixtures.inputJpg);
sharp(relativePath).toFile(fixtures.inputJpg).then(function (data) {
assert(false);
done();
}).catch(function (err) {
assert(err instanceof Error);
assert.strictEqual('Cannot use same file for input and output', err.message);
done();
});
});
it('Fail when output File is empty', function (done) {
sharp(fixtures.inputJpg).toFile('', function (err) {
assert(err instanceof Error);
@@ -335,17 +393,6 @@ describe('Input/output', function () {
});
});
it('Fail when input is empty Buffer', function (done) {
sharp(Buffer.alloc(0)).toBuffer().then(function () {
assert(false);
done();
}).catch(function (err) {
assert(err instanceof Error);
assert.strictEqual('Input buffer contains unsupported image format', err.message);
done();
});
});
it('Fail when input is invalid Buffer', function (done) {
sharp(Buffer.from([0x1, 0x2, 0x3, 0x4])).toBuffer().then(function () {
assert(false);

test/unit/jp2.js (new file, 99 lines)

@@ -0,0 +1,99 @@
'use strict';
const fs = require('fs');
const assert = require('assert');
const sharp = require('../../');
const fixtures = require('../fixtures');
describe('JP2 output', () => {
if (!sharp.format.jp2k.input.buffer) {
it('JP2 output should fail due to missing OpenJPEG', () => {
assert.rejects(() =>
sharp(fixtures.inputJpg)
.jp2()
.toBuffer(),
/JP2 output requires libvips with support for OpenJPEG/
);
});
it('JP2 file output should fail due to missing OpenJPEG', () => {
assert.rejects(async () => await sharp().toFile('test.jp2'),
/JP2 output requires libvips with support for OpenJPEG/
);
});
} else {
it('JP2 Buffer to PNG Buffer', () => {
sharp(fs.readFileSync(fixtures.inputJp2))
.resize(8, 15)
.png()
.toBuffer({ resolveWithObject: true })
.then(({ data, info }) => {
assert.strictEqual(true, data.length > 0);
assert.strictEqual(data.length, info.size);
assert.strictEqual('png', info.format);
assert.strictEqual(8, info.width);
assert.strictEqual(15, info.height);
assert.strictEqual(4, info.channels);
});
});
it('JP2 quality', function (done) {
sharp(fixtures.inputJp2)
.resize(320, 240)
.jp2({ quality: 70 })
.toBuffer(function (err, buffer70) {
if (err) throw err;
sharp(fixtures.inputJp2)
.resize(320, 240)
.toBuffer(function (err, buffer80) {
if (err) throw err;
sharp(fixtures.inputJp2)
.resize(320, 240)
.jp2({ quality: 90 })
.toBuffer(function (err, buffer90) {
if (err) throw err;
assert(buffer70.length < buffer80.length);
assert(buffer80.length < buffer90.length);
done();
});
});
});
});
it('Without chroma subsampling generates larger file', function (done) {
// First generate with chroma subsampling (default)
sharp(fixtures.inputJp2)
.resize(320, 240)
.jp2({ chromaSubsampling: '4:2:0' })
.toBuffer(function (err, withChromaSubsamplingData, withChromaSubsamplingInfo) {
if (err) throw err;
assert.strictEqual(true, withChromaSubsamplingData.length > 0);
assert.strictEqual(withChromaSubsamplingData.length, withChromaSubsamplingInfo.size);
assert.strictEqual('jp2', withChromaSubsamplingInfo.format);
assert.strictEqual(320, withChromaSubsamplingInfo.width);
assert.strictEqual(240, withChromaSubsamplingInfo.height);
// Then generate without
sharp(fixtures.inputJp2)
.resize(320, 240)
.jp2({ chromaSubsampling: '4:4:4' })
.toBuffer(function (err, withoutChromaSubsamplingData, withoutChromaSubsamplingInfo) {
if (err) throw err;
assert.strictEqual(true, withoutChromaSubsamplingData.length > 0);
assert.strictEqual(withoutChromaSubsamplingData.length, withoutChromaSubsamplingInfo.size);
assert.strictEqual('jp2', withoutChromaSubsamplingInfo.format);
assert.strictEqual(320, withoutChromaSubsamplingInfo.width);
assert.strictEqual(240, withoutChromaSubsamplingInfo.height);
assert.strictEqual(true, withChromaSubsamplingData.length <= withoutChromaSubsamplingData.length);
done();
});
});
});
it('Invalid JP2 chromaSubsampling value throws error', function () {
assert.throws(function () {
sharp().jpeg({ chromaSubsampling: '4:2:2' });
});
});
}
});
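JP2 support depends on libvips having been compiled with OpenJPEG, so a capability check before requesting .jp2() output is advisable (file names hypothetical):

const sharp = require('sharp');

if (sharp.format.jp2k.input.buffer) {
  sharp('input.jpg')
    .jp2({ quality: 80, chromaSubsampling: '4:4:4' })
    .toFile('output.jp2')
    .catch(console.error);
} else {
  console.warn('This libvips was not compiled with OpenJPEG support');
}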


@@ -240,7 +240,7 @@ describe('Image metadata', function () {
})
);
it('GIF via giflib', function (done) {
it('GIF', function (done) {
sharp(fixtures.inputGif).metadata(function (err, metadata) {
if (err) throw err;
assert.strictEqual('gif', metadata.format);
@@ -253,27 +253,26 @@ describe('Image metadata', function () {
assert.strictEqual('undefined', typeof metadata.chromaSubsampling);
assert.strictEqual(false, metadata.isProgressive);
assert.strictEqual(false, metadata.hasProfile);
assert.strictEqual(false, metadata.hasAlpha);
assert.strictEqual('undefined', typeof metadata.orientation);
assert.strictEqual('undefined', typeof metadata.exif);
assert.strictEqual('undefined', typeof metadata.icc);
assert.deepStrictEqual(metadata.background, { r: 138, g: 148, b: 102 });
done();
});
});
it('GIF grey+alpha via giflib', function (done) {
it('GIF grey+alpha', function (done) {
sharp(fixtures.inputGifGreyPlusAlpha).metadata(function (err, metadata) {
if (err) throw err;
assert.strictEqual('gif', metadata.format);
assert.strictEqual('undefined', typeof metadata.size);
assert.strictEqual(2, metadata.width);
assert.strictEqual(1, metadata.height);
assert.strictEqual(2, metadata.channels);
assert.strictEqual(4, metadata.channels);
assert.strictEqual('uchar', metadata.depth);
assert.strictEqual('undefined', typeof metadata.density);
assert.strictEqual('undefined', typeof metadata.chromaSubsampling);
assert.strictEqual(false, metadata.isProgressive);
assert.strictEqual(false, metadata.hasProfile);
assert.strictEqual(true, metadata.hasAlpha);
assert.strictEqual('undefined', typeof metadata.orientation);
assert.strictEqual('undefined', typeof metadata.exif);
assert.strictEqual('undefined', typeof metadata.icc);
@@ -287,7 +286,7 @@ describe('Image metadata', function () {
.then(({
format, width, height, space, channels, depth,
isProgressive, pages, pageHeight, loop, delay,
hasProfile, hasAlpha
background, hasProfile, hasAlpha
}) => {
assert.strictEqual(format, 'gif');
assert.strictEqual(width, 80);
@@ -300,6 +299,7 @@ describe('Image metadata', function () {
assert.strictEqual(pageHeight, 80);
assert.strictEqual(loop, 0);
assert.deepStrictEqual(delay, Array(30).fill(30));
assert.deepStrictEqual(background, { r: 0, g: 0, b: 0 });
assert.strictEqual(hasProfile, false);
assert.strictEqual(hasAlpha, true);
})
@@ -322,7 +322,7 @@ describe('Image metadata', function () {
assert.strictEqual(isProgressive, false);
assert.strictEqual(pages, 10);
assert.strictEqual(pageHeight, 285);
assert.strictEqual(loop, 3);
assert.strictEqual(loop, 2);
assert.deepStrictEqual(delay, [...Array(9).fill(3000), 15000]);
assert.strictEqual(hasProfile, false);
assert.strictEqual(hasAlpha, true);
@@ -623,6 +623,44 @@ describe('Image metadata', function () {
assert.strictEqual(parsedExif.exif.ExposureTime, 0.2);
});
it('Set density of JPEG', async () => {
const data = await sharp({
create: {
width: 8,
height: 8,
channels: 3,
background: 'red'
}
})
.withMetadata({
density: 300
})
.jpeg()
.toBuffer();
const { density } = await sharp(data).metadata();
assert.strictEqual(density, 300);
});
it('Set density of PNG', async () => {
const data = await sharp({
create: {
width: 8,
height: 8,
channels: 3,
background: 'red'
}
})
.withMetadata({
density: 96
})
.png()
.toBuffer();
const { density } = await sharp(data).metadata();
assert.strictEqual(density, 96);
});
it('chromaSubsampling 4:4:4:4 CMYK JPEG', function () {
return sharp(fixtures.inputJpgWithCmykProfile)
.metadata()
@@ -688,6 +726,25 @@ describe('Image metadata', function () {
})
);
it('AVIF', async () => {
const metadata = await sharp(fixtures.inputAvif).metadata();
assert.deepStrictEqual(metadata, {
format: 'heif',
width: 2048,
height: 858,
space: 'srgb',
channels: 3,
depth: 'uchar',
isProgressive: false,
pages: 1,
pageHeight: 858,
pagePrimary: 0,
compression: 'av1',
hasProfile: false,
hasAlpha: false
});
});
it('File input with corrupt header fails gracefully', function (done) {
sharp(fixtures.inputJpgWithCorruptHeader)
.metadata(function (err) {
@@ -736,6 +793,16 @@ describe('Image metadata', function () {
sharp().withMetadata({ orientation: 9 });
});
});
it('Non-numeric density', function () {
assert.throws(function () {
sharp().withMetadata({ density: '1' });
});
});
it('Negative density', function () {
assert.throws(function () {
sharp().withMetadata({ density: -1 });
});
});
it('Non string icc', function () {
assert.throws(function () {
sharp().withMetadata({ icc: true });

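A sketch of the new density option to withMetadata() verified above; the round trip through metadata() only illustrates that the value is written (file names hypothetical):

const sharp = require('sharp');

sharp('input.jpg')
  .withMetadata({ density: 300 }) // DPI written to the output image metadata
  .jpeg()
  .toBuffer()
  .then(data => sharp(data).metadata())
  .then(({ density }) => console.log(density)); // 300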

@@ -18,7 +18,9 @@ describe('Modulate', function () {
{ saturation: null },
{ hue: '50deg' },
{ hue: 1.5 },
{ hue: null }
{ hue: null },
{ lightness: '+50' },
{ lightness: null }
].forEach(function (options) {
it('should throw', function () {
assert.throws(function () {
@@ -108,6 +110,22 @@ describe('Modulate', function () {
assert.deepStrictEqual({ r: 127, g: 83, b: 81 }, { r, g, b });
});
it('should be able to lighten', async () => {
const [r, g, b] = await sharp({
create: {
width: 1,
height: 1,
channels: 3,
background: { r: 153, g: 68, b: 68 }
}
})
.modulate({ lightness: 10 })
.raw()
.toBuffer();
assert.deepStrictEqual({ r: 182, g: 93, b: 92 }, { r, g, b });
});
it('should be able to modulate all channels', async () => {
const [r, g, b] = await sharp({
create: {

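The new lightness option to modulate() is additive, unlike the multiplicative brightness option; a minimal sketch (file names hypothetical):

const sharp = require('sharp');

sharp('input.jpg')
  .modulate({ lightness: 10 }) // add lightness; brightness: 1.1 would multiply instead
  .toFile('lighter.jpg');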

@@ -107,4 +107,88 @@ describe('Negate', function () {
done();
});
});
it('negate ({alpha: true})', function (done) {
sharp(fixtures.inputJpg)
.resize(320, 240)
.negate({ alpha: true })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('jpeg', info.format);
assert.strictEqual(320, info.width);
assert.strictEqual(240, info.height);
fixtures.assertSimilar(fixtures.expected('negate.jpg'), data, done);
});
});
it('negate non-alpha channels (png)', function (done) {
sharp(fixtures.inputPng)
.resize(320, 240)
.negate({ alpha: false })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('png', info.format);
assert.strictEqual(320, info.width);
assert.strictEqual(240, info.height);
fixtures.assertSimilar(fixtures.expected('negate-preserve-alpha.png'), data, done);
});
});
it('negate non-alpha channels (png, trans)', function (done) {
sharp(fixtures.inputPngWithTransparency)
.resize(320, 240)
.negate({ alpha: false })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('png', info.format);
assert.strictEqual(320, info.width);
assert.strictEqual(240, info.height);
fixtures.assertSimilar(fixtures.expected('negate-preserve-alpha-trans.png'), data, done);
});
});
it('negate non-alpha channels (png, alpha)', function (done) {
sharp(fixtures.inputPngWithGreyAlpha)
.resize(320, 240)
.negate({ alpha: false })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('png', info.format);
assert.strictEqual(320, info.width);
assert.strictEqual(240, info.height);
fixtures.assertSimilar(fixtures.expected('negate-preserve-alpha-grey.png'), data, done);
});
});
it('negate non-alpha channels (webp)', function (done) {
sharp(fixtures.inputWebP)
.resize(320, 240)
.negate({ alpha: false })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('webp', info.format);
assert.strictEqual(320, info.width);
assert.strictEqual(240, info.height);
fixtures.assertSimilar(fixtures.expected('negate-preserve-alpha.webp'), data, done);
});
});
it('negate non-alpha channels (webp, trans)', function (done) {
sharp(fixtures.inputWebPWithTransparency)
.resize(320, 240)
.negate({ alpha: false })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('webp', info.format);
assert.strictEqual(320, info.width);
assert.strictEqual(240, info.height);
fixtures.assertSimilar(fixtures.expected('negate-preserve-alpha-trans.webp'), data, done);
});
});
it('invalid alpha value', function () {
assert.throws(function () {
sharp(fixtures.inputWebPWithTransparency).negate({ alpha: 'non-bool' });
});
});
});
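A sketch of the new alpha option to negate(), which leaves any alpha channel untouched when set to false (file names hypothetical):

const sharp = require('sharp');

sharp('overlay.png')
  .negate({ alpha: false }) // invert colour channels only, keep transparency as-is
  .toFile('overlay-negated.png');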


@@ -32,23 +32,21 @@ describe('Platform-detection', function () {
delete process.env.npm_config_arch;
});
it('Can detect ARM version via process.config', function () {
process.env.npm_config_arch = 'arm';
const armVersion = process.config.variables.arm_version;
process.config.variables.arm_version = 'test';
assert.strictEqual('armvtest', platform().split('-')[1]);
process.config.variables.arm_version = armVersion;
delete process.env.npm_config_arch;
});
if (process.config.variables.arm_version) {
it('Can detect ARM version via process.config', function () {
process.env.npm_config_arch = 'arm';
assert.strictEqual(`armv${process.config.variables.arm_version}`, platform().split('-')[1]);
delete process.env.npm_config_arch;
});
}
it('Defaults to ARMv6 for 32-bit', function () {
process.env.npm_config_arch = 'arm';
const armVersion = process.config.variables.arm_version;
delete process.config.variables.arm_version;
assert.strictEqual('armv6', platform().split('-')[1]);
process.config.variables.arm_version = armVersion;
delete process.env.npm_config_arch;
});
if (!process.config.variables.arm_version) {
it('Defaults to ARMv6 for 32-bit', function () {
process.env.npm_config_arch = 'arm';
assert.strictEqual('armv6', platform().split('-')[1]);
delete process.env.npm_config_arch;
});
}
it('Defaults to ARMv8 for 64-bit', function () {
process.env.npm_config_arch = 'arm64';


@@ -7,6 +7,18 @@ const fixtures = require('../fixtures');
describe('Raw pixel data', function () {
describe('Raw pixel input', function () {
it('Empty data', function () {
assert.throws(function () {
sharp(Buffer.from(''));
}, /empty/);
assert.throws(function () {
sharp(new Uint8Array(0));
}, /empty/);
assert.throws(function () {
sharp(new Uint8ClampedArray(0));
}, /empty/);
});
it('Missing options', function () {
assert.throws(function () {
sharp({ raw: {} });
@@ -95,6 +107,52 @@ describe('Raw pixel data', function () {
});
});
it('RGBA premultiplied', function (done) {
// Convert to raw pixel data
sharp(fixtures.inputPngSolidAlpha)
.resize(256)
.raw()
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(256, info.width);
assert.strictEqual(192, info.height);
assert.strictEqual(4, info.channels);
const originalData = Buffer.from(data);
// Premultiply image data
for (let i = 0; i < data.length; i += 4) {
const alpha = data[i + 3];
const norm = alpha / 255;
if (alpha < 255) {
data[i] = Math.round(data[i] * norm);
data[i + 1] = Math.round(data[i + 1] * norm);
data[i + 2] = Math.round(data[i + 2] * norm);
}
}
// Convert back to PNG
sharp(data, {
raw: {
width: info.width,
height: info.height,
channels: info.channels,
premultiplied: true
}
})
.raw()
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(256, info.width);
assert.strictEqual(192, info.height);
assert.strictEqual(4, info.channels);
assert.equal(data.compare(originalData), 0, 'output buffer matches unpremultiplied input buffer');
done();
});
});
});
it('JPEG to raw Stream and back again', function (done) {
const width = 32;
const height = 24;
@@ -121,7 +179,7 @@ describe('Raw pixel data', function () {
});
});
describe('Ouput raw, uncompressed image data', function () {
describe('Output raw, uncompressed image data', function () {
it('1 channel greyscale image', function (done) {
sharp(fixtures.inputJpg)
.greyscale()
@@ -169,7 +227,7 @@ describe('Raw pixel data', function () {
});
});
it('extract A from RGBA', () =>
it('Extract A from RGBA', () =>
sharp(fixtures.inputPngWithTransparency)
.resize(32, 24)
.extractChannel(3)
@@ -183,4 +241,41 @@ describe('Raw pixel data', function () {
})
);
});
describe('Raw pixel depths', function () {
it('Invalid depth', function () {
assert.throws(function () {
sharp(Buffer.alloc(3), { raw: { width: 1, height: 1, channels: 3 } })
.raw({ depth: 'zoinks' });
});
});
for (const { constructor, depth, bits } of [
{ constructor: Uint8Array, depth: undefined, bits: 8 },
{ constructor: Uint8Array, depth: 'uchar', bits: 8 },
{ constructor: Uint8ClampedArray, depth: 'uchar', bits: 8 },
{ constructor: Int8Array, depth: 'char', bits: 8 },
{ constructor: Uint16Array, depth: 'ushort', bits: 16 },
{ constructor: Int16Array, depth: 'short', bits: 16 },
{ constructor: Uint32Array, depth: 'uint', bits: 32 },
{ constructor: Int32Array, depth: 'int', bits: 32 },
{ constructor: Float32Array, depth: 'float', bits: 32 },
{ constructor: Float64Array, depth: 'double', bits: 64 }
]) {
it(constructor.name, () =>
sharp(new constructor(3), { raw: { width: 1, height: 1, channels: 3 } })
.raw({ depth })
.toBuffer({ resolveWithObject: true })
.then(({ data, info }) => {
assert.strictEqual(1, info.width);
assert.strictEqual(1, info.height);
assert.strictEqual(3, info.channels);
if (depth !== undefined) {
assert.strictEqual(depth, info.depth);
}
assert.strictEqual(data.length / 3, bits / 8);
})
);
}
});
});
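A sketch of raw output at a non-default bit depth, matching the table of typed arrays tested above (one 16-bit RGB pixel; values are arbitrary):

const sharp = require('sharp');

const pixel = new Uint16Array([65535, 0, 32768]); // one RGB pixel, 16 bits per channel

sharp(pixel, { raw: { width: 1, height: 1, channels: 3 } })
  .raw({ depth: 'ushort' })
  .toBuffer({ resolveWithObject: true })
  .then(({ data, info }) => {
    console.log(info.depth, data.length); // 'ushort', 6 bytes (3 channels x 2 bytes)
  });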


@@ -605,6 +605,60 @@ describe('Resize dimensions', function () {
});
});
it('Ensure embedded shortest edge (height) is at least 1 pixel', function () {
return sharp({
create: {
width: 200,
height: 1,
channels: 3,
background: 'red'
}
})
.resize({ width: 50, height: 50, fit: sharp.fit.contain })
.toBuffer({ resolveWithObject: true })
.then(function (output) {
assert.strictEqual(50, output.info.width);
assert.strictEqual(50, output.info.height);
});
});
it('Ensure embedded shortest edge (width) is at least 1 pixel', function () {
return sharp({
create: {
width: 1,
height: 200,
channels: 3,
background: 'red'
}
})
.resize({ width: 50, height: 50, fit: sharp.fit.contain })
.toBuffer({ resolveWithObject: true })
.then(function (output) {
assert.strictEqual(50, output.info.width);
assert.strictEqual(50, output.info.height);
});
});
it('Skip shrink-on-load where one dimension <4px', async () => {
const jpeg = await sharp({
create: {
width: 100,
height: 3,
channels: 3,
background: 'red'
}
})
.jpeg()
.toBuffer();
const { info } = await sharp(jpeg)
.resize(8)
.toBuffer({ resolveWithObject: true });
assert.strictEqual(info.width, 8);
assert.strictEqual(info.height, 1);
});
it('unknown kernel throws', function () {
assert.throws(function () {
sharp().resize(null, null, { kernel: 'unknown' });


@@ -423,20 +423,20 @@ describe('Image Stats', function () {
assert.strictEqual(true, isInRange(stats.channels[0].maxY, 0, 1));
// alpha channel
assert.strictEqual(0, stats.channels[1].min);
assert.strictEqual(255, stats.channels[1].max);
assert.strictEqual(true, isInAcceptableRange(stats.channels[1].sum, 255));
assert.strictEqual(true, isInAcceptableRange(stats.channels[1].squaresSum, 65025));
assert.strictEqual(true, isInAcceptableRange(stats.channels[1].mean, 127.5));
assert.strictEqual(true, isInAcceptableRange(stats.channels[1].stdev, 180.31222920256963));
assert.strictEqual(true, isInteger(stats.channels[1].minX));
assert.strictEqual(true, isInRange(stats.channels[1].minX, 0, 2));
assert.strictEqual(true, isInteger(stats.channels[1].minY));
assert.strictEqual(true, isInRange(stats.channels[1].minY, 0, 1));
assert.strictEqual(true, isInteger(stats.channels[1].maxX));
assert.strictEqual(true, isInRange(stats.channels[1].maxX, 0, 2));
assert.strictEqual(true, isInteger(stats.channels[1].maxY));
assert.strictEqual(true, isInRange(stats.channels[1].maxY, 0, 1));
assert.strictEqual(0, stats.channels[3].min);
assert.strictEqual(255, stats.channels[3].max);
assert.strictEqual(true, isInAcceptableRange(stats.channels[3].sum, 255));
assert.strictEqual(true, isInAcceptableRange(stats.channels[3].squaresSum, 65025));
assert.strictEqual(true, isInAcceptableRange(stats.channels[3].mean, 127.5));
assert.strictEqual(true, isInAcceptableRange(stats.channels[3].stdev, 180.31222920256963));
assert.strictEqual(true, isInteger(stats.channels[3].minX));
assert.strictEqual(true, isInRange(stats.channels[3].minX, 0, 2));
assert.strictEqual(true, isInteger(stats.channels[3].minY));
assert.strictEqual(true, isInRange(stats.channels[3].minY, 0, 1));
assert.strictEqual(true, isInteger(stats.channels[3].maxX));
assert.strictEqual(true, isInRange(stats.channels[3].maxX, 0, 2));
assert.strictEqual(true, isInteger(stats.channels[3].maxY));
assert.strictEqual(true, isInRange(stats.channels[3].maxY, 0, 1));
done();
});


@@ -188,6 +188,26 @@ describe('TIFF', function () {
)
);
it('TIFF imputes xres and yres from withMetadataDensity if not explicitly provided', async () => {
const data = await sharp(fixtures.inputTiff)
.resize(8, 8)
.tiff()
.withMetadata({ density: 600 })
.toBuffer();
const { density } = await sharp(data).metadata();
assert.strictEqual(600, density);
});
it('TIFF uses xres and yres over withMetadataDensity if explicitly provided', async () => {
const data = await sharp(fixtures.inputTiff)
.resize(8, 8)
.tiff({ xres: 1000, yres: 1000 })
.withMetadata({ density: 600 })
.toBuffer();
const { density } = await sharp(data).metadata();
assert.strictEqual(25400, density);
});
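In other words, withMetadata({ density }) (DPI) is imputed into the TIFF xres/yres fields only when they are not given explicitly; explicit xres/yres values, which are in pixels per millimetre, take precedence (1000 px/mm is reported as 25400 DPI above). A sketch (file names hypothetical):

const sharp = require('sharp');

// density alone: 600 DPI is written to the TIFF resolution fields
sharp('input.tiff').tiff().withMetadata({ density: 600 }).toFile('a.tiff');

// explicit xres/yres (pixels per mm) override the density setting
sharp('input.tiff').tiff({ xres: 1000, yres: 1000 }).withMetadata({ density: 600 }).toFile('b.tiff');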
it('TIFF invalid xres value should throw an error', function () {
assert.throws(function () {
sharp().tiff({ xres: '1000.0' });

Some files were not shown because too many files have changed in this diff.