Compare commits

97 Commits (SHA1):

34d5252242, f31e4d2869, c695c40abc, fd1ca1dbb2, f25dbd5f61, 541e7104fd, 94945cf6ac, db76e655f8,
d43c7b581d, 383b933e26, d26ccf6294, 6f9699f605, 1e9093d781, 9dc6492e52, d22f7cae6a, 473afaab45,
dcd68303a4, 03394556b5, 1c4f6f75f3, f00928dedb, a48f8fbb61, 1fa388370e, 95ef6b3f71, de11d36d00,
d77c2adabe, c89c055ae0, dac8117f32, 937b091bab, 019e6a1bfe, 1565e58fcf, c22e2a17ef, fd2a10ccea,
0725378257, c431909f35, db4df6f0b2, 17f942c802, 60438ebfe5, 21fbe546b8, 11900945eb, ea5270221b,
a64844689e, 6007e13a22, c3274e480b, 3c54eeda5b, 6236e4b97d, 796738da65, 37d385fafa, db2af42ee7,
24b42ef192, 2ce166ab0a, 71755b69e4, 1106aac2d8, 93aac660a3, 0ce8ad7130, deacd553bf, c8ff7e11a9,
4cff62258c, 0144358afb, 136097efe7, 374c6959d7, 7d48a5ccf4, bf3254cb16, 5bed3a7d52, ece111280b,
a15a9b956b, 42860c2f83, b5b95e5ae1, d705cffdd6, 23a4bc103e, c14434f9e7, 25bd2cea3e, 532de4ecab,
bfdd27eeef, bd9f238ab4, 75556bb57c, 2de062a34a, 4589b15dea, 8b75ce6786, 7bbc5176a1, 5cb35485f1,
80189ed689, 3d7e8ef432, 1999c7103c, 9c20ae383e, 76c41eaf05, 873aa6700f, 0d9590a9a0, 94607b585a,
da0b0348a2, 09263455b5, ddc23493d4, 54a71fc142, b1a9bf10a2, 97cfbe1b63, 0ee8c63551, 0ac5a9ad82,
6e51f2d608
@@ -8,6 +8,7 @@ test
.travis.yml
appveyor.yml
mkdocs.yml
docs/css/
vendor
.prebuildrc
.nyc_output
.travis.yml (89 changes)

@@ -1,34 +1,83 @@
language: node_js
matrix:
include:
- os: linux
dist: trusty
sudo: false
node_js: "4"
- os: linux
- name: "Linux (glibc) - Node 6"
os: linux
dist: trusty
sudo: false
language: node_js
node_js: "6"
- os: linux
- name: "Linux (glibc) - Node 8"
os: linux
dist: trusty
sudo: false
language: node_js
node_js: "8"
- os: linux
- name: "Linux (glibc) - Node 10"
os: linux
dist: trusty
sudo: false
language: node_js
node_js: "10"
- os: osx
osx_image: xcode8.3
node_js: "4"
- os: osx
osx_image: xcode8.3
after_success:
- npm install coveralls
- cat ./coverage/lcov.info | ./node_modules/coveralls/bin/coveralls.js
- name: "Linux (glibc) - Node 11 (Experimental)"
os: linux
dist: trusty
sudo: false
language: node_js
node_js: "11"
before_install:
- unset PREBUILD_TOKEN
- name: "Linux (musl) - Node 8"
os: linux
dist: trusty
sudo: true
language: minimal
before_install:
- sudo docker run -dit --name sharp --env CI --env PREBUILD_TOKEN --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp node:8-alpine
- sudo docker exec sharp apk add build-base git python2 --update-cache
install: sudo docker exec sharp sh -c "npm install --unsafe-perm"
script: sudo docker exec sharp sh -c "npm test"
- name: "Linux (musl) - Node 10"
os: linux
dist: trusty
sudo: true
language: minimal
before_install:
- sudo docker run -dit --name sharp --env CI --env PREBUILD_TOKEN --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp node:10-alpine
- sudo docker exec sharp apk add build-base git python2 --update-cache
install: sudo docker exec sharp sh -c "npm install --unsafe-perm"
script: sudo docker exec sharp sh -c "npm test"
- name: "Linux (musl) - Node 11 (Experimental)"
os: linux
dist: trusty
sudo: true
language: minimal
before_install:
- sudo docker run -dit --name sharp --env CI --volume "${PWD}:/mnt/sharp" --workdir /mnt/sharp node:11-alpine
- sudo docker exec sharp apk add build-base git python2 --update-cache
install: sudo docker exec sharp sh -c "npm install --unsafe-perm"
script: sudo docker exec sharp sh -c "npm test"
- name: "OS X - Node 6"
os: osx
osx_image: xcode9.2
language: node_js
node_js: "6"
- os: osx
osx_image: xcode8.3
- name: "OS X - Node 8"
os: osx
osx_image: xcode9.2
language: node_js
node_js: "8"
- os: osx
osx_image: xcode8.3
- name: "OS X - Node 10"
os: osx
osx_image: xcode9.2
language: node_js
node_js: "10"
after_success:
- npm install coveralls
- cat ./coverage/lcov.info | ./node_modules/coveralls/bin/coveralls.js
- name: "OS X - Node 11 (Experimental)"
os: osx
osx_image: xcode9.2
language: node_js
node_js: "11"
before_install:
- unset PREBUILD_TOKEN
@@ -14,7 +14,7 @@ New bugs are assigned a `triage` label whilst under investigation.

If a [similar request](https://github.com/lovell/sharp/labels/enhancement) exists, it's probably fastest to add a comment to it about your requirement.

Implementation is usually straightforward if _libvips_ [already supports](https://jcupitt.github.io/libvips/API/current/) the feature you need.
Implementation is usually straightforward if _libvips_ [already supports](https://libvips.github.io/libvips/API/current/) the feature you need.

## Submit a Pull Request to fix a bug
@@ -21,8 +21,8 @@ Lanczos resampling ensures quality is not sacrificed for speed.

As well as image resizing, operations such as
rotation, extraction, compositing and gamma correction are available.

Most modern 64-bit OS X, Windows and Linux (glibc) systems running
Node versions 4, 6, 8 and 10
Most modern 64-bit OS X, Windows and Linux systems running
Node versions 6, 8 and 10
do not require any additional install or runtime dependencies.

## Examples

@@ -48,7 +48,7 @@ sharp('input.jpg')
```

```javascript
const roundedCorners = new Buffer(
const roundedCorners = Buffer.from(
  '<svg><rect x="0" y="0" width="200" height="200" rx="50" ry="50"/></svg>'
);
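A minimal sketch of one way the `roundedCorners` buffer above might be used with the overlay support documented later in this compare; the input and output file names are placeholders, not part of the original README hunk.

```javascript
const sharp = require('sharp');

const roundedCorners = Buffer.from(
  '<svg><rect x="0" y="0" width="200" height="200" rx="50" ry="50"/></svg>'
);

// Resize a hypothetical input.jpg and cut rounded corners using the SVG as an alpha mask.
sharp('input.jpg')
  .resize(200, 200)
  .overlayWith(roundedCorners, { cutout: true })
  .png()
  .toFile('output.png')
  .then(info => console.log(info))
  .catch(err => console.error(err));
```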
@@ -78,7 +78,7 @@ Visit [sharp.pixelplumbing.com](http://sharp.pixelplumbing.com/) for complete

A [guide for contributors](https://github.com/lovell/sharp/blob/master/CONTRIBUTING.md)
covers reporting bugs, requesting features and submitting code changes.

### Licence
### Licensing

Copyright 2013, 2014, 2015, 2016, 2017, 2018 Lovell Fuller and contributors.
@@ -4,10 +4,11 @@ build: off
platform: x64
environment:
matrix:
- nodejs_version: "4"
- nodejs_version: "6"
- nodejs_version: "8"
- nodejs_version: "10"
- nodejs_version: "11"
PREBUILD_TOKEN: ""
install:
- ps: Install-Product node $env:nodejs_version x64
- npm install -g npm@5
binding.gyp (17 changes)

@@ -128,9 +128,11 @@
'../vendor/lib/libcairo.so',
'../vendor/lib/libcroco-0.6.so',
'../vendor/lib/libexif.so',
'../vendor/lib/libexpat.so',
'../vendor/lib/libffi.so',
'../vendor/lib/libfontconfig.so',
'../vendor/lib/libfreetype.so',
'../vendor/lib/libfribidi.so',
'../vendor/lib/libgdk_pixbuf-2.0.so',
'../vendor/lib/libgif.so',
'../vendor/lib/libgio-2.0.so',
@@ -138,6 +140,7 @@
'../vendor/lib/libgsf-1.so',
'../vendor/lib/libgthread-2.0.so',
'../vendor/lib/libharfbuzz.so',
'../vendor/lib/libharfbuzz-subset.so.0',
'../vendor/lib/libjpeg.so',
'../vendor/lib/liblcms2.so',
'../vendor/lib/liborc-0.4.so',
@@ -149,6 +152,8 @@
'../vendor/lib/librsvg-2.so',
'../vendor/lib/libtiff.so',
'../vendor/lib/libwebp.so',
'../vendor/lib/libwebpdemux.so',
'../vendor/lib/libwebpmux.so',
'../vendor/lib/libxml2.so',
'../vendor/lib/libz.so',
# Ensure runtime linking is relative to sharp.node
@@ -178,13 +183,23 @@
},
'configurations': {
'Release': {
'cflags_cc': [
'-Wno-cast-function-type',
'-Wno-deprecated-declarations'
],
'xcode_settings': {
'OTHER_CPLUSPLUSFLAGS': [
'-Wno-deprecated-declarations'
]
},
'msvs_settings': {
'VCCLCompilerTool': {
'ExceptionHandling': 1
}
},
'msvs_disabled_warnings': [
4275
4275,
4996
]
}
},
@@ -1,20 +1,30 @@
<!-- Generated by documentation.js. Update this documentation by updating the source code. -->

### Table of Contents
## removeAlpha

- [extractChannel][1]
- [joinChannel][2]
- [bandbool][3]
Remove alpha channel, if any. This is a no-op if the image does not have an alpha channel.

### Examples

```javascript
sharp('rgba.png')
  .removeAlpha()
  .toFile('rgb.png', function(err, info) {
    // rgb.png is a 3 channel image without an alpha channel
  });
```

Returns **Sharp**

## extractChannel

Extract a single channel from a multi-channel image.

**Parameters**
### Parameters

- `channel` **([Number][4] \| [String][5])** zero-indexed band number to extract, or `red`, `green` or `blue` as alternative to `0`, `1` or `2` respectively.
- `channel` **([Number][1] \| [String][2])** zero-indexed band number to extract, or `red`, `green` or `blue` as alternative to `0`, `1` or `2` respectively.

**Examples**
### Examples

```javascript
sharp(input)
@@ -25,7 +35,7 @@ sharp(input)
  });
```

- Throws **[Error][6]** Invalid channel
- Throws **[Error][3]** Invalid channel

Returns **Sharp**

@@ -42,13 +52,13 @@ Channel ordering follows vips convention:
Buffers may be any of the image formats supported by sharp: JPEG, PNG, WebP, GIF, SVG, TIFF or raw pixel image data.
For raw pixel input, the `options` object should contain a `raw` attribute, which follows the format of the attribute of the same name in the `sharp()` constructor.

**Parameters**
### Parameters

- `images` **([Array][7]<([String][5] \| [Buffer][8])> | [String][5] \| [Buffer][8])** one or more images (file paths, Buffers).
- `options` **[Object][9]** image options, see `sharp()` constructor.
- `images` **([Array][4]<([String][2] \| [Buffer][5])> | [String][2] \| [Buffer][5])** one or more images (file paths, Buffers).
- `options` **[Object][6]** image options, see `sharp()` constructor.

- Throws **[Error][6]** Invalid parameters
- Throws **[Error][3]** Invalid parameters

Returns **Sharp**
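The `joinChannel` hunk above documents parameters but no example survives in this diff. A minimal sketch, assuming three hypothetical single-channel PNG files:

```javascript
const sharp = require('sharp');

// Join green and blue channel images onto a red channel image (hypothetical file names).
sharp('red-channel.png')
  .joinChannel(['green-channel.png', 'blue-channel.png'])
  .toFile('rgb.png', function (err, info) {
    // info.channels should report 3 when the join succeeds
  });
```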
@@ -56,11 +66,11 @@ Returns **Sharp**

Perform a bitwise boolean operation on all input image channels (bands) to produce a single channel output image.

**Parameters**
### Parameters

- `boolOp` **[String][5]** one of `and`, `or` or `eor` to perform that bitwise operation, like the C logic operators `&`, `|` and `^` respectively.
- `boolOp` **[String][2]** one of `and`, `or` or `eor` to perform that bitwise operation, like the C logic operators `&`, `|` and `^` respectively.

**Examples**
### Examples

```javascript
sharp('3-channel-rgb-input.png')
@@ -72,24 +82,18 @@ sharp('3-channel-rgb-input.png')
  });
```

- Throws **[Error][6]** Invalid parameters
- Throws **[Error][3]** Invalid parameters

Returns **Sharp**

[1]: #extractchannel
[1]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number

[2]: #joinchannel
[2]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String

[3]: #bandbool
[3]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error

[4]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number
[4]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Array

[5]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String
[5]: https://nodejs.org/api/buffer.html

[6]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error

[7]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Array

[8]: https://nodejs.org/api/buffer.html

[9]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object
[6]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object
@@ -1,43 +1,16 @@
<!-- Generated by documentation.js. Update this documentation by updating the source code. -->

### Table of Contents

- [background][1]
- [tint][2]
- [greyscale][3]
- [grayscale][4]
- [toColourspace][5]
- [toColorspace][6]

## background

Set the background for the `embed`, `flatten` and `extend` operations.
The default background is `{r: 0, g: 0, b: 0, alpha: 1}`, black without transparency.

Delegates to the _color_ module, which can throw an Error
but is liberal in what it accepts, clipping values to sensible min/max.
The alpha value is a float between `0` (transparent) and `1` (opaque).

**Parameters**

- `rgba` **([String][7] \| [Object][8])** parsed by the [color][9] module to extract values for red, green, blue and alpha.

- Throws **[Error][10]** Invalid parameter

Returns **Sharp**

## tint

Tint the image using the provided chroma while preserving the image luminance.
An alpha channel may be present and will be unchanged by the operation.

**Parameters**
### Parameters

- `rgb` **([String][7] \| [Object][8])** parsed by the [color][9] module to extract chroma values.
- `rgb` **([String][1] \| [Object][2])** parsed by the [color][3] module to extract chroma values.

- Throws **[Error][10]** Invalid parameter
- Throws **[Error][4]** Invalid parameter

Returns **Sharp**
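No example accompanies `tint` in this hunk. A minimal sketch, with hypothetical file names, passing a chroma that the color module can parse:

```javascript
const sharp = require('sharp');

// Apply a warm tint while preserving luminance (hypothetical file names).
sharp('input.jpg')
  .tint({ r: 255, g: 240, b: 16 })
  .toFile('tinted.jpg', function (err, info) {
    // tinted.jpg keeps the original luminance with the requested chroma
  });
```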
@@ -50,9 +23,9 @@ This may be overridden by other sharp operations such as `toColourspace('b-w')`,
which will produce an output image containing one color channel.
An alpha channel may be present, and will be unchanged by the operation.

**Parameters**
### Parameters

- `greyscale` **[Boolean][11]** (optional, default `true`)
- `greyscale` **[Boolean][5]** (optional, default `true`)

Returns **Sharp**

@@ -60,9 +33,9 @@ Returns **Sharp**

Alternative spelling of `greyscale`.

**Parameters**
### Parameters

- `grayscale` **[Boolean][11]** (optional, default `true`)
- `grayscale` **[Boolean][5]** (optional, default `true`)

Returns **Sharp**

@@ -71,12 +44,12 @@ Returns **Sharp**

Set the output colourspace.
By default output image will be web-friendly sRGB, with additional channels interpreted as alpha channels.

**Parameters**
### Parameters

- `colourspace` **[String][7]?** output colourspace e.g. `srgb`, `rgb`, `cmyk`, `lab`, `b-w` [...][12]
- `colourspace` **[String][1]?** output colourspace e.g. `srgb`, `rgb`, `cmyk`, `lab`, `b-w` [...][6]

- Throws **[Error][10]** Invalid parameters
- Throws **[Error][4]** Invalid parameters

Returns **Sharp**
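A minimal sketch of `toColourspace`, using the `b-w` value listed above; the file names are hypothetical:

```javascript
const sharp = require('sharp');

// Produce a single-channel greyscale output by switching the output colourspace.
sharp('input.jpg')
  .toColourspace('b-w')
  .toFile('single-channel.png', function (err, info) {
    // info.channels should be 1 for a b-w output
  });
```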
@@ -84,35 +57,23 @@ Returns **Sharp**

Alternative spelling of `toColourspace`.

**Parameters**
### Parameters

- `colorspace` **[String][7]?** output colorspace.
- `colorspace` **[String][1]?** output colorspace.

- Throws **[Error][10]** Invalid parameters
- Throws **[Error][4]** Invalid parameters

Returns **Sharp**

[1]: #background
[1]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String

[2]: #tint
[2]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object

[3]: #greyscale
[3]: https://www.npmjs.org/package/color

[4]: #grayscale
[4]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error

[5]: #tocolourspace
[5]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean

[6]: #tocolorspace

[7]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String

[8]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object

[9]: https://www.npmjs.org/package/color

[10]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error

[11]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean

[12]: https://github.com/jcupitt/libvips/blob/master/libvips/iofuncs/enumtypes.c#L568
[6]: https://github.com/libvips/libvips/blob/master/libvips/iofuncs/enumtypes.c#L568
@@ -1,9 +1,5 @@
<!-- Generated by documentation.js. Update this documentation by updating the source code. -->

### Table of Contents

- [overlayWith][1]

## overlayWith

Overlay (composite) an image over the processed (resized, extracted etc.) image.
@@ -13,27 +9,27 @@ If both `top` and `left` options are provided, they take precedence over `gravit

If the overlay image contains an alpha channel then composition with premultiplication will occur.

**Parameters**
### Parameters

- `overlay` **([Buffer][2] \| [String][3])** Buffer containing image data or String containing the path to an image file.
- `options` **[Object][4]?**
- `options.gravity` **[String][3]** gravity at which to place the overlay. (optional, default `'centre'`)
- `options.top` **[Number][5]?** the pixel offset from the top edge.
- `options.left` **[Number][5]?** the pixel offset from the left edge.
- `options.tile` **[Boolean][6]** set to true to repeat the overlay image across the entire image with the given `gravity`. (optional, default `false`)
- `options.cutout` **[Boolean][6]** set to true to apply only the alpha channel of the overlay image to the input image, giving the appearance of one image being cut out of another. (optional, default `false`)
- `options.density` **[Number][5]** integral number representing the DPI for vector overlay image. (optional, default `72`)
- `options.raw` **[Object][4]?** describes overlay when using raw pixel data.
- `options.raw.width` **[Number][5]?**
- `options.raw.height` **[Number][5]?**
- `options.raw.channels` **[Number][5]?**
- `options.create` **[Object][4]?** describes a blank overlay to be created.
- `options.create.width` **[Number][5]?**
- `options.create.height` **[Number][5]?**
- `options.create.channels` **[Number][5]?** 3-4
- `options.create.background` **([String][3] \| [Object][4])?** parsed by the [color][7] module to extract values for red, green, blue and alpha.
- `overlay` **([Buffer][1] \| [String][2])** Buffer containing image data or String containing the path to an image file.
- `options` **[Object][3]?**
- `options.gravity` **[String][2]** gravity at which to place the overlay. (optional, default `'centre'`)
- `options.top` **[Number][4]?** the pixel offset from the top edge.
- `options.left` **[Number][4]?** the pixel offset from the left edge.
- `options.tile` **[Boolean][5]** set to true to repeat the overlay image across the entire image with the given `gravity`. (optional, default `false`)
- `options.cutout` **[Boolean][5]** set to true to apply only the alpha channel of the overlay image to the input image, giving the appearance of one image being cut out of another. (optional, default `false`)
- `options.density` **[Number][4]** number representing the DPI for vector overlay image. (optional, default `72`)
- `options.raw` **[Object][3]?** describes overlay when using raw pixel data.
- `options.raw.width` **[Number][4]?**
- `options.raw.height` **[Number][4]?**
- `options.raw.channels` **[Number][4]?**
- `options.create` **[Object][3]?** describes a blank overlay to be created.
- `options.create.width` **[Number][4]?**
- `options.create.height` **[Number][4]?**
- `options.create.channels` **[Number][4]?** 3-4
- `options.create.background` **([String][2] \| [Object][3])?** parsed by the [color][6] module to extract values for red, green, blue and alpha.

**Examples**
### Examples

```javascript
sharp('input.png')
@@ -44,8 +40,7 @@ sharp('input.png')
  .overlayWith('overlay.png', { gravity: sharp.gravity.southeast } )
  .sharpen()
  .withMetadata()
  .quality(90)
  .webp()
  .webp( { quality: 90 } )
  .toBuffer()
  .then(function(outputBuffer) {
    // outputBuffer contains upside down, 300px wide, alpha channel flattened
@@ -54,22 +49,20 @@ sharp('input.png')
  });
```

- Throws **[Error][8]** Invalid parameters
- Throws **[Error][7]** Invalid parameters

Returns **Sharp**
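The `tile` and `cutout` options above are only described in prose. A minimal sketch of tiling a watermark across the output, with hypothetical file names:

```javascript
const sharp = require('sharp');

// Repeat watermark.png across the resized image, anchored at the north-west corner.
sharp('photo.jpg')
  .resize(800)
  .overlayWith('watermark.png', { tile: true, gravity: sharp.gravity.northwest })
  .toFile('watermarked.jpg')
  .then(info => console.log(info));
```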
[1]: #overlaywith
[1]: https://nodejs.org/api/buffer.html

[2]: https://nodejs.org/api/buffer.html
[2]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String

[3]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String
[3]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object

[4]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object
[4]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number

[5]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number
[5]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean

[6]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean
[6]: https://www.npmjs.org/package/color

[7]: https://www.npmjs.org/package/color

[8]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error
[7]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error
@@ -1,37 +1,30 @@
<!-- Generated by documentation.js. Update this documentation by updating the source code. -->

### Table of Contents

- [Sharp][1]
- [format][2]
- [versions][3]
- [queue][4]

## Sharp

**Parameters**
### Parameters

- `input` **([Buffer][5] \| [String][6])?** if present, can be
- `input` **([Buffer][1] \| [String][2])?** if present, can be
a Buffer containing JPEG, PNG, WebP, GIF, SVG, TIFF or raw pixel image data, or
a String containing the path to a JPEG, PNG, WebP, GIF, SVG or TIFF image file.
JPEG, PNG, WebP, GIF, SVG, TIFF or raw pixel image data can be streamed into the object when not present.
- `options` **[Object][7]?** if present, is an Object with optional attributes.
- `options.failOnError` **[Boolean][8]** by default apply a "best effort"
- `options` **[Object][3]?** if present, is an Object with optional attributes.
- `options.failOnError` **[Boolean][4]** by default apply a "best effort"
to decode images, even if the data is corrupt or invalid. Set this flag to true
if you'd rather halt processing and raise an error when loading invalid images. (optional, default `false`)
- `options.density` **[Number][9]** integral number representing the DPI for vector images. (optional, default `72`)
- `options.page` **[Number][9]** page number to extract for multi-page input (GIF, TIFF) (optional, default `0`)
- `options.raw` **[Object][7]?** describes raw pixel input image data. See `raw()` for pixel ordering.
- `options.raw.width` **[Number][9]?**
- `options.raw.height` **[Number][9]?**
- `options.raw.channels` **[Number][9]?** 1-4
- `options.create` **[Object][7]?** describes a new image to be created.
- `options.create.width` **[Number][9]?**
- `options.create.height` **[Number][9]?**
- `options.create.channels` **[Number][9]?** 3-4
- `options.create.background` **([String][6] \| [Object][7])?** parsed by the [color][10] module to extract values for red, green, blue and alpha.
- `options.density` **[Number][5]** number representing the DPI for vector images. (optional, default `72`)
- `options.page` **[Number][5]** page number to extract for multi-page input (GIF, TIFF) (optional, default `0`)
- `options.raw` **[Object][3]?** describes raw pixel input image data. See `raw()` for pixel ordering.
- `options.raw.width` **[Number][5]?**
- `options.raw.height` **[Number][5]?**
- `options.raw.channels` **[Number][5]?** 1-4
- `options.create` **[Object][3]?** describes a new image to be created.
- `options.create.width` **[Number][5]?**
- `options.create.height` **[Number][5]?**
- `options.create.channels` **[Number][5]?** 3-4
- `options.create.background` **([String][2] \| [Object][3])?** parsed by the [color][6] module to extract values for red, green, blue and alpha.

**Examples**
### Examples

```javascript
sharp('input.jpg')
@@ -62,7 +55,7 @@ sharp({
    width: 300,
    height: 200,
    channels: 4,
    background: { r: 255, g: 0, b: 0, alpha: 128 }
    background: { r: 255, g: 0, b: 0, alpha: 0.5 }
  }
})
  .png()
@@ -70,27 +63,27 @@ sharp({
  .then( ... );
```

- Throws **[Error][11]** Invalid parameters
- Throws **[Error][7]** Invalid parameters

Returns **[Sharp][12]**
Returns **[Sharp][8]**
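A minimal sketch of the `failOnError` behaviour documented above, with a hypothetical file name: decoding halts with an error instead of the default best-effort handling.

```javascript
const sharp = require('sharp');

// Reject possibly-corrupt input rather than decoding it on a best-effort basis.
sharp('possibly-corrupt.jpg', { failOnError: true })
  .resize(200, 200)
  .toBuffer()
  .then(data => console.log('decoded and resized', data.length))
  .catch(err => console.error('invalid image data', err));
```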
### format

An Object containing nested boolean values representing the available input and output formats/methods.

**Examples**
#### Examples

```javascript
console.log(sharp.format);
```

Returns **[Object][7]**
Returns **[Object][3]**

### versions

An Object containing the version numbers of libvips and its dependencies.

**Examples**
#### Examples

```javascript
console.log(sharp.versions);
@@ -103,7 +96,7 @@ An EventEmitter that emits a `change` event when a task is either:

- queued, waiting for _libuv_ to provide a worker thread
- complete

**Examples**
### Examples

```javascript
sharp.queue.on('change', function(queueLength) {
@@ -111,26 +104,18 @@ sharp.queue.on('change', function(queueLength) {
});
```

[1]: #sharp
[1]: https://nodejs.org/api/buffer.html

[2]: #format
[2]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String

[3]: #versions
[3]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object

[4]: #queue
[4]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean

[5]: https://nodejs.org/api/buffer.html
[5]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number

[6]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String
[6]: https://www.npmjs.org/package/color

[7]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object
[7]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error

[8]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean

[9]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number

[10]: https://www.npmjs.org/package/color

[11]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error

[12]: #sharp
[8]: #sharp
@@ -1,20 +1,12 @@
<!-- Generated by documentation.js. Update this documentation by updating the source code. -->

### Table of Contents

- [clone][1]
- [metadata][2]
- [stats][3]
- [limitInputPixels][4]
- [sequentialRead][5]

## clone

Take a "snapshot" of the Sharp instance, returning a new instance.
Cloned instances inherit the input of their parent instance.
This allows multiple output Streams and therefore multiple processing pipelines to share a single input Stream.

**Examples**
### Examples

```javascript
const pipeline = sharp().rotate();
@@ -33,25 +25,28 @@ Fast access to (uncached) image metadata without decoding any compressed image d

A Promises/A+ promise is returned when `callback` is not provided.

- `format`: Name of decoder used to decompress image data e.g. `jpeg`, `png`, `webp`, `gif`, `svg`
- `size`: Total size of image in bytes, for Stream and Buffer input only
- `width`: Number of pixels wide (EXIF orientation is not taken into consideration)
- `height`: Number of pixels high (EXIF orientation is not taken into consideration)
- `space`: Name of colour space interpretation e.g. `srgb`, `rgb`, `cmyk`, `lab`, `b-w` [...][6]
- `space`: Name of colour space interpretation e.g. `srgb`, `rgb`, `cmyk`, `lab`, `b-w` [...][1]
- `channels`: Number of bands e.g. `3` for sRGB, `4` for CMYK
- `depth`: Name of pixel depth format e.g. `uchar`, `char`, `ushort`, `float` [...][7]
- `depth`: Name of pixel depth format e.g. `uchar`, `char`, `ushort`, `float` [...][2]
- `density`: Number of pixels per inch (DPI), if present
- `chromaSubsampling`: String containing JPEG chroma subsampling, `4:2:0` or `4:4:4` for RGB, `4:2:0:4` or `4:4:4:4` for CMYK
- `isProgressive`: Boolean indicating whether the image is interlaced using a progressive scan
- `hasProfile`: Boolean indicating the presence of an embedded ICC profile
- `hasAlpha`: Boolean indicating the presence of an alpha transparency channel
- `orientation`: Number value of the EXIF Orientation header, if present
- `exif`: Buffer containing raw EXIF data, if present
- `icc`: Buffer containing raw [ICC][8] profile data, if present
- `icc`: Buffer containing raw [ICC][3] profile data, if present
- `iptc`: Buffer containing raw IPTC data, if present
- `xmp`: Buffer containing raw XMP data, if present

**Parameters**
### Parameters

- `callback` **[Function][9]?** called with the arguments `(err, metadata)`
- `callback` **[Function][4]?** called with the arguments `(err, metadata)`

**Examples**
### Examples

```javascript
const image = sharp(inputJpg);
@@ -68,7 +63,7 @@ image
});
```

Returns **([Promise][10]<[Object][11]> | Sharp)**
Returns **([Promise][5]<[Object][6]> | Sharp)**

## stats

@@ -87,12 +82,13 @@ A Promise is returned when `callback` is not provided.

- `maxX` (x-coordinate of one of the pixels where the maximum lies)
- `maxY` (y-coordinate of one of the pixels where the maximum lies)
- `isOpaque`: Value to identify if the image is opaque or transparent, based on the presence and use of alpha channel
- `entropy`: Histogram-based estimation of greyscale entropy, discarding alpha channel if any (experimental)

**Parameters**
### Parameters

- `callback` **[Function][9]?** called with the arguments `(err, stats)`
- `callback` **[Function][4]?** called with the arguments `(err, stats)`

**Examples**
### Examples

```javascript
const image = sharp(inputJpg);
@@ -103,7 +99,7 @@ image
});
```

Returns **[Promise][10]<[Object][11]>**
Returns **[Promise][5]<[Object][6]>**

## limitInputPixels

@@ -111,12 +107,12 @@ Do not process input images where the number of pixels (width x height) exceeds
Assumes image dimensions contained in the input metadata can be trusted.
The default limit is 268402689 (0x3FFF x 0x3FFF) pixels.

**Parameters**
### Parameters

- `limit` **([Number][12] \| [Boolean][13])** an integral Number of pixels, zero or false to remove limit, true to use default limit.
- `limit` **([Number][7] \| [Boolean][8])** an integral Number of pixels, zero or false to remove limit, true to use default limit.

- Throws **[Error][14]** Invalid limit
- Throws **[Error][9]** Invalid limit

Returns **Sharp**
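A minimal sketch of `limitInputPixels`, assuming a hypothetical input file; images above the given pixel count are rejected before decoding:

```javascript
const sharp = require('sharp');

// Refuse to process anything larger than roughly 16 megapixels.
sharp('upload.jpg')
  .limitInputPixels(16 * 1000 * 1000)
  .resize(320)
  .toBuffer()
  .catch(err => {
    // err is raised when width x height exceeds the configured limit
  });
```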
@@ -127,36 +123,26 @@ This will reduce memory usage and can improve performance on some systems.

The default behaviour _before_ function call is `false`, meaning the libvips access method is not sequential.

**Parameters**
### Parameters

- `sequentialRead` **[Boolean][13]** (optional, default `true`)
- `sequentialRead` **[Boolean][8]** (optional, default `true`)

Returns **Sharp**
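A minimal sketch of enabling `sequentialRead` for a single resize pipeline; file names are hypothetical:

```javascript
const sharp = require('sharp');

// Use the libvips sequential access method to reduce memory usage.
sharp('large-input.jpg')
  .sequentialRead()
  .resize(1024)
  .toFile('resized.jpg', function (err, info) {
    // info describes the resized output
  });
```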
[1]: #clone
[1]: https://github.com/libvips/libvips/blob/master/libvips/iofuncs/enumtypes.c#L636

[2]: #metadata
[2]: https://github.com/libvips/libvips/blob/master/libvips/iofuncs/enumtypes.c#L672

[3]: #stats
[3]: https://www.npmjs.com/package/icc

[4]: #limitinputpixels
[4]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Statements/function

[5]: #sequentialread
[5]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise

[6]: https://github.com/jcupitt/libvips/blob/master/libvips/iofuncs/enumtypes.c#L636
[6]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object

[7]: https://github.com/jcupitt/libvips/blob/master/libvips/iofuncs/enumtypes.c#L672
[7]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number

[8]: https://www.npmjs.com/package/icc
[8]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean

[9]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Statements/function

[10]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise

[11]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object

[12]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number

[13]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean

[14]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error
[9]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error
@@ -1,34 +1,16 @@
<!-- Generated by documentation.js. Update this documentation by updating the source code. -->

### Table of Contents

- [rotate][1]
- [extract][2]
- [flip][3]
- [flop][4]
- [sharpen][5]
- [median][6]
- [blur][7]
- [extend][8]
- [flatten][9]
- [trim][10]
- [gamma][11]
- [negate][12]
- [normalise][13]
- [normalize][14]
- [convolve][15]
- [threshold][16]
- [boolean][17]
- [linear][18]

## rotate

Rotate the output image by either an explicit angle
or auto-orient based on the EXIF `Orientation` tag.

If an angle is provided, it is converted to a valid 90/180/270deg rotation.
If an angle is provided, it is converted to a valid positive degree rotation.
For example, `-450` will produce a 270deg rotation.

When rotating by an angle other than a multiple of 90,
the background colour can be provided with the `background` option.

If no angle is provided, it is determined from the EXIF data.
Mirroring is supported and may infer the use of a flip operation.

@@ -37,11 +19,13 @@ The use of `rotate` implies the removal of the EXIF `Orientation` tag, if any.

Method order is important when both rotating and extracting regions,
for example `rotate(x).extract(y)` will produce a different result to `extract(y).rotate(x)`.

**Parameters**
### Parameters

- `angle` **[Number][19]** angle of rotation, must be a multiple of 90. (optional, default `auto`)
- `angle` **[Number][1]** angle of rotation. (optional, default `auto`)
- `options` **[Object][2]?** if present, is an Object with optional attributes.
- `options.background` **([String][3] \| [Object][2])** parsed by the [color][4] module to extract values for red, green, blue and alpha. (optional, default `"#000000"`)

**Examples**
### Examples

```javascript
const pipeline = sharp()
@@ -55,47 +39,7 @@ const pipeline = sharp()
readableStream.pipe(pipeline);
```

- Throws **[Error][20]** Invalid parameters

Returns **Sharp**

## extract

Extract a region of the image.

- Use `extract` before `resize` for pre-resize extraction.
- Use `extract` after `resize` for post-resize extraction.
- Use `extract` before and after for both.

**Parameters**

- `options` **[Object][21]**
- `options.left` **[Number][19]** zero-indexed offset from left edge
- `options.top` **[Number][19]** zero-indexed offset from top edge
- `options.width` **[Number][19]** dimension of extracted image
- `options.height` **[Number][19]** dimension of extracted image

**Examples**

```javascript
sharp(input)
  .extract({ left: left, top: top, width: width, height: height })
  .toFile(output, function(err) {
    // Extract a region of the input image, saving in the same format.
  });
```

```javascript
sharp(input)
  .extract({ left: leftOffsetPre, top: topOffsetPre, width: widthPre, height: heightPre })
  .resize(width, height)
  .extract({ left: leftOffsetPost, top: topOffsetPost, width: widthPost, height: heightPost })
  .toFile(output, function(err) {
    // Extract a region, resize, then extract from the resized image
  });
```

- Throws **[Error][20]** Invalid parameters
- Throws **[Error][5]** Invalid parameters

Returns **Sharp**

@@ -104,9 +48,9 @@ Returns **Sharp**

Flip the image about the vertical Y axis. This always occurs after rotation, if any.
The use of `flip` implies the removal of the EXIF `Orientation` tag, if any.

**Parameters**
### Parameters

- `flip` **[Boolean][22]** (optional, default `true`)
- `flip` **[Boolean][6]** (optional, default `true`)

Returns **Sharp**

@@ -115,9 +59,9 @@ Returns **Sharp**

Flop the image about the horizontal X axis. This always occurs after rotation, if any.
The use of `flop` implies the removal of the EXIF `Orientation` tag, if any.

**Parameters**
### Parameters

- `flop` **[Boolean][22]** (optional, default `true`)
- `flop` **[Boolean][6]** (optional, default `true`)

Returns **Sharp**

@@ -128,14 +72,14 @@ When used without parameters, performs a fast, mild sharpen of the output image.

When a `sigma` is provided, performs a slower, more accurate sharpen of the L channel in the LAB colour space.
Separate control over the level of sharpening in "flat" and "jagged" areas is available.

**Parameters**
### Parameters

- `sigma` **[Number][19]?** the sigma of the Gaussian mask, where `sigma = 1 + radius / 2`.
- `flat` **[Number][19]** the level of sharpening to apply to "flat" areas. (optional, default `1.0`)
- `jagged` **[Number][19]** the level of sharpening to apply to "jagged" areas. (optional, default `2.0`)
- `sigma` **[Number][1]?** the sigma of the Gaussian mask, where `sigma = 1 + radius / 2`.
- `flat` **[Number][1]** the level of sharpening to apply to "flat" areas. (optional, default `1.0`)
- `jagged` **[Number][1]** the level of sharpening to apply to "jagged" areas. (optional, default `2.0`)

- Throws **[Error][20]** Invalid parameters
- Throws **[Error][5]** Invalid parameters

Returns **Sharp**
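No example survives for `sharpen` in this hunk. A minimal sketch, assuming hypothetical file names and the sigma/flat/jagged parameters listed above:

```javascript
const sharp = require('sharp');

// Slower, more accurate sharpen of the L channel with custom flat/jagged levels.
sharp('input.jpg')
  .resize(400)
  .sharpen(2, 1, 2)
  .toFile('sharpened.jpg', function (err, info) {
    // sharpened.jpg is the resized, sharpened output
  });
```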
@@ -144,12 +88,12 @@ Returns **Sharp**

Apply median filter.
When used without parameters the default window is 3x3.

**Parameters**
### Parameters

- `size` **[Number][19]** square mask size: size x size (optional, default `3`)
- `size` **[Number][1]** square mask size: size x size (optional, default `3`)

- Throws **[Error][20]** Invalid parameters
- Throws **[Error][5]** Invalid parameters

Returns **Sharp**
@@ -159,64 +103,23 @@ Blur the image.

When used without parameters, performs a fast, mild blur of the output image.
When a `sigma` is provided, performs a slower, more accurate Gaussian blur.

**Parameters**
### Parameters

- `sigma` **[Number][19]?** a value between 0.3 and 1000 representing the sigma of the Gaussian mask, where `sigma = 1 + radius / 2`.
- `sigma` **[Number][1]?** a value between 0.3 and 1000 representing the sigma of the Gaussian mask, where `sigma = 1 + radius / 2`.

- Throws **[Error][20]** Invalid parameters

Returns **Sharp**
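A minimal sketch of `blur` with an explicit sigma; file names are hypothetical:

```javascript
const sharp = require('sharp');

// Gaussian blur with sigma 5; omit the argument for the fast, mild blur.
sharp('input.jpg')
  .blur(5)
  .toFile('blurred.jpg', function (err, info) {
    // blurred.jpg contains the blurred output
  });
```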
## extend

Extends/pads the edges of the image with the colour provided to the `background` method.
This operation will always occur after resizing and extraction, if any.

**Parameters**

- `extend` **([Number][19] \| [Object][21])** single pixel count to add to all edges or an Object with per-edge counts
- `extend.top` **[Number][19]?**
- `extend.left` **[Number][19]?**
- `extend.bottom` **[Number][19]?**
- `extend.right` **[Number][19]?**

**Examples**

```javascript
// Resize to 140 pixels wide, then add 10 transparent pixels
// to the top, left and right edges and 20 to the bottom edge
sharp(input)
  .resize(140)
  .background({r: 0, g: 0, b: 0, alpha: 0})
  .extend({top: 10, bottom: 20, left: 10, right: 10})
  ...
```

- Throws **[Error][20]** Invalid parameters
- Throws **[Error][5]** Invalid parameters

Returns **Sharp**

## flatten

Merge alpha transparency channel, if any, with `background`.
Merge alpha transparency channel, if any, with a background.

**Parameters**
### Parameters

- `flatten` **[Boolean][22]** (optional, default `true`)

Returns **Sharp**
## trim

Trim "boring" pixels from all edges that contain values within a percentage similarity of the top-left pixel.

**Parameters**

- `tolerance` **[Number][19]** value between 1 and 99 representing the percentage similarity. (optional, default `10`)

- Throws **[Error][20]** Invalid parameters
- `options` **[Object][2]?**
- `options.background` **([String][3] \| [Object][2])** background colour, parsed by the [color][4] module, defaults to black. (optional, default `{r:0,g:0,b:0}`)

Returns **Sharp**
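A minimal sketch of `trim` using the default tolerance; file names are hypothetical:

```javascript
const sharp = require('sharp');

// Remove "boring" edges similar to the top-left pixel, then save.
sharp('scanned-page.png')
  .trim()
  .toFile('trimmed.png', function (err, info) {
    // info.width and info.height reflect the trimmed dimensions
  });
```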
@@ -228,12 +131,15 @@ This can improve the perceived brightness of a resized image in non-linear colou

JPEG and WebP input images will not take advantage of the shrink-on-load performance optimisation
when applying a gamma correction.

**Parameters**
Supply a second argument to use a different output gamma value, otherwise the first value is used in both cases.

- `gamma` **[Number][19]** value between 1.0 and 3.0. (optional, default `2.2`)
### Parameters

- `gamma` **[Number][1]** value between 1.0 and 3.0. (optional, default `2.2`)
- `gammaOut` **[Number][1]?** value between 1.0 and 3.0. (optional, defaults to same as `gamma`)

- Throws **[Error][20]** Invalid parameters
- Throws **[Error][5]** Invalid parameters

Returns **Sharp**
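A minimal sketch of resizing with gamma correction enabled; file names are hypothetical:

```javascript
const sharp = require('sharp');

// Apply the default 2.2 gamma correction around the resize.
sharp('input.jpg')
  .gamma()
  .resize(200, 200)
  .toFile('resized-gamma.jpg', function (err, info) {
    // gamma correction can improve the perceived brightness of the reduced image
  });
```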
@@ -241,9 +147,9 @@ Returns **Sharp**

Produce the "negative" of the image.

**Parameters**
### Parameters

- `negate` **[Boolean][22]** (optional, default `true`)
- `negate` **[Boolean][6]** (optional, default `true`)

Returns **Sharp**

@@ -251,9 +157,9 @@ Returns **Sharp**

Enhance output image contrast by stretching its luminance to cover the full dynamic range.

**Parameters**
### Parameters

- `normalise` **[Boolean][22]** (optional, default `true`)
- `normalise` **[Boolean][6]** (optional, default `true`)

Returns **Sharp**

@@ -261,9 +167,9 @@ Returns **Sharp**

Alternative spelling of normalise.

**Parameters**
### Parameters

- `normalize` **[Boolean][22]** (optional, default `true`)
- `normalize` **[Boolean][6]** (optional, default `true`)

Returns **Sharp**
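A minimal sketch of `normalise`; file names are hypothetical:

```javascript
const sharp = require('sharp');

// Stretch luminance to cover the full dynamic range.
sharp('low-contrast.jpg')
  .normalise()
  .toFile('normalised.jpg', function (err, info) {
    // normalised.jpg has enhanced contrast
  });
```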
@@ -271,16 +177,16 @@ Returns **Sharp**

Convolve the image with the specified kernel.

**Parameters**
### Parameters

- `kernel` **[Object][21]**
- `kernel.width` **[Number][19]** width of the kernel in pixels.
- `kernel.height` **[Number][19]** height of the kernel in pixels.
- `kernel.kernel` **[Array][23]<[Number][19]>** Array of length `width*height` containing the kernel values.
- `kernel.scale` **[Number][19]** the scale of the kernel in pixels. (optional, default `sum`)
- `kernel.offset` **[Number][19]** the offset of the kernel in pixels. (optional, default `0`)
- `kernel` **[Object][2]**
- `kernel.width` **[Number][1]** width of the kernel in pixels.
- `kernel.height` **[Number][1]** height of the kernel in pixels.
- `kernel.kernel` **[Array][7]<[Number][1]>** Array of length `width*height` containing the kernel values.
- `kernel.scale` **[Number][1]** the scale of the kernel in pixels. (optional, default `sum`)
- `kernel.offset` **[Number][1]** the offset of the kernel in pixels. (optional, default `0`)

**Examples**
### Examples

```javascript
sharp(input)
@@ -296,7 +202,7 @@ sharp(input)
  });
```

- Throws **[Error][20]** Invalid parameters
- Throws **[Error][5]** Invalid parameters

Returns **Sharp**

@@ -304,15 +210,15 @@ Returns **Sharp**

Any pixel value greater than or equal to the threshold value will be set to 255, otherwise it will be set to 0.

**Parameters**
### Parameters

- `threshold` **[Number][19]** a value in the range 0-255 representing the level at which the threshold will be applied. (optional, default `128`)
- `options` **[Object][21]?**
- `options.greyscale` **[Boolean][22]** convert to single channel greyscale. (optional, default `true`)
- `options.grayscale` **[Boolean][22]** alternative spelling for greyscale. (optional, default `true`)
- `threshold` **[Number][1]** a value in the range 0-255 representing the level at which the threshold will be applied. (optional, default `128`)
- `options` **[Object][2]?**
- `options.greyscale` **[Boolean][6]** convert to single channel greyscale. (optional, default `true`)
- `options.grayscale` **[Boolean][6]** alternative spelling for greyscale. (optional, default `true`)

- Throws **[Error][20]** Invalid parameters
- Throws **[Error][5]** Invalid parameters

Returns **Sharp**
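A minimal sketch of `threshold` with the default greyscale conversion; file names are hypothetical:

```javascript
const sharp = require('sharp');

// Pixels >= 128 become 255, everything else becomes 0.
sharp('input.jpg')
  .threshold(128)
  .toFile('bitonal.png', function (err, info) {
    // bitonal.png contains the thresholded output
  });
```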
@@ -323,18 +229,18 @@ Perform a bitwise boolean operation with operand image.

This operation creates an output image where each pixel is the result of
the selected bitwise boolean `operation` between the corresponding pixels of the input images.

**Parameters**
### Parameters

- `operand` **([Buffer][24] \| [String][25])** Buffer containing image data or String containing the path to an image file.
- `operator` **[String][25]** one of `and`, `or` or `eor` to perform that bitwise operation, like the C logic operators `&`, `|` and `^` respectively.
- `options` **[Object][21]?**
- `options.raw` **[Object][21]?** describes operand when using raw pixel data.
- `options.raw.width` **[Number][19]?**
- `options.raw.height` **[Number][19]?**
- `options.raw.channels` **[Number][19]?**
- `operand` **([Buffer][8] \| [String][3])** Buffer containing image data or String containing the path to an image file.
- `operator` **[String][3]** one of `and`, `or` or `eor` to perform that bitwise operation, like the C logic operators `&`, `|` and `^` respectively.
- `options` **[Object][2]?**
- `options.raw` **[Object][2]?** describes operand when using raw pixel data.
- `options.raw.width` **[Number][1]?**
- `options.raw.height` **[Number][1]?**
- `options.raw.channels` **[Number][1]?**

- Throws **[Error][20]** Invalid parameters
- Throws **[Error][5]** Invalid parameters

Returns **Sharp**
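A minimal sketch of `boolean`, combining two hypothetical images with a bitwise AND:

```javascript
const sharp = require('sharp');

// Each output pixel is the bitwise AND of the corresponding input pixels.
sharp('image-a.png')
  .boolean('image-b.png', 'and')
  .toFile('a-and-b.png', function (err, info) {
    // a-and-b.png holds the combined result
  });
```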
@@ -342,62 +248,57 @@ Returns **Sharp**

Apply the linear formula a \* input + b to the image (levels adjustment)

**Parameters**
### Parameters

- `a` **[Number][19]** multiplier (optional, default `1.0`)
- `b` **[Number][19]** offset (optional, default `0.0`)
- `a` **[Number][1]** multiplier (optional, default `1.0`)
- `b` **[Number][1]** offset (optional, default `0.0`)

- Throws **[Error][20]** Invalid parameters
- Throws **[Error][5]** Invalid parameters

Returns **Sharp**
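A minimal sketch of the levels adjustment `linear(a, b)` described above; file names are hypothetical:

```javascript
const sharp = require('sharp');

// output = 1.2 * input - 10, a mild contrast and brightness adjustment.
sharp('input.jpg')
  .linear(1.2, -10)
  .toFile('adjusted.jpg', function (err, info) {
    // adjusted.jpg contains the levels-adjusted output
  });
```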
## recomb

Recomb the image with the specified matrix.

### Parameters

- `inputMatrix`
- `3x3` **[Array][7]<[Array][7]<[Number][1]>>** Recombination matrix

### Examples

```javascript
sharp(input)
  .recomb([
   [0.3588, 0.7044, 0.1368],
   [0.2990, 0.5870, 0.1140],
   [0.2392, 0.4696, 0.0912],
  ])
  .raw()
  .toBuffer(function(err, data, info) {
    // data contains the raw pixel data after applying the recomb
    // With this example input, a sepia filter has been applied
  });
```

- Throws **[Error][5]** Invalid parameters

Returns **Sharp**

[1]: #rotate
[2]: #extract
[3]: #flip
[4]: #flop
[5]: #sharpen
[6]: #median
[7]: #blur
[8]: #extend
[9]: #flatten
[10]: #trim
[11]: #gamma
[12]: #negate
[13]: #normalise
[14]: #normalize
[15]: #convolve
[16]: #threshold
[17]: #boolean
[18]: #linear
[19]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number
[20]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error
[21]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object
[22]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean
[23]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Array
[24]: https://nodejs.org/api/buffer.html
[25]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String

[1]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number
[2]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object
[3]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String
[4]: https://www.npmjs.org/package/color
[5]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error
[6]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean
[7]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Array
[8]: https://nodejs.org/api/buffer.html
@@ -1,18 +1,5 @@
|
||||
<!-- Generated by documentation.js. Update this documentation by updating the source code. -->
|
||||
|
||||
### Table of Contents
|
||||
|
||||
- [toFile][1]
|
||||
- [toBuffer][2]
|
||||
- [withMetadata][3]
|
||||
- [jpeg][4]
|
||||
- [png][5]
|
||||
- [webp][6]
|
||||
- [tiff][7]
|
||||
- [raw][8]
|
||||
- [toFormat][9]
|
||||
- [tile][10]
|
||||
|
||||
## toFile
|
||||
|
||||
Write output image data to a file.
|
||||
@@ -23,15 +10,15 @@ Note that raw pixel data is only supported for buffer output.
|
||||
|
||||
A `Promise` is returned when `callback` is not provided.
|
||||
|
||||
**Parameters**
|
||||
### Parameters
|
||||
|
||||
- `fileOut` **[String][11]** the path to write the image data to.
|
||||
- `callback` **[Function][12]?** called on completion with two arguments `(err, info)`.
|
||||
- `fileOut` **[String][1]** the path to write the image data to.
|
||||
- `callback` **[Function][2]?** called on completion with two arguments `(err, info)`.
|
||||
`info` contains the output image `format`, `size` (bytes), `width`, `height`,
|
||||
`channels` and `premultiplied` (indicating if premultiplication was used).
|
||||
When using a crop strategy also contains `cropOffsetLeft` and `cropOffsetTop`.
|
||||
|
||||
**Examples**
|
||||
### Examples
|
||||
|
||||
```javascript
|
||||
sharp(input)
|
||||
@@ -45,9 +32,9 @@ sharp(input)
|
||||
.catch(err => { ... });
|
||||
```
|
||||
|
||||
- Throws **[Error][13]** Invalid parameters
|
||||
- Throws **[Error][3]** Invalid parameters
|
||||
|
||||
Returns **[Promise][14]<[Object][15]>** when no callback is provided
|
||||
Returns **[Promise][4]<[Object][5]>** when no callback is provided
|
||||
|
||||
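A callback-based sketch of the same operation, using the documented `(err, info)` arguments; the file names and dimensions here are illustrative assumptions.

```javascript
sharp('input.jpg')
  .resize(300, 200)
  .toFile('output.jpg', function(err, info) {
    if (err) throw err;
    // info contains format, size (bytes), width, height, channels and premultiplied
  });
```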
## toBuffer
|
||||
|
||||
@@ -65,13 +52,13 @@ By default, the format will match the input image, except GIF and SVG input whic
|
||||
|
||||
A `Promise` is returned when `callback` is not provided.
|
||||
|
||||
**Parameters**
|
||||
### Parameters
|
||||
|
||||
- `options` **[Object][15]?**
|
||||
- `options.resolveWithObject` **[Boolean][16]?** Resolve the Promise with an Object containing `data` and `info` properties instead of resolving only with `data`.
|
||||
- `callback` **[Function][12]?**
|
||||
- `options` **[Object][5]?**
|
||||
- `options.resolveWithObject` **[Boolean][6]?** Resolve the Promise with an Object containing `data` and `info` properties instead of resolving only with `data`.
|
||||
- `callback` **[Function][2]?**
|
||||
|
||||
**Examples**
|
||||
### Examples
|
||||
|
||||
```javascript
|
||||
sharp(input)
|
||||
@@ -92,7 +79,7 @@ sharp(input)
|
||||
.catch(err => { ... });
|
||||
```
|
||||
|
||||
Returns **[Promise][14]<[Buffer][17]>** when no callback is provided
|
||||
Returns **[Promise][4]<[Buffer][7]>** when no callback is provided
|
||||
|
||||
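A sketch of the documented `resolveWithObject` option; the `input` source and dimensions are assumptions.

```javascript
sharp(input)
  .resize(300, 200)
  .toBuffer({ resolveWithObject: true })
  .then(({ data, info }) => {
    // data is a Buffer containing the output image
    // info contains format, size, width, height, channels and premultiplied
  });
```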
## withMetadata
|
||||
|
||||
@@ -100,12 +87,12 @@ Include all metadata (EXIF, XMP, IPTC) from the input image in the output image.
|
||||
The default behaviour, when `withMetadata` is not used, is to strip all metadata and convert to the device-independent sRGB colour space.
|
||||
This will also convert to and add a web-friendly sRGB ICC profile.
|
||||
|
||||
**Parameters**
|
||||
### Parameters
|
||||
|
||||
- `withMetadata` **[Object][15]?**
|
||||
- `withMetadata.orientation` **[Number][18]?** value between 1 and 8, used to update the EXIF `Orientation` tag.
|
||||
- `withMetadata` **[Object][5]?**
|
||||
- `withMetadata.orientation` **[Number][8]?** value between 1 and 8, used to update the EXIF `Orientation` tag.
|
||||
|
||||
**Examples**
|
||||
### Examples
|
||||
|
||||
```javascript
|
||||
sharp('input.jpg')
|
||||
@@ -114,7 +101,7 @@ sharp('input.jpg')
|
||||
.then(info => { ... });
|
||||
```
|
||||
|
||||
- Throws **[Error][13]** Invalid parameters
|
||||
- Throws **[Error][3]** Invalid parameters
|
||||
|
||||
Returns **Sharp**
|
||||
|
||||
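A sketch of updating the EXIF `Orientation` tag via the documented `orientation` option; the input and output file names are assumptions.

```javascript
sharp('input.jpg')
  .withMetadata({ orientation: 3 })
  .toFile('output-with-orientation.jpg')
  .then(info => {
    // metadata is retained and the EXIF Orientation tag is set to 3 (180 degrees)
  });
```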
@@ -122,19 +109,23 @@ Returns **Sharp**
|
||||
|
||||
Use these JPEG options for output image.
|
||||
|
||||
**Parameters**
|
||||
### Parameters
|
||||
|
||||
- `options` **[Object][15]?** output options
|
||||
- `options.quality` **[Number][18]** quality, integer 1-100 (optional, default `80`)
|
||||
- `options.progressive` **[Boolean][16]** use progressive (interlace) scan (optional, default `false`)
|
||||
- `options.chromaSubsampling` **[String][11]** set to '4:4:4' to prevent chroma subsampling when quality <= 90 (optional, default `'4:2:0'`)
|
||||
- `options.trellisQuantisation` **[Boolean][16]** apply trellis quantisation, requires mozjpeg (optional, default `false`)
|
||||
- `options.overshootDeringing` **[Boolean][16]** apply overshoot deringing, requires mozjpeg (optional, default `false`)
|
||||
- `options.optimiseScans` **[Boolean][16]** optimise progressive scans, forces progressive, requires mozjpeg (optional, default `false`)
|
||||
- `options.optimizeScans` **[Boolean][16]** alternative spelling of optimiseScans (optional, default `false`)
|
||||
- `options.force` **[Boolean][16]** force JPEG output, otherwise attempt to use input format (optional, default `true`)
|
||||
- `options` **[Object][5]?** output options
|
||||
- `options.quality` **[Number][8]** quality, integer 1-100 (optional, default `80`)
|
||||
- `options.progressive` **[Boolean][6]** use progressive (interlace) scan (optional, default `false`)
|
||||
- `options.chromaSubsampling` **[String][1]** set to '4:4:4' to prevent chroma subsampling when quality <= 90 (optional, default `'4:2:0'`)
|
||||
- `options.trellisQuantisation` **[Boolean][6]** apply trellis quantisation, requires mozjpeg (optional, default `false`)
|
||||
- `options.overshootDeringing` **[Boolean][6]** apply overshoot deringing, requires mozjpeg (optional, default `false`)
|
||||
- `options.optimiseScans` **[Boolean][6]** optimise progressive scans, forces progressive, requires mozjpeg (optional, default `false`)
|
||||
- `options.optimizeScans` **[Boolean][6]** alternative spelling of optimiseScans (optional, default `false`)
|
||||
- `options.optimiseCoding` **[Boolean][6]** optimise Huffman coding tables (optional, default `true`)
|
||||
- `options.optimizeCoding` **[Boolean][6]** alternative spelling of optimiseCoding (optional, default `true`)
|
||||
- `options.quantisationTable` **[Number][8]** quantization table to use, integer 0-8, requires mozjpeg (optional, default `0`)
|
||||
- `options.quantizationTable` **[Number][8]** alternative spelling of quantisationTable (optional, default `0`)
|
||||
- `options.force` **[Boolean][6]** force JPEG output, otherwise attempt to use input format (optional, default `true`)
|
||||
|
||||
**Examples**
|
||||
### Examples
|
||||
|
||||
```javascript
|
||||
// Convert any input to very high quality JPEG output
|
||||
@@ -146,7 +137,7 @@ const data = await sharp(input)
|
||||
.toBuffer();
|
||||
```
|
||||
|
||||
- Throws **[Error][13]** Invalid options
|
||||
- Throws **[Error][3]** Invalid options
|
||||
|
||||
Returns **Sharp**
|
||||
|
||||
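Where sharp has been built against mozjpeg, a sketch combining the mozjpeg-specific options listed above; the `input` source and chosen values are assumptions.

```javascript
const data = await sharp(input)
  .jpeg({
    quality: 80,
    trellisQuantisation: true,
    overshootDeringing: true,
    optimiseScans: true,
    quantisationTable: 3
  })
  .toBuffer();
```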
@@ -157,15 +148,15 @@ Use these PNG options for output image.
|
||||
PNG output is always full colour at 8 or 16 bits per pixel.
|
||||
Indexed PNG input at 1, 2 or 4 bits per pixel is converted to 8 bits per pixel.
|
||||
|
||||
**Parameters**
|
||||
### Parameters
|
||||
|
||||
- `options` **[Object][15]?**
|
||||
- `options.progressive` **[Boolean][16]** use progressive (interlace) scan (optional, default `false`)
|
||||
- `options.compressionLevel` **[Number][18]** zlib compression level, 0-9 (optional, default `9`)
|
||||
- `options.adaptiveFiltering` **[Boolean][16]** use adaptive row filtering (optional, default `false`)
|
||||
- `options.force` **[Boolean][16]** force PNG output, otherwise attempt to use input format (optional, default `true`)
|
||||
- `options` **[Object][5]?**
|
||||
- `options.progressive` **[Boolean][6]** use progressive (interlace) scan (optional, default `false`)
|
||||
- `options.compressionLevel` **[Number][8]** zlib compression level, 0-9 (optional, default `9`)
|
||||
- `options.adaptiveFiltering` **[Boolean][6]** use adaptive row filtering (optional, default `false`)
|
||||
- `options.force` **[Boolean][6]** force PNG output, otherwise attempt to use input format (optional, default `true`)
|
||||
|
||||
**Examples**
|
||||
### Examples
|
||||
|
||||
```javascript
|
||||
// Convert any input to PNG output
|
||||
@@ -174,7 +165,7 @@ const data = await sharp(input)
|
||||
.toBuffer();
|
||||
```
|
||||
|
||||
- Throws **[Error][13]** Invalid options
|
||||
- Throws **[Error][3]** Invalid options
|
||||
|
||||
Returns **Sharp**
|
||||
|
||||
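A sketch trading a lower zlib compression level for adaptive row filtering, using the options documented above; the `input` source and values are assumptions.

```javascript
const data = await sharp(input)
  .png({ compressionLevel: 6, adaptiveFiltering: true })
  .toBuffer();
```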
@@ -182,16 +173,16 @@ Returns **Sharp**
|
||||
|
||||
Use these WebP options for output image.
|
||||
|
||||
**Parameters**
|
||||
### Parameters
|
||||
|
||||
- `options` **[Object][15]?** output options
|
||||
- `options.quality` **[Number][18]** quality, integer 1-100 (optional, default `80`)
|
||||
- `options.alphaQuality` **[Number][18]** quality of alpha layer, integer 0-100 (optional, default `100`)
|
||||
- `options.lossless` **[Boolean][16]** use lossless compression mode (optional, default `false`)
|
||||
- `options.nearLossless` **[Boolean][16]** use near_lossless compression mode (optional, default `false`)
|
||||
- `options.force` **[Boolean][16]** force WebP output, otherwise attempt to use input format (optional, default `true`)
|
||||
- `options` **[Object][5]?** output options
|
||||
- `options.quality` **[Number][8]** quality, integer 1-100 (optional, default `80`)
|
||||
- `options.alphaQuality` **[Number][8]** quality of alpha layer, integer 0-100 (optional, default `100`)
|
||||
- `options.lossless` **[Boolean][6]** use lossless compression mode (optional, default `false`)
|
||||
- `options.nearLossless` **[Boolean][6]** use near_lossless compression mode (optional, default `false`)
|
||||
- `options.force` **[Boolean][6]** force WebP output, otherwise attempt to use input format (optional, default `true`)
|
||||
|
||||
**Examples**
|
||||
### Examples
|
||||
|
||||
```javascript
|
||||
// Convert any input to lossless WebP output
|
||||
@@ -200,7 +191,7 @@ const data = await sharp(input)
|
||||
.toBuffer();
|
||||
```
|
||||
|
||||
- Throws **[Error][13]** Invalid options
|
||||
- Throws **[Error][3]** Invalid options
|
||||
|
||||
Returns **Sharp**
|
||||
|
||||
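A sketch of the documented `nearLossless` mode; the `input` source and quality value are assumptions.

```javascript
const data = await sharp(input)
  .webp({ nearLossless: true, quality: 60 })
  .toBuffer();
```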
@@ -208,18 +199,22 @@ Returns **Sharp**
|
||||
|
||||
Use these TIFF options for output image.
|
||||
|
||||
**Parameters**
|
||||
### Parameters
|
||||
|
||||
- `options` **[Object][15]?** output options
|
||||
- `options.quality` **[Number][18]** quality, integer 1-100 (optional, default `80`)
|
||||
- `options.force` **[Boolean][16]** force TIFF output, otherwise attempt to use input format (optional, default `true`)
|
||||
- `options.compression` **[Boolean][16]** compression options: lzw, deflate, jpeg, ccittfax4 (optional, default `'jpeg'`)
|
||||
- `options.predictor` **[Boolean][16]** compression predictor options: none, horizontal, float (optional, default `'horizontal'`)
|
||||
- `options.xres` **[Number][18]** horizontal resolution in pixels/mm (optional, default `1.0`)
|
||||
- `options.yres` **[Number][18]** vertical resolution in pixels/mm (optional, default `1.0`)
|
||||
- `options.squash` **[Boolean][16]** squash 8-bit images down to 1 bit (optional, default `false`)
|
||||
- `options` **[Object][5]?** output options
- `options.quality` **[Number][8]** quality, integer 1-100 (optional, default `80`)
- `options.force` **[Boolean][6]** force TIFF output, otherwise attempt to use input format (optional, default `true`)
- `options.compression` **[String][1]** compression options: lzw, deflate, jpeg, ccittfax4 (optional, default `'jpeg'`)
- `options.predictor` **[String][1]** compression predictor options: none, horizontal, float (optional, default `'horizontal'`)
- `options.pyramid` **[Boolean][6]** write an image pyramid (optional, default `false`)
- `options.tile` **[Boolean][6]** write a tiled TIFF (optional, default `false`)
- `options.tileWidth` **[Number][8]** horizontal tile size in pixels (optional, default `256`)
- `options.tileHeight` **[Number][8]** vertical tile size in pixels (optional, default `256`)
- `options.xres` **[Number][8]** horizontal resolution in pixels/mm (optional, default `1.0`)
- `options.yres` **[Number][8]** vertical resolution in pixels/mm (optional, default `1.0`)
- `options.squash` **[Boolean][6]** squash 8-bit images down to 1 bit (optional, default `false`)
|
||||
|
||||
**Examples**
|
||||
### Examples
|
||||
|
||||
```javascript
|
||||
// Convert SVG input to LZW-compressed, 1 bit per pixel TIFF output
|
||||
@@ -232,7 +227,7 @@ sharp('input.svg')
|
||||
.then(info => { ... });
|
||||
```
|
||||
|
||||
- Throws **[Error][13]** Invalid options
|
||||
- Throws **[Error][3]** Invalid options
|
||||
|
||||
Returns **Sharp**
|
||||
|
||||
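A sketch of the newly exposed pyramid/tile options documented above; the file names and tile sizes are assumptions.

```javascript
sharp('input.jpg')
  .tiff({
    compression: 'deflate',
    pyramid: true,
    tile: true,
    tileWidth: 512,
    tileHeight: 512
  })
  .toFile('output-pyramid.tiff')
  .then(info => {
    // output-pyramid.tiff is a tiled, pyramidal TIFF
  });
```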
@@ -240,7 +235,7 @@ Returns **Sharp**
|
||||
|
||||
Force output to be raw, uncompressed uint8 pixel data.
|
||||
|
||||
**Examples**
|
||||
### Examples
|
||||
|
||||
```javascript
|
||||
// Extract raw RGB pixel data from JPEG input
|
||||
@@ -255,12 +250,12 @@ Returns **Sharp**
|
||||
|
||||
Force output to a given format.
|
||||
|
||||
**Parameters**
|
||||
### Parameters
|
||||
|
||||
- `format` **([String][11] \| [Object][15])** as a String or an Object with an 'id' attribute
|
||||
- `options` **[Object][15]** output options
|
||||
- `format` **([String][1] \| [Object][5])** as a String or an Object with an 'id' attribute
|
||||
- `options` **[Object][5]** output options
|
||||
|
||||
**Examples**
|
||||
### Examples
|
||||
|
||||
```javascript
|
||||
// Convert any input to PNG output
|
||||
@@ -269,7 +264,7 @@ const data = await sharp(input)
|
||||
.toBuffer();
|
||||
```
|
||||
|
||||
- Throws **[Error][13]** unsupported format or options
|
||||
- Throws **[Error][3]** unsupported format or options
|
||||
|
||||
Returns **Sharp**
|
||||
|
||||
@@ -279,16 +274,19 @@ Use tile-based deep zoom (image pyramid) output.
|
||||
Set the format and options for tile images via the `toFormat`, `jpeg`, `png` or `webp` functions.
|
||||
Use a `.zip` or `.szi` file extension with `toFile` to write to a compressed archive file format.
|
||||
|
||||
**Parameters**
|
||||
Warning: multiple sharp instances concurrently producing tile output can expose a possible race condition in some versions of libgsf.
|
||||
|
||||
- `tile` **[Object][15]?**
|
||||
- `tile.size` **[Number][18]** tile size in pixels, a value between 1 and 8192. (optional, default `256`)
|
||||
- `tile.overlap` **[Number][18]** tile overlap in pixels, a value between 0 and 8192. (optional, default `0`)
|
||||
- `tile.angle` **[Number][18]** tile angle of rotation, must be a multiple of 90. (optional, default `0`)
|
||||
- `tile.container` **[String][11]** tile container, with value `fs` (filesystem) or `zip` (compressed file). (optional, default `'fs'`)
|
||||
- `tile.layout` **[String][11]** filesystem layout, possible values are `dz`, `zoomify` or `google`. (optional, default `'dz'`)
|
||||
### Parameters
|
||||
|
||||
**Examples**
|
||||
- `tile` **[Object][5]?**
|
||||
- `tile.size` **[Number][8]** tile size in pixels, a value between 1 and 8192. (optional, default `256`)
|
||||
- `tile.overlap` **[Number][8]** tile overlap in pixels, a value between 0 and 8192. (optional, default `0`)
|
||||
- `tile.angle` **[Number][8]** tile angle of rotation, must be a multiple of 90. (optional, default `0`)
|
||||
- `tile.depth` **[String][1]?** how deep to make the pyramid, possible values are `onepixel`, `onetile` or `one`, default based on layout.
|
||||
- `tile.container` **[String][1]** tile container, with value `fs` (filesystem) or `zip` (compressed file). (optional, default `'fs'`)
|
||||
- `tile.layout` **[String][1]** filesystem layout, possible values are `dz`, `zoomify` or `google`. (optional, default `'dz'`)
|
||||
|
||||
### Examples
|
||||
|
||||
```javascript
|
||||
sharp('input.tiff')
|
||||
@@ -302,42 +300,22 @@ sharp('input.tiff')
|
||||
});
|
||||
```
|
||||
|
||||
- Throws **[Error][13]** Invalid parameters
|
||||
- Throws **[Error][3]** Invalid parameters
|
||||
|
||||
Returns **Sharp**
|
||||
|
||||
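A sketch of tile output to a compressed archive using the documented `container` option and the newly exposed `depth` option; the file names and values are assumptions.

```javascript
sharp('input.tiff')
  .png()
  .tile({
    size: 512,
    overlap: 2,
    depth: 'onetile',
    container: 'zip'
  })
  .toFile('output.zip', function(err, info) {
    // output.zip contains the Deep Zoom tile pyramid as a compressed archive
  });
```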
[1]: #tofile
|
||||
[1]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String
|
||||
|
||||
[2]: #tobuffer
|
||||
[2]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Statements/function
|
||||
|
||||
[3]: #withmetadata
|
||||
[3]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error
|
||||
|
||||
[4]: #jpeg
|
||||
[4]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise
|
||||
|
||||
[5]: #png
|
||||
[5]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object
|
||||
|
||||
[6]: #webp
|
||||
[6]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean
|
||||
|
||||
[7]: #tiff
|
||||
[7]: https://nodejs.org/api/buffer.html
|
||||
|
||||
[8]: #raw
|
||||
|
||||
[9]: #toformat
|
||||
|
||||
[10]: #tile
|
||||
|
||||
[11]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String
|
||||
|
||||
[12]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Statements/function
|
||||
|
||||
[13]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error
|
||||
|
||||
[14]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise
|
||||
|
||||
[15]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object
|
||||
|
||||
[16]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean
|
||||
|
||||
[17]: https://nodejs.org/api/buffer.html
|
||||
|
||||
[18]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number
|
||||
[8]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number
|
||||
|
||||
@@ -1,206 +1,234 @@
|
||||
<!-- Generated by documentation.js. Update this documentation by updating the source code. -->
|
||||
|
||||
### Table of Contents
|
||||
|
||||
- [resize][1]
|
||||
- [crop][2]
|
||||
- [embed][3]
|
||||
- [max][4]
|
||||
- [min][5]
|
||||
- [ignoreAspectRatio][6]
|
||||
- [withoutEnlargement][7]
|
||||
|
||||
## resize
|
||||
|
||||
Resize image to `width` x `height`.
|
||||
By default, the resized image is centre cropped to the exact size specified.
|
||||
Resize image to `width`, `height` or `width x height`.
|
||||
|
||||
Possible kernels are:
|
||||
When both a `width` and `height` are provided, the possible methods by which the image should **fit** these are:
|
||||
|
||||
- `nearest`: Use [nearest neighbour interpolation][8].
|
||||
- `cubic`: Use a [Catmull-Rom spline][9].
|
||||
- `lanczos2`: Use a [Lanczos kernel][10] with `a=2`.
|
||||
- `lanczos3`: Use a Lanczos kernel with `a=3` (the default).
|
||||
- `cover`: Crop to cover both provided dimensions (the default).
|
||||
- `contain`: Embed within both provided dimensions.
|
||||
- `fill`: Ignore the aspect ratio of the input and stretch to both provided dimensions.
|
||||
- `inside`: Preserving aspect ratio, resize the image to be as large as possible while ensuring its dimensions are less than or equal to both those specified.
|
||||
- `outside`: Preserving aspect ratio, resize the image to be as small as possible while ensuring its dimensions are greater than or equal to both those specified.
|
||||
Some of these values are based on the [object-fit][1] CSS property.
|
||||
|
||||
**Parameters**
|
||||
When using a `fit` of `cover` or `contain`, the default **position** is `centre`. Other options are:
|
||||
|
||||
- `width` **[Number][11]?** pixels wide the resultant image should be. Use `null` or `undefined` to auto-scale the width to match the height.
|
||||
- `height` **[Number][11]?** pixels high the resultant image should be. Use `null` or `undefined` to auto-scale the height to match the width.
|
||||
- `options` **[Object][12]?**
|
||||
- `options.kernel` **[String][13]** the kernel to use for image reduction. (optional, default `'lanczos3'`)
|
||||
- `options.fastShrinkOnLoad` **[Boolean][14]** take greater advantage of the JPEG and WebP shrink-on-load feature, which can lead to a slight moiré pattern on some images. (optional, default `true`)
|
||||
|
||||
**Examples**
|
||||
|
||||
```javascript
|
||||
sharp(inputBuffer)
|
||||
.resize(200, 300, {
|
||||
kernel: sharp.kernel.nearest
|
||||
})
|
||||
.background('white')
|
||||
.embed()
|
||||
.toFile('output.tiff')
|
||||
.then(function() {
|
||||
// output.tiff is a 200 pixels wide and 300 pixels high image
|
||||
// containing a nearest-neighbour scaled version, embedded on a white canvas,
|
||||
// of the image data in inputBuffer
|
||||
});
|
||||
```
|
||||
|
||||
- Throws **[Error][15]** Invalid parameters
|
||||
|
||||
Returns **Sharp**
|
||||
|
||||
## crop
|
||||
|
||||
Crop the resized image to the exact size specified, the default behaviour.
|
||||
|
||||
Possible attributes of the optional `sharp.gravity` are `north`, `northeast`, `east`, `southeast`, `south`,
|
||||
`southwest`, `west`, `northwest`, `center` and `centre`.
|
||||
- `sharp.position`: `top`, `right top`, `right`, `right bottom`, `bottom`, `left bottom`, `left`, `left top`.
|
||||
- `sharp.gravity`: `north`, `northeast`, `east`, `southeast`, `south`, `southwest`, `west`, `northwest`, `center` or `centre`.
|
||||
- `sharp.strategy`: `cover` only, dynamically crop using either the `entropy` or `attention` strategy.
|
||||
Some of these values are based on the [object-position][2] CSS property.
|
||||
|
||||
The experimental strategy-based approach resizes so one dimension is at its target length
|
||||
then repeatedly ranks edge regions, discarding the edge with the lowest score based on the selected strategy.
|
||||
|
||||
- `entropy`: focus on the region with the highest [Shannon entropy][16].
|
||||
- `entropy`: focus on the region with the highest [Shannon entropy][3].
|
||||
- `attention`: focus on the region with the highest luminance frequency, colour saturation and presence of skin tones.
|
||||
|
||||
**Parameters**
|
||||
Possible interpolation kernels are:
|
||||
|
||||
- `crop` **[String][13]** A member of `sharp.gravity` to crop to an edge/corner or `sharp.strategy` to crop dynamically. (optional, default `'centre'`)
|
||||
- `nearest`: Use [nearest neighbour interpolation][4].
|
||||
- `cubic`: Use a [Catmull-Rom spline][5].
|
||||
- `mitchell`: Use a [Mitchell-Netravali spline][6].
|
||||
- `lanczos2`: Use a [Lanczos kernel][7] with `a=2`.
|
||||
- `lanczos3`: Use a Lanczos kernel with `a=3` (the default).
|
||||
|
||||
**Examples**
|
||||
### Parameters
|
||||
|
||||
- `width` **[Number][8]?** pixels wide the resultant image should be. Use `null` or `undefined` to auto-scale the width to match the height.
|
||||
- `height` **[Number][8]?** pixels high the resultant image should be. Use `null` or `undefined` to auto-scale the height to match the width.
|
||||
- `options` **[Object][9]?**
|
||||
- `options.width` **[Number][8]?** alternative means of specifying `width`. If both are present this takes priority.
- `options.height` **[Number][8]?** alternative means of specifying `height`. If both are present this takes priority.
|
||||
- `options.fit` **[String][10]** how the image should be resized to fit both provided dimensions, one of `cover`, `contain`, `fill`, `inside` or `outside`. (optional, default `'cover'`)
|
||||
- `options.position` **[String][10]** position, gravity or strategy to use when `fit` is `cover` or `contain`. (optional, default `'centre'`)
|
||||
- `options.background` **([String][10] \| [Object][9])** background colour when using a `fit` of `contain`, parsed by the [color][11] module, defaults to black without transparency. (optional, default `{r:0,g:0,b:0,alpha:1}`)
|
||||
- `options.kernel` **[String][10]** the kernel to use for image reduction. (optional, default `'lanczos3'`)
|
||||
- `options.withoutEnlargement` **[Boolean][12]** do not enlarge if the width _or_ height are already less than the specified dimensions, equivalent to GraphicsMagick's `>` geometry option. (optional, default `false`)
|
||||
- `options.fastShrinkOnLoad` **[Boolean][12]** take greater advantage of the JPEG and WebP shrink-on-load feature, which can lead to a slight moiré pattern on some images. (optional, default `true`)
|
||||
|
||||
### Examples
|
||||
|
||||
```javascript
|
||||
sharp(input)
|
||||
.resize({ width: 100 })
|
||||
.toBuffer()
|
||||
.then(data => {
|
||||
// 100 pixels wide, auto-scaled height
|
||||
});
|
||||
```
|
||||
|
||||
```javascript
|
||||
sharp(input)
|
||||
.resize({ height: 100 })
|
||||
.toBuffer()
|
||||
.then(data => {
|
||||
// 100 pixels high, auto-scaled width
|
||||
});
|
||||
```
|
||||
|
||||
```javascript
|
||||
sharp(input)
|
||||
.resize(200, 300, {
|
||||
kernel: sharp.kernel.nearest,
|
||||
fit: 'contain',
|
||||
position: 'right top',
|
||||
background: { r: 255, g: 255, b: 255, alpha: 0.5 }
|
||||
})
|
||||
.toFile('output.png')
|
||||
.then(() => {
|
||||
// output.png is a 200 pixels wide and 300 pixels high image
|
||||
// containing a nearest-neighbour scaled version
|
||||
// contained within the north-east corner of a semi-transparent white canvas
|
||||
});
|
||||
```
|
||||
|
||||
```javascript
|
||||
const transformer = sharp()
|
||||
.resize(200, 200)
|
||||
.crop(sharp.strategy.entropy)
|
||||
.on('error', function(err) {
|
||||
console.log(err);
|
||||
.resize({
|
||||
width: 200,
|
||||
height: 200,
|
||||
fit: sharp.fit.cover,
|
||||
position: sharp.strategy.entropy
|
||||
});
|
||||
// Read image data from readableStream
|
||||
// Write 200px square auto-cropped image data to writableStream
|
||||
readableStream.pipe(transformer).pipe(writableStream);
|
||||
readableStream
|
||||
.pipe(transformer)
|
||||
.pipe(writableStream);
|
||||
```
|
||||
|
||||
- Throws **[Error][15]** Invalid parameters
|
||||
|
||||
Returns **Sharp**
|
||||
|
||||
## embed
|
||||
|
||||
Preserving aspect ratio, resize the image to the maximum `width` or `height` specified
|
||||
then embed on a background of the exact `width` and `height` specified.
|
||||
|
||||
If the background contains an alpha value then WebP and PNG format output images will
|
||||
contain an alpha channel, even when the input image does not.
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `embed` **[String][13]** A member of `sharp.gravity` to embed to an edge/corner. (optional, default `'centre'`)
|
||||
|
||||
**Examples**
|
||||
|
||||
```javascript
|
||||
sharp('input.gif')
|
||||
.resize(200, 300)
|
||||
.background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
.embed()
|
||||
.toFormat(sharp.format.webp)
|
||||
.toBuffer(function(err, outputBuffer) {
|
||||
if (err) {
|
||||
throw err;
|
||||
}
|
||||
// outputBuffer contains WebP image data of a 200 pixels wide and 300 pixels high
|
||||
// containing a scaled version, embedded on a transparent canvas, of input.gif
|
||||
});
|
||||
```
|
||||
|
||||
- Throws **[Error][15]** Invalid parameters
|
||||
|
||||
Returns **Sharp**
|
||||
|
||||
## max
|
||||
|
||||
Preserving aspect ratio, resize the image to be as large as possible
|
||||
while ensuring its dimensions are less than or equal to the `width` and `height` specified.
|
||||
|
||||
Both `width` and `height` must be provided via `resize` otherwise the behaviour will default to `crop`.
|
||||
|
||||
**Examples**
|
||||
|
||||
```javascript
|
||||
sharp(inputBuffer)
|
||||
.resize(200, 200)
|
||||
.max()
|
||||
sharp(input)
|
||||
.resize(200, 200, {
|
||||
fit: sharp.fit.inside,
|
||||
withoutEnlargement: true
|
||||
})
|
||||
.toFormat('jpeg')
|
||||
.toBuffer()
|
||||
.then(function(outputBuffer) {
|
||||
// outputBuffer contains JPEG image data no wider than 200 pixels and no higher
|
||||
// than 200 pixels regardless of the inputBuffer image dimensions
|
||||
// outputBuffer contains JPEG image data
|
||||
// no wider and no higher than 200 pixels
|
||||
// and no larger than the input image
|
||||
});
|
||||
```
|
||||
|
||||
Returns **Sharp**
|
||||
|
||||
## min
|
||||
|
||||
Preserving aspect ratio, resize the image to be as small as possible
|
||||
while ensuring its dimensions are greater than or equal to the `width` and `height` specified.
|
||||
|
||||
Both `width` and `height` must be provided via `resize` otherwise the behaviour will default to `crop`.
|
||||
- Throws **[Error][13]** Invalid parameters
|
||||
|
||||
Returns **Sharp**
|
||||
|
||||
## ignoreAspectRatio
|
||||
## extend
|
||||
|
||||
Ignoring the aspect ratio of the input, stretch the image to
|
||||
the exact `width` and/or `height` provided via `resize`.
|
||||
Extends/pads the edges of the image with the provided background colour.
|
||||
This operation will always occur after resizing and extraction, if any.
|
||||
|
||||
### Parameters
|
||||
|
||||
- `extend` **([Number][8] \| [Object][9])** single pixel count to add to all edges or an Object with per-edge counts
|
||||
- `extend.top` **[Number][8]?**
|
||||
- `extend.left` **[Number][8]?**
|
||||
- `extend.bottom` **[Number][8]?**
|
||||
- `extend.right` **[Number][8]?**
|
||||
- `extend.background` **([String][10] \| [Object][9])** background colour, parsed by the [color][11] module, defaults to black without transparency. (optional, default `{r:0,g:0,b:0,alpha:1}`)
|
||||
|
||||
### Examples
|
||||
|
||||
```javascript
// Resize to 140 pixels wide, then add 10 transparent pixels
// to the top, left and right edges and 20 to the bottom edge
sharp(input)
  .resize(140)
  .extend({
    top: 10,
    bottom: 20,
    left: 10,
    right: 10,
    background: { r: 0, g: 0, b: 0, alpha: 0 }
  })
  ...
```
|
||||
|
||||
- Throws **[Error][13]** Invalid parameters
|
||||
|
||||
Returns **Sharp**
|
||||
|
||||
## withoutEnlargement
|
||||
## extract
|
||||
|
||||
Do not enlarge the output image if the input image width _or_ height are already less than the required dimensions.
|
||||
This is equivalent to GraphicsMagick's `>` geometry option:
|
||||
"_change the dimensions of the image only if its width or height exceeds the geometry specification_".
|
||||
Use with `max()` to preserve the image's aspect ratio.
|
||||
Extract a region of the image.
|
||||
|
||||
The default behaviour _before_ function call is `false`, meaning the image will be enlarged.
|
||||
- Use `extract` before `resize` for pre-resize extraction.
|
||||
- Use `extract` after `resize` for post-resize extraction.
|
||||
- Use `extract` before and after for both.
|
||||
|
||||
**Parameters**
|
||||
### Parameters
|
||||
|
||||
- `withoutEnlargement` **[Boolean][14]** (optional, default `true`)
|
||||
- `options` **[Object][9]**
|
||||
- `options.left` **[Number][8]** zero-indexed offset from left edge
|
||||
- `options.top` **[Number][8]** zero-indexed offset from top edge
|
||||
- `options.width` **[Number][8]** dimension of extracted image
|
||||
- `options.height` **[Number][8]** dimension of extracted image
|
||||
|
||||
### Examples
|
||||
|
||||
```javascript
|
||||
sharp(input)
|
||||
.extract({ left: left, top: top, width: width, height: height })
|
||||
.toFile(output, function(err) {
|
||||
// Extract a region of the input image, saving in the same format.
|
||||
});
|
||||
```
|
||||
|
||||
```javascript
|
||||
sharp(input)
|
||||
.extract({ left: leftOffsetPre, top: topOffsetPre, width: widthPre, height: heightPre })
|
||||
.resize(width, height)
|
||||
.extract({ left: leftOffsetPost, top: topOffsetPost, width: widthPost, height: heightPost })
|
||||
.toFile(output, function(err) {
|
||||
// Extract a region, resize, then extract from the resized image
|
||||
});
|
||||
```
|
||||
|
||||
- Throws **[Error][13]** Invalid parameters
|
||||
|
||||
Returns **Sharp**
|
||||
|
||||
[1]: #resize
|
||||
## trim
|
||||
|
||||
[2]: #crop
|
||||
Trim "boring" pixels from all edges that contain values similar to the top-left pixel.
|
||||
The `info` response Object will contain `trimOffsetLeft` and `trimOffsetTop` properties.
|
||||
|
||||
[3]: #embed
|
||||
### Parameters
|
||||
|
||||
[4]: #max
|
||||
- `threshold` **[Number][8]** the allowed difference from the top-left pixel, a number greater than zero. (optional, default `10`)
|
||||
|
||||
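### Examples

A minimal sketch of the documented threshold parameter; the `input` source, output file name and threshold value are assumptions.

```javascript
// Trim edges whose values are within 25 of the top-left pixel
sharp(input)
  .trim(25)
  .toFile('trimmed.png')
  .then(info => {
    // info.trimOffsetLeft and info.trimOffsetTop describe how much was removed
  });
```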
[5]: #min
|
||||
|
||||
[6]: #ignoreaspectratio
|
||||
- Throws **[Error][13]** Invalid parameters
|
||||
|
||||
[7]: #withoutenlargement
|
||||
Returns **Sharp**
|
||||
|
||||
[8]: http://en.wikipedia.org/wiki/Nearest-neighbor_interpolation
|
||||
[1]: https://developer.mozilla.org/en-US/docs/Web/CSS/object-fit
|
||||
|
||||
[9]: https://en.wikipedia.org/wiki/Centripetal_Catmull%E2%80%93Rom_spline
|
||||
[2]: https://developer.mozilla.org/en-US/docs/Web/CSS/object-position
|
||||
|
||||
[10]: https://en.wikipedia.org/wiki/Lanczos_resampling#Lanczos_kernel
|
||||
[3]: https://en.wikipedia.org/wiki/Entropy_%28information_theory%29
|
||||
|
||||
[11]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number
|
||||
[4]: http://en.wikipedia.org/wiki/Nearest-neighbor_interpolation
|
||||
|
||||
[12]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object
|
||||
[5]: https://en.wikipedia.org/wiki/Centripetal_Catmull%E2%80%93Rom_spline
|
||||
|
||||
[13]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String
|
||||
[6]: https://www.cs.utexas.edu/~fussell/courses/cs384g-fall2013/lectures/mitchell/Mitchell.pdf
|
||||
|
||||
[14]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean
|
||||
[7]: https://en.wikipedia.org/wiki/Lanczos_resampling#Lanczos_kernel
|
||||
|
||||
[15]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error
|
||||
[8]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number
|
||||
|
||||
[16]: https://en.wikipedia.org/wiki/Entropy_%28information_theory%29
|
||||
[9]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object
|
||||
|
||||
[10]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String
|
||||
|
||||
[11]: https://www.npmjs.org/package/color
|
||||
|
||||
[12]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean
|
||||
|
||||
[13]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error
|
||||
|
||||
@@ -1,12 +1,5 @@
|
||||
<!-- Generated by documentation.js. Update this documentation by updating the source code. -->
|
||||
|
||||
### Table of Contents
|
||||
|
||||
- [cache][1]
|
||||
- [concurrency][2]
|
||||
- [counters][3]
|
||||
- [simd][4]
|
||||
|
||||
## cache
|
||||
|
||||
Gets or, when options are provided, sets the limits of _libvips'_ operation cache.
|
||||
@@ -14,14 +7,14 @@ Existing entries in the cache will be trimmed after any change in limits.
|
||||
This method always returns cache statistics,
|
||||
useful for determining how much working memory is required for a particular task.
|
||||
|
||||
**Parameters**
|
||||
### Parameters
|
||||
|
||||
- `options` **([Object][5] \| [Boolean][6])** Object with the following attributes, or Boolean where true uses default cache settings and false removes all caching (optional, default `true`)
|
||||
- `options.memory` **[Number][7]** is the maximum memory in MB to use for this cache (optional, default `50`)
|
||||
- `options.files` **[Number][7]** is the maximum number of files to hold open (optional, default `20`)
|
||||
- `options.items` **[Number][7]** is the maximum number of operations to cache (optional, default `100`)
|
||||
- `options` **([Object][1] \| [Boolean][2])** Object with the following attributes, or Boolean where true uses default cache settings and false removes all caching (optional, default `true`)
|
||||
- `options.memory` **[Number][3]** is the maximum memory in MB to use for this cache (optional, default `50`)
|
||||
- `options.files` **[Number][3]** is the maximum number of files to hold open (optional, default `20`)
|
||||
- `options.items` **[Number][3]** is the maximum number of operations to cache (optional, default `100`)
|
||||
|
||||
**Examples**
|
||||
### Examples
|
||||
|
||||
```javascript
|
||||
const stats = sharp.cache();
|
||||
@@ -33,7 +26,7 @@ sharp.cache( { files: 0 } );
|
||||
sharp.cache(false);
|
||||
```
|
||||
|
||||
Returns **[Object][5]**
|
||||
Returns **[Object][1]**
|
||||
|
||||
## concurrency
|
||||
|
||||
@@ -47,11 +40,11 @@ is limited by libuv's `UV_THREADPOOL_SIZE` environment variable.
|
||||
|
||||
This method always returns the current concurrency.
|
||||
|
||||
**Parameters**
|
||||
### Parameters
|
||||
|
||||
- `concurrency` **[Number][7]?**
|
||||
- `concurrency` **[Number][3]?**
|
||||
|
||||
**Examples**
|
||||
### Examples
|
||||
|
||||
```javascript
|
||||
const threads = sharp.concurrency(); // 4
|
||||
@@ -59,7 +52,7 @@ sharp.concurrency(2); // 2
|
||||
sharp.concurrency(0); // 4
|
||||
```
|
||||
|
||||
Returns **[Number][7]** concurrency
|
||||
Returns **[Number][3]** concurrency
|
||||
|
||||
## counters
|
||||
|
||||
@@ -68,13 +61,13 @@ Provides access to internal task counters.
|
||||
- queue is the number of tasks this module has queued waiting for _libuv_ to provide a worker thread from its pool.
|
||||
- process is the number of resize tasks currently being processed.
|
||||
|
||||
**Examples**
|
||||
### Examples
|
||||
|
||||
```javascript
|
||||
const counters = sharp.counters(); // { queue: 2, process: 4 }
|
||||
```
|
||||
|
||||
Returns **[Object][5]**
|
||||
Returns **[Object][1]**
|
||||
|
||||
## simd
|
||||
|
||||
@@ -84,37 +77,26 @@ Requires libvips to have been compiled with liborc support.
|
||||
Improves the performance of `resize`, `blur` and `sharpen` operations
|
||||
by taking advantage of the SIMD vector unit of the CPU, e.g. Intel SSE and ARM NEON.
|
||||
|
||||
This feature is currently off by default but future versions may reverse this.
|
||||
Versions of liborc prior to 0.4.25 are known to segfault under heavy load.
|
||||
### Parameters
|
||||
|
||||
**Parameters**
|
||||
- `simd` **[Boolean][2]** (optional, default `true`)
|
||||
|
||||
- `simd` **[Boolean][6]** (optional, default `false`)
|
||||
|
||||
**Examples**
|
||||
### Examples
|
||||
|
||||
```javascript
|
||||
const simd = sharp.simd();
|
||||
// simd is `true` if SIMD is currently enabled
|
||||
// simd is `true` if the runtime use of liborc is currently enabled
|
||||
```
|
||||
|
||||
```javascript
|
||||
const simd = sharp.simd(true);
|
||||
// attempts to enable the use of SIMD, returning true if available
|
||||
const simd = sharp.simd(false);
|
||||
// prevent libvips from using liborc at runtime
|
||||
```
|
||||
|
||||
Returns **[Boolean][6]**
|
||||
Returns **[Boolean][2]**
|
||||
|
||||
[1]: #cache
|
||||
[1]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object
|
||||
|
||||
[2]: #concurrency
|
||||
[2]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean
|
||||
|
||||
[3]: #counters
|
||||
|
||||
[4]: #simd
|
||||
|
||||
[5]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object
|
||||
|
||||
[6]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean
|
||||
|
||||
[7]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number
|
||||
[3]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number
|
||||
|
||||
@@ -1,9 +1,147 @@
|
||||
# Changelog
|
||||
|
||||
### v0.21 - "*teeth*"
|
||||
|
||||
Requires libvips v8.7.0.
|
||||
|
||||
#### v0.21.1 - 7<sup>th</sup> December 2018
|
||||
|
||||
* Install: support `sharp_dist_base_url` npm config, like existing `SHARP_DIST_BASE_URL`.
|
||||
[#1422](https://github.com/lovell/sharp/pull/1422)
|
||||
[@SethWen](https://github.com/SethWen)
|
||||
|
||||
* Ensure `channel` metadata is correct for raw, greyscale output.
|
||||
[#1425](https://github.com/lovell/sharp/issues/1425)
|
||||
|
||||
* Add support for the "mitchell" kernel for image reductions.
|
||||
[#1438](https://github.com/lovell/sharp/pull/1438)
|
||||
[@Daiz](https://github.com/Daiz)
|
||||
|
||||
* Allow separate parameters for gamma encoding and decoding.
|
||||
[#1439](https://github.com/lovell/sharp/pull/1439)
|
||||
[@Daiz](https://github.com/Daiz)
|
||||
|
||||
* Build prototype with `Object.assign` to allow minification.
|
||||
[#1475](https://github.com/lovell/sharp/pull/1475)
|
||||
[@jaubourg](https://github.com/jaubourg)
|
||||
|
||||
* Expose libvips' recombination matrix operation.
|
||||
[#1477](https://github.com/lovell/sharp/pull/1477)
|
||||
[@fromkeith](https://github.com/fromkeith)
|
||||
|
||||
* Expose libvips' pyramid/tile options for TIFF output.
|
||||
[#1483](https://github.com/lovell/sharp/pull/1483)
|
||||
[@mbklein](https://github.com/mbklein)
|
||||
|
||||
#### v0.21.0 - 4<sup>th</sup> October 2018
|
||||
|
||||
* Deprecate the following resize-related functions:
|
||||
`crop`, `embed`, `ignoreAspectRatio`, `max`, `min` and `withoutEnlargement`.
|
||||
Access to these is now via options passed to the `resize` function.
|
||||
For example:
|
||||
`embed('north')` is now `resize(width, height, { fit: 'contain', position: 'north' })`,
|
||||
`crop('attention')` is now `resize(width, height, { fit: 'cover', position: 'attention' })`,
|
||||
`max().withoutEnlargement()` is now `resize(width, height, { fit: 'inside', withoutEnlargement: true })`.
|
||||
[#1135](https://github.com/lovell/sharp/issues/1135)
|
||||
|
||||
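A migration sketch, with illustrative dimensions and an assumed `input` source:

```javascript
// Before (deprecated)
sharp(input).resize(200, 300).max().withoutEnlargement();

// After
sharp(input).resize(200, 300, { fit: 'inside', withoutEnlargement: true });
```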
* Deprecate the `background` function.
|
||||
Per-operation `background` options added to `resize`, `extend` and `flatten` operations.
|
||||
[#1392](https://github.com/lovell/sharp/issues/1392)
|
||||
|
||||
* Add `size` to `metadata` response (Stream and Buffer input only).
|
||||
[#695](https://github.com/lovell/sharp/issues/695)
|
||||
|
||||
* Switch from custom trim operation to `vips_find_trim`.
|
||||
[#914](https://github.com/lovell/sharp/issues/914)
|
||||
|
||||
* Add `chromaSubsampling` and `isProgressive` properties to `metadata` response.
|
||||
[#1186](https://github.com/lovell/sharp/issues/1186)
|
||||
|
||||
* Drop Node 4 support.
|
||||
[#1212](https://github.com/lovell/sharp/issues/1212)
|
||||
|
||||
* Enable SIMD convolution by default.
|
||||
[#1213](https://github.com/lovell/sharp/issues/1213)
|
||||
|
||||
* Add experimental prebuilt binaries for musl-based Linux.
|
||||
[#1379](https://github.com/lovell/sharp/issues/1379)
|
||||
|
||||
* Add support for arbitrary rotation angle via vips_rotate.
|
||||
[#1385](https://github.com/lovell/sharp/pull/1385)
|
||||
[@freezy](https://github.com/freezy)
|
||||
|
||||
### v0.20 - "*prebuild*"
|
||||
|
||||
Requires libvips v8.6.1.
|
||||
|
||||
#### v0.20.8 - 5<sup>th</sup> September 2018
|
||||
|
||||
* Avoid race conditions when creating directories during installation.
|
||||
[#1358](https://github.com/lovell/sharp/pull/1358)
|
||||
[@ajhool](https://github.com/ajhool)
|
||||
|
||||
* Accept floating point values for input density parameter.
|
||||
[#1362](https://github.com/lovell/sharp/pull/1362)
|
||||
[@aeirola](https://github.com/aeirola)
|
||||
|
||||
#### v0.20.7 - 21<sup>st</sup> August 2018
|
||||
|
||||
* Use copy+unlink if rename operation fails during installation.
|
||||
[#1345](https://github.com/lovell/sharp/issues/1345)
|
||||
|
||||
#### v0.20.6 - 20<sup>th</sup> August 2018
|
||||
|
||||
* Add removeAlpha operation to remove alpha channel, if any.
|
||||
[#1248](https://github.com/lovell/sharp/issues/1248)
|
||||
|
||||
* Expose mozjpeg quant_table flag.
|
||||
[#1285](https://github.com/lovell/sharp/pull/1285)
|
||||
[@rexxars](https://github.com/rexxars)
|
||||
|
||||
* Allow full WebP alphaQuality range of 0-100.
|
||||
[#1290](https://github.com/lovell/sharp/pull/1290)
|
||||
[@sylvaindumont](https://github.com/sylvaindumont)
|
||||
|
||||
* Cache libvips binaries to reduce re-install time.
|
||||
[#1301](https://github.com/lovell/sharp/issues/1301)
|
||||
|
||||
* Ensure vendor platform mismatch throws error at install time.
|
||||
[#1303](https://github.com/lovell/sharp/issues/1303)
|
||||
|
||||
* Improve install time error messages for FreeBSD users.
|
||||
[#1310](https://github.com/lovell/sharp/issues/1310)
|
||||
|
||||
* Ensure extractChannel works with 16-bit images.
|
||||
[#1330](https://github.com/lovell/sharp/issues/1330)
|
||||
|
||||
* Expose depth option for tile-based output.
|
||||
[#1342](https://github.com/lovell/sharp/pull/1342)
|
||||
[@alundavies](https://github.com/alundavies)
|
||||
|
||||
* Add experimental entropy field to stats response.
|
||||
|
||||
#### v0.20.5 - 27<sup>th</sup> June 2018
|
||||
|
||||
* Expose libjpeg optimize_coding flag.
|
||||
[#1265](https://github.com/lovell/sharp/pull/1265)
|
||||
[@tomlokhorst](https://github.com/tomlokhorst)
|
||||
|
||||
#### v0.20.4 - 20<sup>th</sup> June 2018
|
||||
|
||||
* Prevent possible rounding error when using shrink-on-load and 90/270 degree rotation.
|
||||
[#1241](https://github.com/lovell/sharp/issues/1241)
|
||||
[@anahit42](https://github.com/anahit42)
|
||||
|
||||
* Ensure extractChannel sets correct single-channel colour space interpretation.
|
||||
[#1257](https://github.com/lovell/sharp/issues/1257)
|
||||
[@jeremychone](https://github.com/jeremychone)
|
||||
|
||||
#### v0.20.3 - 29<sup>th</sup> May 2018
|
||||
|
||||
* Fix tint operation by ensuring LAB interpretation and allowing negative values.
|
||||
[#1235](https://github.com/lovell/sharp/issues/1235)
|
||||
[@wezside](https://github.com/wezside)
|
||||
|
||||
#### v0.20.2 - 28<sup>th</sup> April 2018
|
||||
|
||||
* Add tint operation to set image chroma.
|
||||
|
||||
5
docs/css/extra.css
Normal file
@@ -0,0 +1,5 @@
|
||||
/* Nest document subheadings in navigation */
|
||||
ul.subnav ul:not(.subnav) {
|
||||
padding-left: 2em;
|
||||
font-size: 80%;
|
||||
}
|
||||
@@ -13,8 +13,8 @@ Lanczos resampling ensures quality is not sacrificed for speed.
|
||||
As well as image resizing, operations such as
|
||||
rotation, extraction, compositing and gamma correction are available.
|
||||
|
||||
Most 64-bit OS X, Windows and Linux (glibc) systems running
|
||||
Node versions 4, 6, 8 and 10
|
||||
Most modern 64-bit OS X, Windows and Linux systems running
|
||||
Node versions 6, 8 and 10
|
||||
do not require any additional install or runtime dependencies.
|
||||
|
||||
[](https://coveralls.io/r/lovell/sharp?branch=master)
|
||||
@@ -37,7 +37,7 @@ and [Leaflet](https://github.com/turban/Leaflet.Zoomify).
|
||||
### Fast
|
||||
|
||||
This module is powered by the blazingly fast
|
||||
[libvips](https://github.com/jcupitt/libvips) image processing library,
|
||||
[libvips](https://github.com/libvips/libvips) image processing library,
|
||||
originally created in 1989 at Birkbeck College
|
||||
and currently maintained by
|
||||
[John Cupitt](https://github.com/jcupitt).
|
||||
@@ -112,10 +112,20 @@ the help and code contributions of the following people:
|
||||
* [Rik Heywood](https://github.com/rikh42)
|
||||
* [Thomas Parisot](https://github.com/oncletom)
|
||||
* [Nathan Graves](https://github.com/woolite64)
|
||||
* [Tom Lokhorst](https://github.com/tomlokhorst)
|
||||
* [Espen Hovlandsdal](https://github.com/rexxars)
|
||||
* [Sylvain Dumont](https://github.com/sylvaindumont)
|
||||
* [Alun Davies](https://github.com/alundavies)
|
||||
* [Aidan Hoolachan](https://github.com/ajhool)
|
||||
* [Axel Eirola](https://github.com/aeirola)
|
||||
* [Freezy](https://github.com/freezy)
|
||||
* [Julian Aubourg](https://github.com/jaubourg)
|
||||
* [Keith Belovay](https://github.com/fromkeith)
|
||||
* [Michael B. Klein](https://github.com/mbklein)
|
||||
|
||||
Thank you!
|
||||
|
||||
### Licence
|
||||
### Licensing
|
||||
|
||||
Copyright 2013, 2014, 2015, 2016, 2017, 2018 Lovell Fuller and contributors.
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@ yarn add sharp
|
||||
### Building from source
|
||||
|
||||
Pre-compiled binaries for sharp are provided for use with
|
||||
Node versions 4, 6, 8 and 9 on
|
||||
Node versions 6, 8 and 10 on
|
||||
64-bit Windows, OS X and Linux platforms.
|
||||
|
||||
Sharp will be built from source at install time when:
|
||||
@@ -27,7 +27,7 @@ Sharp will be built from source at install time when:
|
||||
Building from source requires:
|
||||
|
||||
* C++11 compatible compiler such as gcc 4.8+, clang 3.0+ or MSVC 2013+
|
||||
* [node-gyp](https://github.com/TooTallNate/node-gyp#installation) and its dependencies (includes Python)
|
||||
* [node-gyp](https://github.com/nodejs/node-gyp#installation) and its dependencies (includes Python 2.7)
|
||||
|
||||
## libvips
|
||||
|
||||
@@ -36,13 +36,14 @@ Building from source requires:
|
||||
[](https://travis-ci.org/lovell/sharp)
|
||||
|
||||
libvips and its dependencies are fetched and stored within `node_modules/sharp/vendor` during `npm install`.
|
||||
This involves an automated HTTPS download of approximately 7MB.
|
||||
This involves an automated HTTPS download of approximately 8MB.
|
||||
|
||||
Most recent Linux-based operating systems with glibc running on x64 and ARMv6+ CPUs should "just work", e.g.:
|
||||
Most Linux-based (glibc, musl) operating systems running on x64 and ARMv6+ CPUs should "just work", e.g.:
|
||||
|
||||
* Debian 7+
|
||||
* Ubuntu 14.04+
|
||||
* Centos 7+
|
||||
* Alpine 3.8+ (Node 8 and 10)
|
||||
* Fedora
|
||||
* openSUSE 13.2+
|
||||
* Archlinux
|
||||
@@ -61,9 +62,9 @@ and `LD_LIBRARY_PATH` at runtime.
|
||||
This allows the use of newer versions of libvips with older versions of sharp.
|
||||
|
||||
For 32-bit Intel CPUs and older Linux-based operating systems such as Centos 6,
|
||||
it is recommended to install a system-wide installation of libvips from source:
|
||||
compiling libvips from source is recommended.
|
||||
|
||||
https://jcupitt.github.io/libvips/install.html#building-libvips-from-a-source-tarball
|
||||
[https://libvips.github.io/libvips/install.html#building-libvips-from-a-source-tarball](https://libvips.github.io/libvips/install.html#building-libvips-from-a-source-tarball)
|
||||
|
||||
#### Alpine Linux
|
||||
|
||||
@@ -71,7 +72,9 @@ libvips is available in the
|
||||
[testing repository](https://pkgs.alpinelinux.org/packages?name=vips-dev):
|
||||
|
||||
```sh
|
||||
apk add vips-dev fftw-dev --update-cache --repository https://dl-3.alpinelinux.org/alpine/edge/testing/
|
||||
apk add vips-dev fftw-dev build-base --update-cache \
|
||||
--repository https://alpine.global.ssl.fastly.net/alpine/edge/testing/ \
|
||||
--repository https://alpine.global.ssl.fastly.net/alpine/edge/main
|
||||
```
|
||||
|
||||
The smaller stack size of musl libc means
|
||||
@@ -94,7 +97,8 @@ that it can be located using `pkg-config --modversion vips-cpp`.
|
||||
[](https://ci.appveyor.com/project/lovell/sharp)
|
||||
|
||||
libvips and its dependencies are fetched and stored within `node_modules\sharp\vendor` during `npm install`.
|
||||
This involves an automated HTTPS download of approximately 12MB.
|
||||
This involves an automated HTTPS download of approximately 13MB. If you are having issues during
installation, consider removing the directory `C:\Users\[user]\AppData\Roaming\npm-cache\_libvips`.
|
||||
|
||||
Only 64-bit (x64) `node.exe` is supported.
|
||||
|
||||
@@ -117,9 +121,6 @@ https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=193528
|
||||
|
||||
### Heroku
|
||||
|
||||
libvips and its dependencies are fetched and stored within `node_modules\sharp\vendor` during `npm install`.
|
||||
This involves an automated HTTPS download of approximately 7MB.
|
||||
|
||||
Set [NODE_MODULES_CACHE](https://devcenter.heroku.com/articles/nodejs-support#cache-behavior)
|
||||
to `false` when using the `yarn` package manager.
|
||||
|
||||
@@ -148,18 +149,40 @@ docker pull tailor/docker-libvips
|
||||
|
||||
### AWS Lambda
|
||||
|
||||
A [deployment package](http://docs.aws.amazon.com/lambda/latest/dg/nodejs-create-deployment-pkg.html) for the
|
||||
[Lambda Execution Environment](http://docs.aws.amazon.com/lambda/latest/dg/current-supported-versions.html)
|
||||
can be built using Docker.
|
||||
Set the Lambda runtime to Node.js 8.10.
|
||||
|
||||
The binaries in the `node_modules` directory of the
|
||||
[deployment package](https://docs.aws.amazon.com/lambda/latest/dg/nodejs-create-deployment-pkg.html)
|
||||
must be for the Linux x64 platform/architecture.
|
||||
|
||||
On non-Linux machines such as OS X and Windows run the following:
|
||||
|
||||
```sh
|
||||
rm -rf node_modules/sharp
|
||||
docker run -v "$PWD":/var/task lambci/lambda:build-nodejs6.10 npm install
|
||||
npm install --arch=x64 --platform=linux --target=8.10.0 sharp
|
||||
```
|
||||
|
||||
Set the Lambda runtime to Node.js 6.10.
|
||||
Alternatively a Docker container closely matching the Lambda runtime can be used:
|
||||
|
||||
To get the best performance select the largest memory available. A 1536 MB function provides ~12x more CPU time than a 128 MB function.
|
||||
```sh
|
||||
rm -rf node_modules/sharp
|
||||
docker run -v "$PWD":/var/task lambci/lambda:build-nodejs8.10 npm install sharp
|
||||
```
|
||||
|
||||
To get the best performance select the largest memory available.
|
||||
A 1536 MB function provides ~12x more CPU time than a 128 MB function.
|
||||
|
||||
### NW.js
|
||||
|
||||
Run the `nw-gyp` tool after installation.
|
||||
|
||||
```sh
|
||||
cd node_modules/sharp
|
||||
nw-gyp rebuild --arch=x64 --target=[your nw version]
|
||||
node node_modules/sharp/install/dll-copy
|
||||
```
|
||||
|
||||
[http://docs.nwjs.io/en/latest/For%20Users/Advanced/Use%20Native%20Node%20Modules/](http://docs.nwjs.io/en/latest/For%20Users/Advanced/Use%20Native%20Node%20Modules/)
|
||||
|
||||
### Build tools
|
||||
|
||||
@@ -187,28 +210,6 @@ and [Valgrind](http://valgrind.org/) have been used to test
|
||||
the most popular web-based formats, as well as libvips itself,
|
||||
you are advised to perform your own testing and sandboxing.
|
||||
|
||||
ImageMagick in particular has a relatively large attack surface,
|
||||
which can be partially mitigated with a
|
||||
[policy.xml](http://www.imagemagick.org/script/resources.php)
|
||||
configuration file to prevent the use of coders known to be vulnerable.
|
||||
|
||||
```xml
|
||||
<policymap>
|
||||
<policy domain="coder" rights="none" pattern="EPHEMERAL" />
|
||||
<policy domain="coder" rights="none" pattern="URL" />
|
||||
<policy domain="coder" rights="none" pattern="HTTPS" />
|
||||
<policy domain="coder" rights="none" pattern="MVG" />
|
||||
<policy domain="coder" rights="none" pattern="MSL" />
|
||||
<policy domain="coder" rights="none" pattern="TEXT" />
|
||||
<policy domain="coder" rights="none" pattern="SHOW" />
|
||||
<policy domain="coder" rights="none" pattern="WIN" />
|
||||
<policy domain="coder" rights="none" pattern="PLT" />
|
||||
</policymap>
|
||||
```
|
||||
|
||||
Set the `MAGICK_CONFIGURE_PATH` environment variable
|
||||
to the directory containing the `policy.xml` file.
|
||||
|
||||
### Pre-compiled libvips binaries
|
||||
|
||||
This module will attempt to download a pre-compiled bundle of libvips
|
||||
@@ -224,10 +225,18 @@ SHARP_IGNORE_GLOBAL_LIBVIPS=1 npm install sharp
|
||||
```
|
||||
|
||||
Should you need to manually download and inspect these files,
|
||||
you can do so via https://github.com/lovell/sharp-libvips/releases
|
||||
you can do so via
|
||||
[https://github.com/lovell/sharp-libvips/releases](https://github.com/lovell/sharp-libvips/releases)
|
||||
|
||||
Should you wish to install these from your own location,
|
||||
set the `SHARP_DIST_BASE_URL` environment variable, e.g.
|
||||
set the `sharp_dist_base_url` npm config option, e.g.
|
||||
|
||||
```sh
|
||||
npm config set sharp_dist_base_url "https://hostname/path/"
|
||||
npm install sharp
|
||||
```
|
||||
|
||||
or set the `SHARP_DIST_BASE_URL` environment variable, e.g.
|
||||
|
||||
```sh
|
||||
SHARP_DIST_BASE_URL="https://hostname/path/" npm install sharp
|
||||
@@ -253,6 +262,8 @@ Use of libraries under the terms of the LGPLv3 is via the
|
||||
| expat | MIT Licence |
|
||||
| fontconfig | [fontconfig Licence](https://cgit.freedesktop.org/fontconfig/tree/COPYING) (BSD-like) |
|
||||
| freetype | [freetype Licence](http://git.savannah.gnu.org/cgit/freetype/freetype2.git/tree/docs/FTL.TXT) (BSD-like) |
|
||||
| fribidi | LGPLv3 |
|
||||
| gettext | LGPLv3 |
|
||||
| giflib | MIT Licence |
|
||||
| glib | LGPLv3 |
|
||||
| harfbuzz | MIT Licence |
|
||||
|
||||
@@ -3,19 +3,17 @@
### Test environment

* AWS EC2 eu-west-1 [c5.large](https://aws.amazon.com/ec2/instance-types/c5/) (2x Xeon Platinum 8124M CPU @ 3.00GHz)
* Ubuntu 17.10 (hvm:ebs-ssd, 20180102, ami-0741d47e)
* Node.js v8.9.4
* Ubuntu 18.04 (hvm-ssd/ubuntu-bionic-18.04-amd64-server-20180912 ami-00035f41c82244dab)
* Node.js v10.11.0

### The contenders

* [jimp](https://www.npmjs.com/package/jimp) v0.2.28 - Image processing in pure JavaScript. Bilinear interpolation only.
* [pajk-lwip](https://www.npmjs.com/package/pajk-lwip) v0.2.0 (fork) - Wrapper around CImg that compiles dependencies from source.
* [mapnik](https://www.npmjs.org/package/mapnik) v3.6.2 - Whilst primarily a map renderer, Mapnik contains bitmap image utilities.
* [jimp](https://www.npmjs.com/package/jimp) v0.5.3 - Image processing in pure JavaScript. Provides bicubic interpolation.
* [mapnik](https://www.npmjs.org/package/mapnik) v4.0.1 - Whilst primarily a map renderer, Mapnik contains bitmap image utilities.
* [imagemagick-native](https://www.npmjs.com/package/imagemagick-native) v1.9.3 - Wrapper around libmagick++, supports Buffers only.
* [imagemagick](https://www.npmjs.com/package/imagemagick) v0.1.3 - Supports filesystem only and "*has been unmaintained for a long time*".
* [gm](https://www.npmjs.com/package/gm) v1.23.1 - Fully featured wrapper around GraphicsMagick's `gm` command line utility.
* [images](https://www.npmjs.com/package/images) v3.0.1 - Compiles dependencies from source. Provides bicubic interpolation.
* sharp v0.19.0 / libvips v8.6.1 - Caching within libvips disabled to ensure a fair comparison.
* sharp v0.21.0 / libvips v8.7.0 - Caching within libvips disabled to ensure a fair comparison.

### The task

@@ -27,19 +25,14 @@ then compress to JPEG at a "quality" setting of 80.

| Module | Input | Output | Ops/sec | Speed-up |
| :----------------- | :----- | :----- | ------: | -------: |
| jimp (bilinear) | buffer | buffer | 1.14 | 1.0 |
| lwip | buffer | buffer | 1.86 | 1.6 |
| mapnik | buffer | buffer | 3.34 | 2.9 |
| imagemagick-native | buffer | buffer | 4.13 | 3.6 |
| gm | buffer | buffer | 4.21 | 3.7 |
| gm | file | file | 4.27 | 3.7 |
| imagemagick | file | file | 4.67 | 4.1 |
| images (bicubic) | file | file | 6.22 | 5.5 |
| sharp | stream | stream | 24.43 | 21.4 |
| sharp | file | file | 25.97 | 22.7 |
| sharp | file | buffer | 26.00 | 22.8 |
| sharp | buffer | file | 26.33 | 23.0 |
| sharp | buffer | buffer | 26.43 | 23.1 |
| jimp | buffer | buffer | 0.71 | 1.0 |
| mapnik | buffer | buffer | 3.32 | 4.7 |
| gm | buffer | buffer | 3.97 | 5.6 |
| imagemagick-native | buffer | buffer | 4.06 | 5.7 |
| imagemagick | file | file | 4.24 | 6.0 |
| sharp | stream | stream | 25.30 | 35.6 |
| sharp | file | file | 26.17 | 36.9 |
| sharp | buffer | buffer | 26.45 | 37.3 |

Greater libvips performance can be expected with caching enabled (default)
and using 8+ core machines, especially those with larger L1/L2 CPU caches.
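The caching referred to above can be controlled from JavaScript. A minimal sketch, assuming only the documented `sharp.cache` and `sharp.concurrency` helpers; the option values shown are illustrative:

```js
const sharp = require('sharp');

// The benchmark disables the libvips operation cache for fairness;
// in production it is usually left on or tuned upwards.
sharp.cache({ memory: 200, files: 20, items: 200 });

// Limit (or raise) the number of threads libvips uses per image.
sharp.concurrency(4);

sharp('input.jpg')
  .resize(720, 480)
  .jpeg({ quality: 80 })
  .toFile('output.jpg')
  .then(info => console.log(info))
  .catch(err => console.error(err));
```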
@@ -57,7 +50,7 @@ brew install mapnik
```

```sh
sudo apt-get install imagemagick libmagick++-dev graphicsmagick mapnik-dev
sudo apt-get install imagemagick libmagick++-dev graphicsmagick libmapnik-dev
```

```sh
@@ -4,6 +4,7 @@ const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
const copyFileSync = require('fs-copy-file-sync');
|
||||
const libvips = require('../lib/libvips');
|
||||
const npmLog = require('npmlog');
|
||||
|
||||
if (process.platform === 'win32') {
|
||||
@@ -11,8 +12,8 @@ if (process.platform === 'win32') {
|
||||
const buildReleaseDir = path.join(buildDir, 'Release');
|
||||
npmLog.info('sharp', `Creating ${buildReleaseDir}`);
|
||||
try {
|
||||
fs.mkdirSync(buildDir);
|
||||
fs.mkdirSync(buildReleaseDir);
|
||||
libvips.mkdirSync(buildDir);
|
||||
libvips.mkdirSync(buildReleaseDir);
|
||||
} catch (err) {}
|
||||
const vendorLibDir = path.join(__dirname, '..', 'vendor', 'lib');
|
||||
npmLog.info('sharp', `Copying DLLs from ${vendorLibDir} to ${buildReleaseDir}`);
|
||||
|
||||
@@ -9,13 +9,32 @@ const npmLog = require('npmlog');
|
||||
const semver = require('semver');
|
||||
const simpleGet = require('simple-get');
|
||||
const tar = require('tar');
|
||||
const copyFileSync = require('fs-copy-file-sync');
|
||||
|
||||
const agent = require('../lib/agent');
|
||||
const libvips = require('../lib/libvips');
|
||||
const platform = require('../lib/platform');
|
||||
|
||||
const minimumLibvipsVersion = libvips.minimumLibvipsVersion;
|
||||
const distBaseUrl = process.env.SHARP_DIST_BASE_URL || `https://github.com/lovell/sharp-libvips/releases/download/v${minimumLibvipsVersion}/`;
|
||||
const distBaseUrl = process.env.npm_config_sharp_dist_base_url || process.env.SHARP_DIST_BASE_URL || `https://github.com/lovell/sharp-libvips/releases/download/v${minimumLibvipsVersion}/`;
|
||||
|
||||
const fail = function (err) {
|
||||
npmLog.error('sharp', err.message);
|
||||
npmLog.error('sharp', 'Please see http://sharp.pixelplumbing.com/page/install');
|
||||
process.exit(1);
|
||||
};
|
||||
|
||||
const extractTarball = function (tarPath) {
|
||||
const vendorPath = path.join(__dirname, '..', 'vendor');
|
||||
libvips.mkdirSync(vendorPath);
|
||||
tar
|
||||
.extract({
|
||||
file: tarPath,
|
||||
cwd: vendorPath,
|
||||
strict: true
|
||||
})
|
||||
.catch(fail);
|
||||
};
|
||||
|
||||
try {
|
||||
const useGlobalLibvips = libvips.useGlobalLibvips();
|
||||
@@ -29,54 +48,54 @@ try {
|
||||
} else {
|
||||
// Is this arch/platform supported?
|
||||
const arch = process.env.npm_config_arch || process.arch;
|
||||
if (platform() === 'win32-ia32') {
|
||||
const platformAndArch = platform();
|
||||
if (platformAndArch === 'win32-ia32') {
|
||||
throw new Error('Windows x86 (32-bit) node.exe is not supported');
|
||||
}
|
||||
if (arch === 'ia32') {
|
||||
throw new Error(`Intel Architecture 32-bit systems require manual installation of libvips >= ${minimumLibvipsVersion}\n`);
|
||||
throw new Error(`Intel Architecture 32-bit systems require manual installation of libvips >= ${minimumLibvipsVersion}`);
|
||||
}
|
||||
if (detectLibc.isNonGlibcLinux) {
|
||||
throw new Error(`Use with ${detectLibc.family} libc requires manual installation of libvips >= ${minimumLibvipsVersion}`);
|
||||
if (platformAndArch === 'freebsd-x64' || platformAndArch === 'openbsd-x64' || platformAndArch === 'sunos-x64') {
|
||||
throw new Error(`BSD/SunOS systems require manual installation of libvips >= ${minimumLibvipsVersion}`);
|
||||
}
|
||||
if (detectLibc.family === detectLibc.GLIBC && detectLibc.version && semver.lt(`${detectLibc.version}.0`, '2.13.0')) {
|
||||
throw new Error(`Use with glibc version ${detectLibc.version} requires manual installation of libvips >= ${minimumLibvipsVersion}`);
|
||||
}
|
||||
// Download to per-process temporary file
|
||||
const tarFilename = ['libvips', minimumLibvipsVersion, platform()].join('-') + '.tar.gz';
|
||||
const tarPathTemp = path.join(os.tmpdir(), `${process.pid}-${tarFilename}`);
|
||||
const tmpFile = fs.createWriteStream(tarPathTemp);
|
||||
const url = distBaseUrl + tarFilename;
|
||||
npmLog.info('sharp', `Downloading ${url}`);
|
||||
simpleGet({ url: url, agent: agent() }, function (err, response) {
|
||||
if (err) {
|
||||
throw err;
|
||||
}
|
||||
if (response.statusCode !== 200) {
|
||||
throw new Error(`Status ${response.statusCode}`);
|
||||
}
|
||||
response.pipe(tmpFile);
|
||||
});
|
||||
tmpFile.on('close', function () {
|
||||
const vendorPath = path.join(__dirname, '..', 'vendor');
|
||||
fs.mkdirSync(vendorPath);
|
||||
tar
|
||||
.extract({
|
||||
file: tarPathTemp,
|
||||
cwd: vendorPath,
|
||||
strict: true
|
||||
})
|
||||
.then(function () {
|
||||
try {
|
||||
fs.unlinkSync(tarPathTemp);
|
||||
} catch (err) {}
|
||||
})
|
||||
.catch(function (err) {
|
||||
const tarFilename = ['libvips', minimumLibvipsVersion, platformAndArch].join('-') + '.tar.gz';
|
||||
const tarPathCache = path.join(libvips.cachePath(), tarFilename);
|
||||
if (fs.existsSync(tarPathCache)) {
|
||||
npmLog.info('sharp', `Using cached ${tarPathCache}`);
|
||||
extractTarball(tarPathCache);
|
||||
} else {
|
||||
const tarPathTemp = path.join(os.tmpdir(), `${process.pid}-${tarFilename}`);
|
||||
const tmpFile = fs.createWriteStream(tarPathTemp);
|
||||
const url = distBaseUrl + tarFilename;
|
||||
npmLog.info('sharp', `Downloading ${url}`);
|
||||
simpleGet({ url: url, agent: agent() }, function (err, response) {
|
||||
if (err) {
|
||||
throw err;
|
||||
}
|
||||
if (response.statusCode !== 200) {
|
||||
throw new Error(`Status ${response.statusCode}`);
|
||||
}
|
||||
response.pipe(tmpFile);
|
||||
});
|
||||
tmpFile
|
||||
.on('error', fail)
|
||||
.on('close', function () {
|
||||
try {
|
||||
// Attempt to rename
|
||||
fs.renameSync(tarPathTemp, tarPathCache);
|
||||
} catch (err) {
|
||||
// Fall back to copy and unlink
|
||||
copyFileSync(tarPathTemp, tarPathCache);
|
||||
fs.unlinkSync(tarPathTemp);
|
||||
}
|
||||
extractTarball(tarPathCache);
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
} catch (err) {
|
||||
npmLog.error('sharp', err.message);
|
||||
npmLog.error('sharp', 'Please see http://sharp.pixelplumbing.com/page/install');
|
||||
process.exit(1);
|
||||
fail(err);
|
||||
}
|
||||
|
||||
@@ -12,6 +12,23 @@ const bool = {
|
||||
eor: 'eor'
|
||||
};
|
||||
|
||||
/**
|
||||
* Remove alpha channel, if any. This is a no-op if the image does not have an alpha channel.
|
||||
*
|
||||
* @example
|
||||
* sharp('rgba.png')
|
||||
* .removeAlpha()
|
||||
* .toFile('rgb.png', function(err, info) {
|
||||
* // rgb.png is a 3 channel image without an alpha channel
|
||||
* });
|
||||
*
|
||||
* @returns {Sharp}
|
||||
*/
|
||||
function removeAlpha () {
|
||||
this.options.removeAlpha = true;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract a single channel from a multi-channel image.
|
||||
*
|
||||
@@ -100,13 +117,12 @@ function bandbool (boolOp) {
|
||||
* @private
|
||||
*/
|
||||
module.exports = function (Sharp) {
|
||||
// Public instance functions
|
||||
[
|
||||
Object.assign(Sharp.prototype, {
|
||||
// Public instance functions
|
||||
removeAlpha,
|
||||
extractChannel,
|
||||
joinChannel,
|
||||
bandbool
|
||||
].forEach(function (f) {
|
||||
Sharp.prototype[f.name] = f;
|
||||
});
|
||||
// Class attributes
|
||||
Sharp.bool = bool;
|
||||
|
||||
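The registration change above replaces a `forEach` over named functions with `Object.assign` on the prototype. A small standalone sketch of the two equivalent patterns, using a hypothetical `Example` constructor rather than sharp itself:

```js
// Hypothetical constructor used only to illustrate the decoration pattern.
function Example () {}

function removeAlpha () { return this; }
function extractChannel () { return this; }

// Old style: derive the property key from Function.prototype.name.
[removeAlpha, extractChannel].forEach(function (f) {
  Example.prototype[f.name] = f;
});

// New style: property names are written out explicitly, so the code
// no longer relies on Function.prototype.name to pick the key.
Object.assign(Example.prototype, {
  removeAlpha,
  extractChannel
});
```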
@@ -1,5 +1,7 @@
|
||||
'use strict';
|
||||
|
||||
const deprecate = require('util').deprecate;
|
||||
|
||||
const color = require('color');
|
||||
const is = require('./is');
|
||||
|
||||
@@ -16,25 +18,20 @@ const colourspace = {
|
||||
};
|
||||
|
||||
/**
|
||||
* Set the background for the `embed`, `flatten` and `extend` operations.
|
||||
* The default background is `{r: 0, g: 0, b: 0, alpha: 1}`, black without transparency.
|
||||
*
|
||||
* Delegates to the _color_ module, which can throw an Error
|
||||
* but is liberal in what it accepts, clipping values to sensible min/max.
|
||||
* The alpha value is a float between `0` (transparent) and `1` (opaque).
|
||||
*
|
||||
* @param {String|Object} rgba - parsed by the [color](https://www.npmjs.org/package/color) module to extract values for red, green, blue and alpha.
|
||||
* @returns {Sharp}
|
||||
* @throws {Error} Invalid parameter
|
||||
* @deprecated
|
||||
* @private
|
||||
*/
|
||||
function background (rgba) {
|
||||
const colour = color(rgba);
|
||||
this.options.background = [
|
||||
const background = [
|
||||
colour.red(),
|
||||
colour.green(),
|
||||
colour.blue(),
|
||||
Math.round(colour.alpha() * 255)
|
||||
];
|
||||
this.options.resizeBackground = background;
|
||||
this.options.extendBackground = background;
|
||||
this.options.flattenBackground = background.slice(0, 3);
|
||||
return this;
|
||||
}
|
||||
|
||||
@@ -80,7 +77,7 @@ function grayscale (grayscale) {
|
||||
/**
|
||||
* Set the output colourspace.
|
||||
* By default output image will be web-friendly sRGB, with additional channels interpreted as alpha channels.
|
||||
* @param {String} [colourspace] - output colourspace e.g. `srgb`, `rgb`, `cmyk`, `lab`, `b-w` [...](https://github.com/jcupitt/libvips/blob/master/libvips/iofuncs/enumtypes.c#L568)
|
||||
* @param {String} [colourspace] - output colourspace e.g. `srgb`, `rgb`, `cmyk`, `lab`, `b-w` [...](https://github.com/libvips/libvips/blob/master/libvips/iofuncs/enumtypes.c#L568)
|
||||
* @returns {Sharp}
|
||||
* @throws {Error} Invalid parameters
|
||||
*/
|
||||
@@ -102,23 +99,43 @@ function toColorspace (colorspace) {
|
||||
return this.toColourspace(colorspace);
|
||||
}
|
||||
|
||||
/**
|
||||
* Update a colour attribute of the this.options Object.
|
||||
* @private
|
||||
* @param {String} key
|
||||
* @param {String|Object} val
|
||||
* @throws {Error} Invalid key
|
||||
*/
|
||||
function _setColourOption (key, val) {
|
||||
if (is.object(val) || is.string(val)) {
|
||||
const colour = color(val);
|
||||
this.options[key] = [
|
||||
colour.red(),
|
||||
colour.green(),
|
||||
colour.blue(),
|
||||
Math.round(colour.alpha() * 255)
|
||||
];
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Decorate the Sharp prototype with colour-related functions.
|
||||
* @private
|
||||
*/
|
||||
module.exports = function (Sharp) {
|
||||
// Public instance functions
|
||||
[
|
||||
background,
|
||||
Object.assign(Sharp.prototype, {
|
||||
// Public
|
||||
tint,
|
||||
greyscale,
|
||||
grayscale,
|
||||
toColourspace,
|
||||
toColorspace
|
||||
].forEach(function (f) {
|
||||
Sharp.prototype[f.name] = f;
|
||||
toColorspace,
|
||||
// Private
|
||||
_setColourOption
|
||||
});
|
||||
// Class attributes
|
||||
Sharp.colourspace = colourspace;
|
||||
Sharp.colorspace = colourspace;
|
||||
// Deprecated
|
||||
Sharp.prototype.background = deprecate(background, 'background(background) is deprecated, use resize({ background }), extend({ background }) or flatten({ background }) instead');
|
||||
};
|
||||
|
||||
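For code still calling the deprecated `background()`, a minimal sketch of the option-based replacements named in the deprecation message above; file names and colours are illustrative:

```js
const sharp = require('sharp');

// Background for the resize/embed case, previously background() + embed().
sharp('input.png')
  .resize(200, 300, {
    fit: 'contain',
    background: { r: 255, g: 255, b: 255, alpha: 0.5 }
  })
  .toFile('contained.png')
  .catch(err => console.error(err));

// Background for flattening an alpha channel, previously background() + flatten().
sharp('rgba.png')
  .flatten({ background: '#ff6600' })
  .toFile('flattened.jpg')
  .catch(err => console.error(err));
```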
@@ -19,8 +19,7 @@ const is = require('./is');
|
||||
* .overlayWith('overlay.png', { gravity: sharp.gravity.southeast } )
|
||||
* .sharpen()
|
||||
* .withMetadata()
|
||||
* .quality(90)
|
||||
* .webp()
|
||||
* .webp( { quality: 90 } )
|
||||
* .toBuffer()
|
||||
* .then(function(outputBuffer) {
|
||||
* // outputBuffer contains upside down, 300px wide, alpha channel flattened
|
||||
@@ -35,7 +34,7 @@ const is = require('./is');
|
||||
* @param {Number} [options.left] - the pixel offset from the left edge.
|
||||
* @param {Boolean} [options.tile=false] - set to true to repeat the overlay image across the entire image with the given `gravity`.
|
||||
* @param {Boolean} [options.cutout=false] - set to true to apply only the alpha channel of the overlay image to the input image, giving the appearance of one image being cut out of another.
|
||||
* @param {Number} [options.density=72] - integral number representing the DPI for vector overlay image.
|
||||
* @param {Number} [options.density=72] - number representing the DPI for vector overlay image.
|
||||
* @param {Object} [options.raw] - describes overlay when using raw pixel data.
|
||||
* @param {Number} [options.raw.width]
|
||||
* @param {Number} [options.raw.height]
|
||||
|
||||
@@ -4,42 +4,10 @@ const path = require('path');
|
||||
const util = require('util');
|
||||
const stream = require('stream');
|
||||
const events = require('events');
|
||||
const semver = require('semver');
|
||||
const is = require('./is');
|
||||
const platform = require('./platform');
|
||||
const sharp = require('../build/Release/sharp.node');
|
||||
|
||||
// Vendor platform
|
||||
(function () {
|
||||
let vendorPlatformId;
|
||||
try {
|
||||
vendorPlatformId = require('../vendor/platform.json');
|
||||
} catch (err) {
|
||||
return;
|
||||
}
|
||||
const currentPlatformId = platform();
|
||||
/* istanbul ignore if */
|
||||
if (currentPlatformId !== vendorPlatformId) {
|
||||
throw new Error(`'${vendorPlatformId}' binaries cannot be used on the '${currentPlatformId}' platform. Please remove the 'node_modules/sharp/vendor' directory and run 'npm rebuild'.`);
|
||||
}
|
||||
})();
|
||||
|
||||
// Versioning
|
||||
let versions = {
|
||||
vips: sharp.libvipsVersion()
|
||||
};
|
||||
(function () {
|
||||
// Does libvips meet minimum requirement?
|
||||
const libvipsVersionMin = require('../package.json').config.libvips;
|
||||
/* istanbul ignore if */
|
||||
if (semver.lt(versions.vips, libvipsVersionMin)) {
|
||||
throw new Error('Found libvips ' + versions.vips + ' but require at least ' + libvipsVersionMin);
|
||||
}
|
||||
// Include versions of dependencies, if present
|
||||
try {
|
||||
versions = require('../vendor/versions.json');
|
||||
} catch (err) {}
|
||||
})();
|
||||
require('./libvips').hasVendoredLibvips();
|
||||
const sharp = require('bindings')('sharp.node');
|
||||
|
||||
// Use NODE_DEBUG=sharp to enable libvips warnings
|
||||
const debuglog = util.debuglog('sharp');
|
||||
@@ -81,7 +49,7 @@ const debuglog = util.debuglog('sharp');
|
||||
* width: 300,
|
||||
* height: 200,
|
||||
* channels: 4,
|
||||
* background: { r: 255, g: 0, b: 0, alpha: 128 }
|
||||
* background: { r: 255, g: 0, b: 0, alpha: 0.5 }
|
||||
* }
|
||||
* })
|
||||
* .png()
|
||||
@@ -96,7 +64,7 @@ const debuglog = util.debuglog('sharp');
|
||||
* @param {Boolean} [options.failOnError=false] - by default apply a "best effort"
|
||||
* to decode images, even if the data is corrupt or invalid. Set this flag to true
|
||||
* if you'd rather halt processing and raise an error when loading invalid images.
|
||||
* @param {Number} [options.density=72] - integral number representing the DPI for vector images.
|
||||
* @param {Number} [options.density=72] - number representing the DPI for vector images.
|
||||
* @param {Number} [options.page=0] - page number to extract for multi-page input (GIF, TIFF)
|
||||
* @param {Object} [options.raw] - describes raw pixel input image data. See `raw()` for pixel ordering.
|
||||
* @param {Number} [options.raw.width]
|
||||
@@ -136,10 +104,12 @@ const Sharp = function (input, options) {
|
||||
width: -1,
|
||||
height: -1,
|
||||
canvas: 'crop',
|
||||
crop: 0,
|
||||
embed: 0,
|
||||
position: 0,
|
||||
resizeBackground: [0, 0, 0, 255],
|
||||
useExifOrientation: false,
|
||||
angle: 0,
|
||||
rotationAngle: 0,
|
||||
rotationBackground: [0, 0, 0, 255],
|
||||
rotateBeforePreExtract: false,
|
||||
flip: false,
|
||||
flop: false,
|
||||
@@ -147,14 +117,15 @@ const Sharp = function (input, options) {
|
||||
extendBottom: 0,
|
||||
extendLeft: 0,
|
||||
extendRight: 0,
|
||||
extendBackground: [0, 0, 0, 255],
|
||||
withoutEnlargement: false,
|
||||
kernel: 'lanczos3',
|
||||
fastShrinkOnLoad: true,
|
||||
// operations
|
||||
background: [0, 0, 0, 255],
|
||||
tintA: 0,
|
||||
tintB: 0,
|
||||
tintA: 128,
|
||||
tintB: 128,
|
||||
flatten: false,
|
||||
flattenBackground: [0, 0, 0],
|
||||
negate: false,
|
||||
medianSize: 0,
|
||||
blurSigma: 0,
|
||||
@@ -163,14 +134,16 @@ const Sharp = function (input, options) {
|
||||
sharpenJagged: 2,
|
||||
threshold: 0,
|
||||
thresholdGrayscale: true,
|
||||
trimTolerance: 0,
|
||||
trimThreshold: 0,
|
||||
gamma: 0,
|
||||
gammaOut: 0,
|
||||
greyscale: false,
|
||||
normalise: 0,
|
||||
booleanBufferIn: null,
|
||||
booleanFileIn: '',
|
||||
joinChannelIn: [],
|
||||
extractChannel: -1,
|
||||
removeAlpha: false,
|
||||
colourspace: 'srgb',
|
||||
// overlay
|
||||
overlayGravity: 0,
|
||||
@@ -192,6 +165,8 @@ const Sharp = function (input, options) {
|
||||
jpegTrellisQuantisation: false,
|
||||
jpegOvershootDeringing: false,
|
||||
jpegOptimiseScans: false,
|
||||
jpegOptimiseCoding: true,
|
||||
jpegQuantisationTable: 0,
|
||||
pngProgressive: false,
|
||||
pngCompressionLevel: 9,
|
||||
pngAdaptiveFiltering: false,
|
||||
@@ -202,7 +177,11 @@ const Sharp = function (input, options) {
|
||||
tiffQuality: 80,
|
||||
tiffCompression: 'jpeg',
|
||||
tiffPredictor: 'horizontal',
|
||||
tiffPyramid: false,
|
||||
tiffSquash: false,
|
||||
tiffTile: false,
|
||||
tiffTileHeight: 256,
|
||||
tiffTileWidth: 256,
|
||||
tiffXres: 1.0,
|
||||
tiffYres: 1.0,
|
||||
tileSize: 256,
|
||||
@@ -248,7 +227,12 @@ Sharp.format = sharp.format();
|
||||
* @example
|
||||
* console.log(sharp.versions);
|
||||
*/
|
||||
Sharp.versions = versions;
|
||||
Sharp.versions = {
|
||||
vips: sharp.libvipsVersion()
|
||||
};
|
||||
try {
|
||||
Sharp.versions = require('../vendor/versions.json');
|
||||
} catch (err) {}
|
||||
|
||||
/**
|
||||
* Export constructor.
|
||||
|
||||
14 lib/input.js
@@ -36,7 +36,7 @@ function _createInputDescriptor (input, inputOptions, containerOptions) {
|
||||
}
|
||||
// Density
|
||||
if (is.defined(inputOptions.density)) {
|
||||
if (is.integer(inputOptions.density) && is.inRange(inputOptions.density, 1, 2400)) {
|
||||
if (is.inRange(inputOptions.density, 1, 2400)) {
|
||||
inputDescriptor.density = inputOptions.density;
|
||||
} else {
|
||||
throw new Error('Invalid density (1 to 2400) ' + inputOptions.density);
|
||||
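A minimal sketch of the relaxed `density` validation above, assuming an illustrative SVG input:

```js
const sharp = require('sharp');

// Rasterise a vector input at a higher DPI than the default of 72;
// any number in the 1-2400 range is now accepted, not only integers.
sharp('logo.svg', { density: 300 })
  .png()
  .toFile('logo.png')
  .catch(err => console.error(err));
```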
@@ -177,12 +177,15 @@ function clone () {
|
||||
* A Promises/A+ promise is returned when `callback` is not provided.
|
||||
*
|
||||
* - `format`: Name of decoder used to decompress image data e.g. `jpeg`, `png`, `webp`, `gif`, `svg`
|
||||
* - `size`: Total size of image in bytes, for Stream and Buffer input only
|
||||
* - `width`: Number of pixels wide (EXIF orientation is not taken into consideration)
|
||||
* - `height`: Number of pixels high (EXIF orientation is not taken into consideration)
|
||||
* - `space`: Name of colour space interpretation e.g. `srgb`, `rgb`, `cmyk`, `lab`, `b-w` [...](https://github.com/jcupitt/libvips/blob/master/libvips/iofuncs/enumtypes.c#L636)
|
||||
* - `space`: Name of colour space interpretation e.g. `srgb`, `rgb`, `cmyk`, `lab`, `b-w` [...](https://github.com/libvips/libvips/blob/master/libvips/iofuncs/enumtypes.c#L636)
|
||||
* - `channels`: Number of bands e.g. `3` for sRGB, `4` for CMYK
|
||||
* - `depth`: Name of pixel depth format e.g. `uchar`, `char`, `ushort`, `float` [...](https://github.com/jcupitt/libvips/blob/master/libvips/iofuncs/enumtypes.c#L672)
|
||||
* - `depth`: Name of pixel depth format e.g. `uchar`, `char`, `ushort`, `float` [...](https://github.com/libvips/libvips/blob/master/libvips/iofuncs/enumtypes.c#L672)
|
||||
* - `density`: Number of pixels per inch (DPI), if present
|
||||
* - `chromaSubsampling`: String containing JPEG chroma subsampling, `4:2:0` or `4:4:4` for RGB, `4:2:0:4` or `4:4:4:4` for CMYK
|
||||
* - `isProgressive`: Boolean indicating whether the image is interlaced using a progressive scan
|
||||
* - `hasProfile`: Boolean indicating the presence of an embedded ICC profile
|
||||
* - `hasAlpha`: Boolean indicating the presence of an alpha transparency channel
|
||||
* - `orientation`: Number value of the EXIF Orientation header, if present
|
||||
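A minimal sketch of reading the fields listed above with the promise-returning form of `metadata()`; the input file name is illustrative:

```js
const sharp = require('sharp');

// Read header fields without decoding the pixel data.
sharp('input.jpg')
  .metadata()
  .then(metadata => {
    console.log(metadata.format, metadata.width, metadata.height);
    console.log(metadata.space, metadata.channels, metadata.hasAlpha);
  })
  .catch(err => console.error(err));
```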
@@ -264,6 +267,7 @@ function metadata (callback) {
|
||||
* - `maxX` (x-coordinate of one of the pixels where the maximum lies)
* - `maxY` (y-coordinate of one of the pixels where the maximum lies)
|
||||
* - `isOpaque`: Value to identify if the image is opaque or transparent, based on the presence and use of alpha channel
|
||||
* - `entropy`: Histogram-based estimation of greyscale entropy, discarding alpha channel if any (experimental)
|
||||
*
|
||||
* @example
|
||||
* const image = sharp(inputJpg);
|
||||
@@ -358,7 +362,7 @@ function sequentialRead (sequentialRead) {
|
||||
* @private
|
||||
*/
|
||||
module.exports = function (Sharp) {
|
||||
[
|
||||
Object.assign(Sharp.prototype, {
|
||||
// Private
|
||||
_createInputDescriptor,
|
||||
_write,
|
||||
@@ -370,7 +374,5 @@ module.exports = function (Sharp) {
|
||||
stats,
|
||||
limitInputPixels,
|
||||
sequentialRead
|
||||
].forEach(function (f) {
|
||||
Sharp.prototype[f.name] = f;
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,17 +1,38 @@
|
||||
'use strict';
|
||||
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const path = require('path');
|
||||
const spawnSync = require('child_process').spawnSync;
|
||||
const semver = require('semver');
|
||||
const platform = require('./platform');
|
||||
|
||||
const minimumLibvipsVersion = process.env.npm_package_config_libvips || require('../package.json').config.libvips;
|
||||
const env = process.env;
|
||||
const minimumLibvipsVersion = env.npm_package_config_libvips || require('../package.json').config.libvips;
|
||||
|
||||
const spawnSyncOptions = {
|
||||
encoding: 'utf8',
|
||||
shell: true
|
||||
};
|
||||
|
||||
const mkdirSync = function (dirPath) {
|
||||
try {
|
||||
fs.mkdirSync(dirPath);
|
||||
} catch (err) {
|
||||
if (err.code !== 'EEXIST') {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const cachePath = function () {
|
||||
const npmCachePath = env.npm_config_cache || (env.APPDATA ? path.join(env.APPDATA, 'npm-cache') : path.join(os.homedir(), '.npm'));
|
||||
mkdirSync(npmCachePath);
|
||||
const libvipsCachePath = path.join(npmCachePath, '_libvips');
|
||||
mkdirSync(libvipsCachePath);
|
||||
return libvipsCachePath;
|
||||
};
|
||||
|
||||
const globalLibvipsVersion = function () {
|
||||
if (process.platform !== 'win32') {
|
||||
const globalLibvipsVersion = spawnSync(`PKG_CONFIG_PATH="${pkgConfigPath()}" pkg-config --modversion vips-cpp`, spawnSyncOptions).stdout || '';
|
||||
@@ -23,21 +44,30 @@ const globalLibvipsVersion = function () {
|
||||
|
||||
const hasVendoredLibvips = function () {
|
||||
const currentPlatformId = platform();
|
||||
const vendorPath = path.join(__dirname, '..', 'vendor');
|
||||
let vendorVersionId;
|
||||
let vendorPlatformId;
|
||||
try {
|
||||
const vendorPlatformId = require(path.join(__dirname, '..', 'vendor', 'platform.json'));
|
||||
vendorVersionId = require(path.join(vendorPath, 'versions.json')).vips;
|
||||
vendorPlatformId = require(path.join(vendorPath, 'platform.json'));
|
||||
} catch (err) {}
|
||||
if (vendorVersionId && vendorVersionId !== minimumLibvipsVersion) {
|
||||
throw new Error(`Found vendored libvips v${vendorVersionId} but require v${minimumLibvipsVersion}. Please remove the 'node_modules/sharp/vendor' directory and run 'npm install'.`);
|
||||
}
|
||||
if (vendorPlatformId) {
|
||||
if (currentPlatformId === vendorPlatformId) {
|
||||
return true;
|
||||
} else {
|
||||
throw new Error(`'${vendorPlatformId}' binaries cannot be used on the '${currentPlatformId}' platform. Please remove the 'node_modules/sharp/vendor' directory and run 'npm install'.`);
|
||||
}
|
||||
} catch (err) {}
|
||||
}
|
||||
return false;
|
||||
};
|
||||
|
||||
const pkgConfigPath = function () {
|
||||
if (process.platform !== 'win32') {
|
||||
const brewPkgConfigPath = spawnSync('which brew >/dev/null 2>&1 && eval $(brew --env) && echo $PKG_CONFIG_LIBDIR', spawnSyncOptions).stdout || '';
|
||||
return [brewPkgConfigPath.trim(), process.env.PKG_CONFIG_PATH, '/usr/local/lib/pkgconfig', '/usr/lib/pkgconfig']
|
||||
return [brewPkgConfigPath.trim(), env.PKG_CONFIG_PATH, '/usr/local/lib/pkgconfig', '/usr/lib/pkgconfig']
|
||||
.filter(function (p) { return !!p; })
|
||||
.join(':');
|
||||
} else {
|
||||
@@ -46,7 +76,7 @@ const pkgConfigPath = function () {
|
||||
};
|
||||
|
||||
const useGlobalLibvips = function () {
|
||||
if (Boolean(process.env.SHARP_IGNORE_GLOBAL_LIBVIPS) === true) {
|
||||
if (Boolean(env.SHARP_IGNORE_GLOBAL_LIBVIPS) === true) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -56,8 +86,10 @@ const useGlobalLibvips = function () {
|
||||
|
||||
module.exports = {
|
||||
minimumLibvipsVersion: minimumLibvipsVersion,
|
||||
cachePath: cachePath,
|
||||
globalLibvipsVersion: globalLibvipsVersion,
|
||||
hasVendoredLibvips: hasVendoredLibvips,
|
||||
pkgConfigPath: pkgConfigPath,
|
||||
useGlobalLibvips: useGlobalLibvips
|
||||
useGlobalLibvips: useGlobalLibvips,
|
||||
mkdirSync: mkdirSync
|
||||
};
|
||||
|
||||
204 lib/operation.js
@@ -1,14 +1,18 @@
|
||||
'use strict';
|
||||
|
||||
const color = require('color');
|
||||
const is = require('./is');
|
||||
|
||||
/**
|
||||
* Rotate the output image by either an explicit angle
|
||||
* or auto-orient based on the EXIF `Orientation` tag.
|
||||
*
|
||||
* If an angle is provided, it is converted to a valid 90/180/270deg rotation.
|
||||
* If an angle is provided, it is converted to a valid positive degree rotation.
|
||||
* For example, `-450` will produce a 270deg rotation.
|
||||
*
|
||||
* When rotating by an angle other than a multiple of 90,
|
||||
* the background colour can be provided with the `background` option.
|
||||
*
|
||||
* If no angle is provided, it is determined from the EXIF data.
|
||||
* Mirroring is supported and may infer the use of a flip operation.
|
||||
*
|
||||
@@ -28,64 +32,30 @@ const is = require('./is');
|
||||
* });
|
||||
* readableStream.pipe(pipeline);
|
||||
*
|
||||
* @param {Number} [angle=auto] angle of rotation, must be a multiple of 90.
|
||||
* @param {Number} [angle=auto] angle of rotation.
|
||||
* @param {Object} [options] - if present, is an Object with optional attributes.
|
||||
* @param {String|Object} [options.background="#000000"] parsed by the [color](https://www.npmjs.org/package/color) module to extract values for red, green, blue and alpha.
|
||||
* @returns {Sharp}
|
||||
* @throws {Error} Invalid parameters
|
||||
*/
|
||||
function rotate (angle) {
|
||||
function rotate (angle, options) {
|
||||
if (!is.defined(angle)) {
|
||||
this.options.useExifOrientation = true;
|
||||
} else if (is.integer(angle) && !(angle % 90)) {
|
||||
this.options.angle = angle;
|
||||
} else {
|
||||
throw new Error('Unsupported angle: angle must be a positive/negative multiple of 90 ' + angle);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract a region of the image.
|
||||
*
|
||||
* - Use `extract` before `resize` for pre-resize extraction.
|
||||
* - Use `extract` after `resize` for post-resize extraction.
|
||||
* - Use `extract` before and after for both.
|
||||
*
|
||||
* @example
|
||||
* sharp(input)
|
||||
* .extract({ left: left, top: top, width: width, height: height })
|
||||
* .toFile(output, function(err) {
|
||||
* // Extract a region of the input image, saving in the same format.
|
||||
* });
|
||||
* @example
|
||||
* sharp(input)
|
||||
* .extract({ left: leftOffsetPre, top: topOffsetPre, width: widthPre, height: heightPre })
|
||||
* .resize(width, height)
|
||||
* .extract({ left: leftOffsetPost, top: topOffsetPost, width: widthPost, height: heightPost })
|
||||
* .toFile(output, function(err) {
|
||||
* // Extract a region, resize, then extract from the resized image
|
||||
* });
|
||||
*
|
||||
* @param {Object} options
|
||||
* @param {Number} options.left - zero-indexed offset from left edge
|
||||
* @param {Number} options.top - zero-indexed offset from top edge
|
||||
* @param {Number} options.width - dimension of extracted image
|
||||
* @param {Number} options.height - dimension of extracted image
|
||||
* @returns {Sharp}
|
||||
* @throws {Error} Invalid parameters
|
||||
*/
|
||||
function extract (options) {
|
||||
const suffix = this.options.width === -1 && this.options.height === -1 ? 'Pre' : 'Post';
|
||||
['left', 'top', 'width', 'height'].forEach(function (name) {
|
||||
const value = options[name];
|
||||
if (is.integer(value) && value >= 0) {
|
||||
this.options[name + (name === 'left' || name === 'top' ? 'Offset' : '') + suffix] = value;
|
||||
} else {
|
||||
throw new Error('Non-integer value for ' + name + ' of ' + value);
|
||||
} else if (is.number(angle)) {
|
||||
this.options.rotationAngle = angle;
|
||||
if (is.object(options) && options.background) {
|
||||
const backgroundColour = color(options.background);
|
||||
this.options.rotationBackground = [
|
||||
backgroundColour.red(),
|
||||
backgroundColour.green(),
|
||||
backgroundColour.blue(),
|
||||
Math.round(backgroundColour.alpha() * 255)
|
||||
];
|
||||
}
|
||||
}, this);
|
||||
// Ensure existing rotation occurs before pre-resize extraction
|
||||
if (suffix === 'Pre' && ((this.options.angle % 360) !== 0 || this.options.useExifOrientation === true)) {
|
||||
this.options.rotateBeforePreExtract = true;
|
||||
} else {
|
||||
throw new Error('Unsupported angle: must be a number.');
|
||||
}
|
||||
return this;
|
||||
}
|
||||
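A minimal sketch of the widened `rotate()` signature above; file names and the background colour are illustrative:

```js
const sharp = require('sharp');

// Rotate by an arbitrary angle; the revealed corners are filled
// with the supplied background colour.
sharp('input.jpg')
  .rotate(45, { background: '#ffffff' })
  .toFile('rotated.jpg')
  .catch(err => console.error(err));

// With no angle, orientation is taken from the EXIF Orientation tag.
sharp('photo-from-camera.jpg')
  .rotate()
  .toFile('auto-oriented.jpg')
  .catch(err => console.error(err));
```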
@@ -201,72 +171,15 @@ function blur (sigma) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Extends/pads the edges of the image with the colour provided to the `background` method.
|
||||
* This operation will always occur after resizing and extraction, if any.
|
||||
*
|
||||
* @example
|
||||
* // Resize to 140 pixels wide, then add 10 transparent pixels
|
||||
* // to the top, left and right edges and 20 to the bottom edge
|
||||
* sharp(input)
|
||||
* .resize(140)
|
||||
* .background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
* .extend({top: 10, bottom: 20, left: 10, right: 10})
|
||||
* ...
|
||||
*
|
||||
* @param {(Number|Object)} extend - single pixel count to add to all edges or an Object with per-edge counts
|
||||
* @param {Number} [extend.top]
|
||||
* @param {Number} [extend.left]
|
||||
* @param {Number} [extend.bottom]
|
||||
* @param {Number} [extend.right]
|
||||
* @returns {Sharp}
|
||||
* @throws {Error} Invalid parameters
|
||||
*/
|
||||
function extend (extend) {
|
||||
if (is.integer(extend) && extend > 0) {
|
||||
this.options.extendTop = extend;
|
||||
this.options.extendBottom = extend;
|
||||
this.options.extendLeft = extend;
|
||||
this.options.extendRight = extend;
|
||||
} else if (
|
||||
is.object(extend) &&
|
||||
is.integer(extend.top) && extend.top >= 0 &&
|
||||
is.integer(extend.bottom) && extend.bottom >= 0 &&
|
||||
is.integer(extend.left) && extend.left >= 0 &&
|
||||
is.integer(extend.right) && extend.right >= 0
|
||||
) {
|
||||
this.options.extendTop = extend.top;
|
||||
this.options.extendBottom = extend.bottom;
|
||||
this.options.extendLeft = extend.left;
|
||||
this.options.extendRight = extend.right;
|
||||
} else {
|
||||
throw new Error('Invalid edge extension ' + extend);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Merge alpha transparency channel, if any, with `background`.
|
||||
* @param {Boolean} [flatten=true]
|
||||
* Merge alpha transparency channel, if any, with a background.
|
||||
* @param {Object} [options]
|
||||
* @param {String|Object} [options.background={r: 0, g: 0, b: 0}] - background colour, parsed by the [color](https://www.npmjs.org/package/color) module, defaults to black.
|
||||
* @returns {Sharp}
|
||||
*/
|
||||
function flatten (flatten) {
|
||||
this.options.flatten = is.bool(flatten) ? flatten : true;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Trim "boring" pixels from all edges that contain values within a percentage similarity of the top-left pixel.
|
||||
* @param {Number} [tolerance=10] value between 1 and 99 representing the percentage similarity.
|
||||
* @returns {Sharp}
|
||||
* @throws {Error} Invalid parameters
|
||||
*/
|
||||
function trim (tolerance) {
|
||||
if (!is.defined(tolerance)) {
|
||||
this.options.trimTolerance = 10;
|
||||
} else if (is.integer(tolerance) && is.inRange(tolerance, 1, 99)) {
|
||||
this.options.trimTolerance = tolerance;
|
||||
} else {
|
||||
throw new Error('Invalid trim tolerance (1 to 99) ' + tolerance);
|
||||
function flatten (options) {
|
||||
this.options.flatten = is.bool(options) ? options : true;
|
||||
if (is.object(options)) {
|
||||
this._setColourOption('flattenBackground', options.background);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
@@ -277,11 +190,15 @@ function trim (tolerance) {
|
||||
* This can improve the perceived brightness of a resized image in non-linear colour spaces.
|
||||
* JPEG and WebP input images will not take advantage of the shrink-on-load performance optimisation
|
||||
* when applying a gamma correction.
|
||||
*
|
||||
* Supply a second argument to use a different output gamma value, otherwise the first value is used in both cases.
|
||||
*
|
||||
* @param {Number} [gamma=2.2] value between 1.0 and 3.0.
|
||||
* @param {Number} [gammaOut] value between 1.0 and 3.0. (optional, defaults to same as `gamma`)
|
||||
* @returns {Sharp}
|
||||
* @throws {Error} Invalid parameters
|
||||
*/
|
||||
function gamma (gamma) {
|
||||
function gamma (gamma, gammaOut) {
|
||||
if (!is.defined(gamma)) {
|
||||
// Default gamma correction of 2.2 (sRGB)
|
||||
this.options.gamma = 2.2;
|
||||
@@ -290,6 +207,14 @@ function gamma (gamma) {
|
||||
} else {
|
||||
throw new Error('Invalid gamma correction (1.0 to 3.0) ' + gamma);
|
||||
}
|
||||
if (!is.defined(gammaOut)) {
|
||||
// Default gamma correction for output is same as input
|
||||
this.options.gammaOut = this.options.gamma;
|
||||
} else if (is.number(gammaOut) && is.inRange(gammaOut, 1, 3)) {
|
||||
this.options.gammaOut = gammaOut;
|
||||
} else {
|
||||
throw new Error('Invalid output gamma correction (1.0 to 3.0) ' + gammaOut);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
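A minimal sketch of the new optional `gammaOut` argument handled above; the values are illustrative:

```js
const sharp = require('sharp');

// Apply 2.2 gamma before resizing and 2.4 on the way back out;
// passing a single argument keeps the previous behaviour of using
// the same value in both directions.
sharp('input.jpg')
  .resize(400)
  .gamma(2.2, 2.4)
  .toFile('resized.jpg')
  .catch(err => console.error(err));
```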
|
||||
@@ -453,22 +378,56 @@ function linear (a, b) {
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Recomb the image with the specified matrix.
|
||||
*
|
||||
* @example
|
||||
* sharp(input)
|
||||
* .recomb([
|
||||
* [0.3588, 0.7044, 0.1368],
|
||||
* [0.2990, 0.5870, 0.1140],
|
||||
* [0.2392, 0.4696, 0.0912],
|
||||
* ])
|
||||
* .raw()
|
||||
* .toBuffer(function(err, data, info) {
|
||||
* // data contains the raw pixel data after applying the recomb
|
||||
* // With this example input, a sepia filter has been applied
|
||||
* });
|
||||
*
|
||||
* @param {Array<Array<Number>>} inputMatrix - 3x3 Recombination matrix
|
||||
* @returns {Sharp}
|
||||
* @throws {Error} Invalid parameters
|
||||
*/
|
||||
function recomb (inputMatrix) {
|
||||
if (!Array.isArray(inputMatrix) || inputMatrix.length !== 3 ||
|
||||
inputMatrix[0].length !== 3 ||
|
||||
inputMatrix[1].length !== 3 ||
|
||||
inputMatrix[2].length !== 3
|
||||
) {
|
||||
// must pass in a kernel
|
||||
throw new Error('Invalid Recomb Matrix');
|
||||
}
|
||||
this.options.recombMatrix = [
|
||||
inputMatrix[0][0], inputMatrix[0][1], inputMatrix[0][2],
|
||||
inputMatrix[1][0], inputMatrix[1][1], inputMatrix[1][2],
|
||||
inputMatrix[2][0], inputMatrix[2][1], inputMatrix[2][2]
|
||||
].map(Number);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Decorate the Sharp prototype with operation-related functions.
|
||||
* @private
|
||||
*/
|
||||
module.exports = function (Sharp) {
|
||||
[
|
||||
Object.assign(Sharp.prototype, {
|
||||
rotate,
|
||||
extract,
|
||||
flip,
|
||||
flop,
|
||||
sharpen,
|
||||
median,
|
||||
blur,
|
||||
extend,
|
||||
flatten,
|
||||
trim,
|
||||
gamma,
|
||||
negate,
|
||||
normalise,
|
||||
@@ -476,8 +435,7 @@ module.exports = function (Sharp) {
|
||||
convolve,
|
||||
threshold,
|
||||
boolean,
|
||||
linear
|
||||
].forEach(function (f) {
|
||||
Sharp.prototype[f.name] = f;
|
||||
linear,
|
||||
recomb
|
||||
});
|
||||
};
|
||||
|
||||
152 lib/output.js
@@ -148,6 +148,10 @@ function withMetadata (withMetadata) {
|
||||
* @param {Boolean} [options.overshootDeringing=false] - apply overshoot deringing, requires mozjpeg
|
||||
* @param {Boolean} [options.optimiseScans=false] - optimise progressive scans, forces progressive, requires mozjpeg
|
||||
* @param {Boolean} [options.optimizeScans=false] - alternative spelling of optimiseScans
|
||||
* @param {Boolean} [options.optimiseCoding=true] - optimise Huffman coding tables
|
||||
* @param {Boolean} [options.optimizeCoding=true] - alternative spelling of optimiseCoding
|
||||
* @param {Number} [options.quantisationTable=0] - quantization table to use, integer 0-8, requires mozjpeg
|
||||
* @param {Number} [options.quantizationTable=0] - alternative spelling of quantisationTable
|
||||
* @param {Boolean} [options.force=true] - force JPEG output, otherwise attempt to use input format
|
||||
* @returns {Sharp}
|
||||
* @throws {Error} Invalid options
|
||||
@@ -185,6 +189,18 @@ function jpeg (options) {
|
||||
this.options.jpegProgressive = true;
|
||||
}
|
||||
}
|
||||
options.optimiseCoding = is.bool(options.optimizeCoding) ? options.optimizeCoding : options.optimiseCoding;
|
||||
if (is.defined(options.optimiseCoding)) {
|
||||
this._setBooleanOption('jpegOptimiseCoding', options.optimiseCoding);
|
||||
}
|
||||
options.quantisationTable = is.number(options.quantizationTable) ? options.quantizationTable : options.quantisationTable;
|
||||
if (is.defined(options.quantisationTable)) {
|
||||
if (is.integer(options.quantisationTable) && is.inRange(options.quantisationTable, 0, 8)) {
|
||||
this.options.jpegQuantisationTable = options.quantisationTable;
|
||||
} else {
|
||||
throw new Error('Invalid quantisation table (integer, 0-8) ' + options.quantisationTable);
|
||||
}
|
||||
}
|
||||
}
|
||||
return this._updateFormatOut('jpeg', options);
|
||||
}
|
||||
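A minimal sketch exercising the JPEG output options validated above; values are illustrative, and `quantisationTable` only has an effect with a mozjpeg-enabled libvips, as the documentation notes:

```js
const sharp = require('sharp');

sharp('input.jpg')
  .jpeg({
    quality: 80,
    optimiseCoding: true,   // Huffman table optimisation
    quantisationTable: 3    // requires mozjpeg
  })
  .toFile('optimised.jpg')
  .catch(err => console.error(err));
```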
@@ -255,10 +271,10 @@ function webp (options) {
|
||||
}
|
||||
}
|
||||
if (is.object(options) && is.defined(options.alphaQuality)) {
|
||||
if (is.integer(options.alphaQuality) && is.inRange(options.alphaQuality, 1, 100)) {
|
||||
if (is.integer(options.alphaQuality) && is.inRange(options.alphaQuality, 0, 100)) {
|
||||
this.options.webpAlphaQuality = options.alphaQuality;
|
||||
} else {
|
||||
throw new Error('Invalid webp alpha quality (integer, 1-100) ' + options.alphaQuality);
|
||||
throw new Error('Invalid webp alpha quality (integer, 0-100) ' + options.alphaQuality);
|
||||
}
|
||||
}
|
||||
if (is.object(options) && is.defined(options.lossless)) {
|
||||
@@ -288,6 +304,10 @@ function webp (options) {
|
||||
* @param {Boolean} [options.force=true] - force TIFF output, otherwise attempt to use input format
|
||||
* @param {Boolean} [options.compression='jpeg'] - compression options: lzw, deflate, jpeg, ccittfax4
|
||||
* @param {Boolean} [options.predictor='horizontal'] - compression predictor options: none, horizontal, float
|
||||
* @param {Boolean} [options.pyramid=false] - write an image pyramid
|
||||
* @param {Boolean} [options.tile=false] - write a tiled tiff
|
||||
* @param {Boolean} [options.tileWidth=256] - horizontal tile size
|
||||
* @param {Boolean} [options.tileHeight=256] - vertical tile size
|
||||
* @param {Number} [options.xres=1.0] - horizontal resolution in pixels/mm
|
||||
* @param {Number} [options.yres=1.0] - vertical resolution in pixels/mm
|
||||
* @param {Boolean} [options.squash=false] - squash 8-bit images down to 1 bit
|
||||
@@ -295,51 +315,83 @@ function webp (options) {
|
||||
* @throws {Error} Invalid options
|
||||
*/
|
||||
function tiff (options) {
|
||||
if (is.object(options) && is.defined(options.quality)) {
|
||||
if (is.integer(options.quality) && is.inRange(options.quality, 1, 100)) {
|
||||
this.options.tiffQuality = options.quality;
|
||||
} else {
|
||||
throw new Error('Invalid quality (integer, 1-100) ' + options.quality);
|
||||
if (is.object(options)) {
|
||||
if (is.defined(options.quality)) {
|
||||
if (is.integer(options.quality) && is.inRange(options.quality, 1, 100)) {
|
||||
this.options.tiffQuality = options.quality;
|
||||
} else {
|
||||
throw new Error('Invalid quality (integer, 1-100) ' + options.quality);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (is.object(options) && is.defined(options.squash)) {
|
||||
if (is.bool(options.squash)) {
|
||||
this.options.tiffSquash = options.squash;
|
||||
} else {
|
||||
throw new Error('Invalid Value for squash ' + options.squash + ' Only Boolean Values allowed for options.squash.');
|
||||
if (is.defined(options.squash)) {
|
||||
if (is.bool(options.squash)) {
|
||||
this.options.tiffSquash = options.squash;
|
||||
} else {
|
||||
throw new Error('Invalid Value for squash ' + options.squash + ' Only Boolean Values allowed for options.squash.');
|
||||
}
|
||||
}
|
||||
}
|
||||
// resolution
|
||||
if (is.object(options) && is.defined(options.xres)) {
|
||||
if (is.number(options.xres)) {
|
||||
this.options.tiffXres = options.xres;
|
||||
} else {
|
||||
throw new Error('Invalid Value for xres ' + options.xres + ' Only numeric values allowed for options.xres');
|
||||
// tiling
|
||||
if (is.defined(options.tile)) {
|
||||
if (is.bool(options.tile)) {
|
||||
this.options.tiffTile = options.tile;
|
||||
} else {
|
||||
throw new Error('Invalid Value for tile ' + options.tile + ' Only Boolean values allowed for options.tile');
|
||||
}
|
||||
}
|
||||
}
|
||||
if (is.object(options) && is.defined(options.yres)) {
|
||||
if (is.number(options.yres)) {
|
||||
this.options.tiffYres = options.yres;
|
||||
} else {
|
||||
throw new Error('Invalid Value for yres ' + options.yres + ' Only numeric values allowed for options.yres');
|
||||
if (is.defined(options.tileWidth)) {
|
||||
if (is.number(options.tileWidth) && options.tileWidth > 0) {
|
||||
this.options.tiffTileWidth = options.tileWidth;
|
||||
} else {
|
||||
throw new Error('Invalid Value for tileWidth ' + options.tileWidth + ' Only positive numeric values allowed for options.tileWidth');
|
||||
}
|
||||
}
|
||||
}
|
||||
// compression
|
||||
if (is.defined(options) && is.defined(options.compression)) {
|
||||
if (is.string(options.compression) && is.inArray(options.compression, ['lzw', 'deflate', 'jpeg', 'ccittfax4', 'none'])) {
|
||||
this.options.tiffCompression = options.compression;
|
||||
} else {
|
||||
const message = `Invalid compression option "${options.compression}". Should be one of: lzw, deflate, jpeg, ccittfax4, none`;
|
||||
throw new Error(message);
|
||||
if (is.defined(options.tileHeight)) {
|
||||
if (is.number(options.tileHeight) && options.tileHeight > 0) {
|
||||
this.options.tiffTileHeight = options.tileHeight;
|
||||
} else {
|
||||
throw new Error('Invalid Value for tileHeight ' + options.tileHeight + ' Only positive numeric values allowed for options.tileHeight');
|
||||
}
|
||||
}
|
||||
}
|
||||
// predictor
|
||||
if (is.defined(options) && is.defined(options.predictor)) {
|
||||
if (is.string(options.predictor) && is.inArray(options.predictor, ['none', 'horizontal', 'float'])) {
|
||||
this.options.tiffPredictor = options.predictor;
|
||||
} else {
|
||||
const message = `Invalid predictor option "${options.predictor}". Should be one of: none, horizontal, float`;
|
||||
throw new Error(message);
|
||||
// pyramid
|
||||
if (is.defined(options.pyramid)) {
|
||||
if (is.bool(options.pyramid)) {
|
||||
this.options.tiffPyramid = options.pyramid;
|
||||
} else {
|
||||
throw new Error('Invalid Value for pyramid ' + options.pyramid + ' Only Boolean values allowed for options.pyramid');
|
||||
}
|
||||
}
|
||||
// resolution
|
||||
if (is.defined(options.xres)) {
|
||||
if (is.number(options.xres)) {
|
||||
this.options.tiffXres = options.xres;
|
||||
} else {
|
||||
throw new Error('Invalid Value for xres ' + options.xres + ' Only numeric values allowed for options.xres');
|
||||
}
|
||||
}
|
||||
if (is.defined(options.yres)) {
|
||||
if (is.number(options.yres)) {
|
||||
this.options.tiffYres = options.yres;
|
||||
} else {
|
||||
throw new Error('Invalid Value for yres ' + options.yres + ' Only numeric values allowed for options.yres');
|
||||
}
|
||||
}
|
||||
// compression
|
||||
if (is.defined(options.compression)) {
|
||||
if (is.string(options.compression) && is.inArray(options.compression, ['lzw', 'deflate', 'jpeg', 'ccittfax4', 'none'])) {
|
||||
this.options.tiffCompression = options.compression;
|
||||
} else {
|
||||
const message = `Invalid compression option "${options.compression}". Should be one of: lzw, deflate, jpeg, ccittfax4, none`;
|
||||
throw new Error(message);
|
||||
}
|
||||
}
|
||||
// predictor
|
||||
if (is.defined(options.predictor)) {
|
||||
if (is.string(options.predictor) && is.inArray(options.predictor, ['none', 'horizontal', 'float'])) {
|
||||
this.options.tiffPredictor = options.predictor;
|
||||
} else {
|
||||
const message = `Invalid predictor option "${options.predictor}". Should be one of: none, horizontal, float`;
|
||||
throw new Error(message);
|
||||
}
|
||||
}
|
||||
}
|
||||
return this._updateFormatOut('tiff', options);
|
||||
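A minimal sketch exercising the TIFF options validated above; values are illustrative:

```js
const sharp = require('sharp');

sharp('input.tiff')
  .tiff({
    compression: 'lzw',
    predictor: 'horizontal',
    pyramid: true,     // write an image pyramid
    tile: true,        // write a tiled TIFF
    tileWidth: 256,
    tileHeight: 256,
    xres: 10,          // pixels/mm
    yres: 10
  })
  .toFile('pyramidal.tiff')
  .catch(err => console.error(err));
```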
@@ -390,6 +442,8 @@ function toFormat (format, options) {
|
||||
* Set the format and options for tile images via the `toFormat`, `jpeg`, `png` or `webp` functions.
|
||||
* Use a `.zip` or `.szi` file extension with `toFile` to write to a compressed archive file format.
|
||||
*
|
||||
* Warning: multiple sharp instances concurrently producing tile output can expose a possible race condition in some versions of libgsf.
|
||||
*
|
||||
* @example
|
||||
* sharp('input.tiff')
|
||||
* .png()
|
||||
@@ -405,6 +459,7 @@ function toFormat (format, options) {
|
||||
* @param {Number} [tile.size=256] tile size in pixels, a value between 1 and 8192.
|
||||
* @param {Number} [tile.overlap=0] tile overlap in pixels, a value between 0 and 8192.
|
||||
* @param {Number} [tile.angle=0] tile angle of rotation, must be a multiple of 90.
|
||||
* @param {String} [tile.depth] how deep to make the pyramid, possible values are `onepixel`, `onetile` or `one`, default based on layout.
|
||||
* @param {String} [tile.container='fs'] tile container, with value `fs` (filesystem) or `zip` (compressed file).
|
||||
* @param {String} [tile.layout='dz'] filesystem layout, possible values are `dz`, `zoomify` or `google`.
|
||||
* @returns {Sharp}
|
||||
@@ -456,6 +511,15 @@ function tile (tile) {
|
||||
throw new Error('Unsupported angle: angle must be a positive/negative multiple of 90 ' + tile.angle);
|
||||
}
|
||||
}
|
||||
|
||||
// Depth of tiles
|
||||
if (is.defined(tile.depth)) {
|
||||
if (is.string(tile.depth) && is.inArray(tile.depth, ['onepixel', 'onetile', 'one'])) {
|
||||
this.options.tileDepth = tile.depth;
|
||||
} else {
|
||||
throw new Error("Invalid tile depth '" + tile.depth + "', should be one of 'onepixel', 'onetile' or 'one'");
|
||||
}
|
||||
}
|
||||
}
|
||||
// Format
|
||||
if (is.inArray(this.options.formatOut, ['jpeg', 'png', 'webp'])) {
|
||||
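A minimal sketch of the new `tile.depth` option validated above, assuming Deep Zoom (`dz`) output; the values and output name are illustrative:

```js
const sharp = require('sharp');

// Write a Deep Zoom pyramid of PNG tiles; the depth option controls
// how far the pyramid is reduced.
sharp('input.tiff')
  .png()
  .tile({
    size: 512,
    layout: 'dz',
    depth: 'onetile'
  })
  .toFile('output.dz')
  .catch(err => console.error(err));
```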
@@ -613,7 +677,7 @@ function _pipeline (callback) {
|
||||
* @private
|
||||
*/
|
||||
module.exports = function (Sharp) {
|
||||
[
|
||||
Object.assign(Sharp.prototype, {
|
||||
// Public
|
||||
toFile,
|
||||
toBuffer,
|
||||
@@ -630,7 +694,5 @@ module.exports = function (Sharp) {
|
||||
_setBooleanOption,
|
||||
_read,
|
||||
_pipeline
|
||||
].forEach(function (f) {
|
||||
Sharp.prototype[f.name] = f;
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,10 +1,13 @@
|
||||
'use strict';
|
||||
|
||||
const detectLibc = require('detect-libc');
|
||||
|
||||
module.exports = function () {
|
||||
const arch = process.env.npm_config_arch || process.arch;
|
||||
const platform = process.env.npm_config_platform || process.platform;
|
||||
const libc = (platform === 'linux' && detectLibc.isNonGlibcLinux) ? detectLibc.family : '';
|
||||
|
||||
const platformId = [platform];
|
||||
const platformId = [`${platform}${libc}`];
|
||||
if (arch === 'arm' || arch === 'armhf' || arch === 'arm64') {
|
||||
const armVersion = (arch === 'arm64') ? '8' : process.env.npm_config_armv || process.config.variables.arm_version || '6';
|
||||
platformId.push(`armv${armVersion}`);
|
||||
|
||||
423 lib/resize.js
@@ -1,9 +1,10 @@
|
||||
'use strict';
|
||||
|
||||
const deprecate = require('util').deprecate;
|
||||
const is = require('./is');
|
||||
|
||||
/**
|
||||
* Weighting to apply to image crop.
|
||||
* Weighting to apply when using contain/cover fit.
|
||||
* @member
|
||||
* @private
|
||||
*/
|
||||
@@ -21,7 +22,23 @@ const gravity = {
|
||||
};
|
||||
|
||||
/**
|
||||
* Strategies for automagic crop behaviour.
|
||||
* Position to apply when using contain/cover fit.
|
||||
* @member
|
||||
* @private
|
||||
*/
|
||||
const position = {
|
||||
top: 1,
|
||||
right: 2,
|
||||
bottom: 3,
|
||||
left: 4,
|
||||
'right top': 5,
|
||||
'right bottom': 6,
|
||||
'left bottom': 7,
|
||||
'left top': 8
|
||||
};
|
||||
|
||||
/**
|
||||
* Strategies for automagic cover behaviour.
|
||||
* @member
|
||||
* @private
|
||||
*/
|
||||
@@ -38,45 +55,144 @@ const strategy = {
|
||||
const kernel = {
|
||||
nearest: 'nearest',
|
||||
cubic: 'cubic',
|
||||
mitchell: 'mitchell',
|
||||
lanczos2: 'lanczos2',
|
||||
lanczos3: 'lanczos3'
|
||||
};
|
||||
|
||||
/**
|
||||
* Resize image to `width` x `height`.
|
||||
* By default, the resized image is centre cropped to the exact size specified.
|
||||
* Methods by which an image can be resized to fit the provided dimensions.
|
||||
* @member
|
||||
* @private
|
||||
*/
|
||||
const fit = {
|
||||
contain: 'contain',
|
||||
cover: 'cover',
|
||||
fill: 'fill',
|
||||
inside: 'inside',
|
||||
outside: 'outside'
|
||||
};
|
||||
|
||||
/**
|
||||
* Map external fit property to internal canvas property.
|
||||
* @member
|
||||
* @private
|
||||
*/
|
||||
const mapFitToCanvas = {
|
||||
contain: 'embed',
|
||||
cover: 'crop',
|
||||
fill: 'ignore_aspect',
|
||||
inside: 'max',
|
||||
outside: 'min'
|
||||
};
|
||||
|
||||
/**
|
||||
* Resize image to `width`, `height` or `width x height`.
|
||||
*
|
||||
* Possible kernels are:
|
||||
* When both a `width` and `height` are provided, the possible methods by which the image should **fit** these are:
|
||||
* - `cover`: Crop to cover both provided dimensions (the default).
|
||||
* - `contain`: Embed within both provided dimensions.
|
||||
* - `fill`: Ignore the aspect ratio of the input and stretch to both provided dimensions.
|
||||
* - `inside`: Preserving aspect ratio, resize the image to be as large as possible while ensuring its dimensions are less than or equal to both those specified.
|
||||
* - `outside`: Preserving aspect ratio, resize the image to be as small as possible while ensuring its dimensions are greater than or equal to both those specified.
|
||||
* Some of these values are based on the [object-fit](https://developer.mozilla.org/en-US/docs/Web/CSS/object-fit) CSS property.
|
||||
*
|
||||
* When using a `fit` of `cover` or `contain`, the default **position** is `centre`. Other options are:
|
||||
* - `sharp.position`: `top`, `right top`, `right`, `right bottom`, `bottom`, `left bottom`, `left`, `left top`.
|
||||
* - `sharp.gravity`: `north`, `northeast`, `east`, `southeast`, `south`, `southwest`, `west`, `northwest`, `center` or `centre`.
|
||||
* - `sharp.strategy`: `cover` only, dynamically crop using either the `entropy` or `attention` strategy.
|
||||
* Some of these values are based on the [object-position](https://developer.mozilla.org/en-US/docs/Web/CSS/object-position) CSS property.
|
||||
*
|
||||
* The experimental strategy-based approach resizes so one dimension is at its target length
|
||||
* then repeatedly ranks edge regions, discarding the edge with the lowest score based on the selected strategy.
|
||||
* - `entropy`: focus on the region with the highest [Shannon entropy](https://en.wikipedia.org/wiki/Entropy_%28information_theory%29).
|
||||
* - `attention`: focus on the region with the highest luminance frequency, colour saturation and presence of skin tones.
|
||||
*
|
||||
* Possible interpolation kernels are:
|
||||
* - `nearest`: Use [nearest neighbour interpolation](http://en.wikipedia.org/wiki/Nearest-neighbor_interpolation).
|
||||
* - `cubic`: Use a [Catmull-Rom spline](https://en.wikipedia.org/wiki/Centripetal_Catmull%E2%80%93Rom_spline).
|
||||
* - `mitchell`: Use a [Mitchell-Netravali spline](https://www.cs.utexas.edu/~fussell/courses/cs384g-fall2013/lectures/mitchell/Mitchell.pdf).
|
||||
* - `lanczos2`: Use a [Lanczos kernel](https://en.wikipedia.org/wiki/Lanczos_resampling#Lanczos_kernel) with `a=2`.
|
||||
* - `lanczos3`: Use a Lanczos kernel with `a=3` (the default).
|
||||
*
|
||||
* @example
|
||||
* sharp(inputBuffer)
|
||||
* sharp(input)
|
||||
* .resize({ width: 100 })
|
||||
* .toBuffer()
|
||||
* .then(data => {
|
||||
* // 100 pixels wide, auto-scaled height
|
||||
* });
|
||||
*
|
||||
* @example
|
||||
* sharp(input)
|
||||
* .resize({ height: 100 })
|
||||
* .toBuffer()
|
||||
* .then(data => {
|
||||
* // 100 pixels high, auto-scaled width
|
||||
* });
|
||||
*
|
||||
* @example
|
||||
* sharp(input)
|
||||
* .resize(200, 300, {
|
||||
* kernel: sharp.kernel.nearest
|
||||
* kernel: sharp.kernel.nearest,
|
||||
* fit: 'contain',
|
||||
* position: 'right top',
|
||||
* background: { r: 255, g: 255, b: 255, alpha: 0.5 }
|
||||
* })
|
||||
* .background('white')
|
||||
* .embed()
|
||||
* .toFile('output.tiff')
|
||||
* .then(function() {
|
||||
* // output.tiff is a 200 pixels wide and 300 pixels high image
|
||||
* // containing a nearest-neighbour scaled version, embedded on a white canvas,
|
||||
* // of the image data in inputBuffer
|
||||
* .toFile('output.png')
|
||||
* .then(() => {
|
||||
* // output.png is a 200 pixels wide and 300 pixels high image
|
||||
* // containing a nearest-neighbour scaled version
|
||||
* // contained within the north-east corner of a semi-transparent white canvas
|
||||
* });
|
||||
*
|
||||
* @example
|
||||
* const transformer = sharp()
|
||||
* .resize({
|
||||
* width: 200,
|
||||
* height: 200,
|
||||
* fit: sharp.fit.cover,
|
||||
* position: sharp.strategy.entropy
|
||||
* });
|
||||
* // Read image data from readableStream
|
||||
* // Write 200px square auto-cropped image data to writableStream
|
||||
* readableStream
|
||||
* .pipe(transformer)
|
||||
* .pipe(writableStream);
|
||||
*
|
||||
* @example
|
||||
* sharp(input)
|
||||
* .resize(200, 200, {
|
||||
* fit: sharp.fit.inside,
|
||||
* withoutEnlargement: true
|
||||
* })
|
||||
* .toFormat('jpeg')
|
||||
* .toBuffer()
|
||||
* .then(function(outputBuffer) {
|
||||
* // outputBuffer contains JPEG image data
|
||||
* // no wider and no higher than 200 pixels
|
||||
* // and no larger than the input image
|
||||
* });
|
||||
*
|
||||
* @param {Number} [width] - pixels wide the resultant image should be. Use `null` or `undefined` to auto-scale the width to match the height.
|
||||
* @param {Number} [height] - pixels high the resultant image should be. Use `null` or `undefined` to auto-scale the height to match the width.
|
||||
* @param {Object} [options]
|
||||
* @param {String} [options.width] - alternative means of specifying `width`. If both are present this takes priority.
* @param {String} [options.height] - alternative means of specifying `height`. If both are present this takes priority.
|
||||
* @param {String} [options.fit='cover'] - how the image should be resized to fit both provided dimensions, one of `cover`, `contain`, `fill`, `inside` or `outside`.
|
||||
* @param {String} [options.position='centre'] - position, gravity or strategy to use when `fit` is `cover` or `contain`.
|
||||
* @param {String|Object} [options.background={r: 0, g: 0, b: 0, alpha: 1}] - background colour when using a `fit` of `contain`, parsed by the [color](https://www.npmjs.org/package/color) module, defaults to black without transparency.
|
||||
* @param {String} [options.kernel='lanczos3'] - the kernel to use for image reduction.
|
||||
* @param {Boolean} [options.withoutEnlargement=false] - do not enlarge if the width *or* height are already less than the specified dimensions, equivalent to GraphicsMagick's `>` geometry option.
|
||||
* @param {Boolean} [options.fastShrinkOnLoad=true] - take greater advantage of the JPEG and WebP shrink-on-load feature, which can lead to a slight moiré pattern on some images.
|
||||
* @returns {Sharp}
|
||||
* @throws {Error} Invalid parameters
|
||||
*/
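
Since either dimension may be omitted, here is a minimal sketch of the `null`/`undefined` auto-scaling described by the `width`/`height` parameters above; the filenames are hypothetical and only the documented behaviour is assumed.

// 300 pixels high, width auto-scaled to preserve the aspect ratio
sharp('input.jpg')
  .resize(null, 300)
  .toFile('output-h300.jpg')
  .then(info => {
    // info.width reports the auto-scaled width
  });
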
|
||||
function resize (width, height, options) {
  if (is.defined(width)) {
    if (is.object(width) && !is.defined(options)) {
      options = width;
    } else if (is.integer(width) && width > 0) {
      this.options.width = width;
    } else {
      throw is.invalidParameterError('width', 'positive integer', width);
@@ -94,6 +210,38 @@ function resize (width, height, options) {
|
||||
this.options.height = -1;
|
||||
}
|
||||
if (is.object(options)) {
|
||||
// Width
|
||||
if (is.integer(options.width) && options.width > 0) {
|
||||
this.options.width = options.width;
|
||||
}
|
||||
// Height
|
||||
if (is.integer(options.height) && options.height > 0) {
|
||||
this.options.height = options.height;
|
||||
}
|
||||
// Fit
|
||||
if (is.defined(options.fit)) {
|
||||
const canvas = mapFitToCanvas[options.fit];
|
||||
if (is.string(canvas)) {
|
||||
this.options.canvas = canvas;
|
||||
} else {
|
||||
throw is.invalidParameterError('fit', 'valid fit', options.fit);
|
||||
}
|
||||
}
|
||||
// Position
|
||||
if (is.defined(options.position)) {
|
||||
const pos = is.integer(options.position)
|
||||
? options.position
|
||||
: strategy[options.position] || position[options.position] || gravity[options.position];
|
||||
if (is.integer(pos) && (is.inRange(pos, 0, 8) || is.inRange(pos, 16, 17))) {
|
||||
this.options.position = pos;
|
||||
} else {
|
||||
throw is.invalidParameterError('position', 'valid position/gravity/strategy', options.position);
|
||||
}
|
||||
}
|
||||
// Background
|
||||
if (is.defined(options.background)) {
|
||||
this._setColourOption('resizeBackground', options.background);
|
||||
}
|
||||
// Kernel
|
||||
if (is.defined(options.kernel)) {
|
||||
if (is.string(kernel[options.kernel])) {
|
||||
@@ -102,6 +250,10 @@ function resize (width, height, options) {
|
||||
throw is.invalidParameterError('kernel', 'valid kernel name', options.kernel);
|
||||
}
|
||||
}
|
||||
// Without enlargement
|
||||
if (is.defined(options.withoutEnlargement)) {
|
||||
this._setBooleanOption('withoutEnlargement', options.withoutEnlargement);
|
||||
}
|
||||
// Shrink on load
|
||||
if (is.defined(options.fastShrinkOnLoad)) {
|
||||
this._setBooleanOption('fastShrinkOnLoad', options.fastShrinkOnLoad);
|
||||
@@ -111,48 +263,144 @@ function resize (width, height, options) {
|
||||
}
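
The position lookup above accepts a plain gravity/position string, a `sharp.gravity` constant or a `sharp.strategy` value in the same option slot. A short sketch of equivalent calls, with a hypothetical input file; only the lookup shown above is assumed.

// String form and gravity constant pin the cover crop to the top-right corner
sharp('input.jpg').resize(200, 200, { fit: 'cover', position: 'right top' });
sharp('input.jpg').resize(200, 200, { fit: 'cover', position: sharp.gravity.northeast });
// A strategy can be supplied in the same slot for content-aware cropping
sharp('input.jpg').resize(200, 200, { fit: 'cover', position: sharp.strategy.attention });
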
|
||||
|
||||
/**
 * Extends/pads the edges of the image with the provided background colour.
 * This operation will always occur after resizing and extraction, if any.
 *
 * @example
 * // Resize to 140 pixels wide, then add 10 transparent pixels
 * // to the top, left and right edges and 20 to the bottom edge
 * sharp(input)
 *   .resize(140)
 *   .extend({
 *     top: 10,
 *     bottom: 20,
 *     left: 10,
 *     right: 10,
 *     background: { r: 0, g: 0, b: 0, alpha: 0 }
 *   })
 *   ...
 *
 * @param {(Number|Object)} extend - single pixel count to add to all edges or an Object with per-edge counts
 * @param {Number} [extend.top]
 * @param {Number} [extend.left]
 * @param {Number} [extend.bottom]
 * @param {Number} [extend.right]
 * @param {String|Object} [extend.background={r: 0, g: 0, b: 0, alpha: 1}] - background colour, parsed by the [color](https://www.npmjs.org/package/color) module, defaults to black without transparency.
 * @returns {Sharp}
 * @throws {Error} Invalid parameters
 */
|
||||
function extend (extend) {
|
||||
if (is.integer(extend) && extend > 0) {
|
||||
this.options.extendTop = extend;
|
||||
this.options.extendBottom = extend;
|
||||
this.options.extendLeft = extend;
|
||||
this.options.extendRight = extend;
|
||||
} else if (
|
||||
is.object(extend) &&
|
||||
is.integer(extend.top) && extend.top >= 0 &&
|
||||
is.integer(extend.bottom) && extend.bottom >= 0 &&
|
||||
is.integer(extend.left) && extend.left >= 0 &&
|
||||
is.integer(extend.right) && extend.right >= 0
|
||||
) {
|
||||
this.options.extendTop = extend.top;
|
||||
this.options.extendBottom = extend.bottom;
|
||||
this.options.extendLeft = extend.left;
|
||||
this.options.extendRight = extend.right;
|
||||
this._setColourOption('extendBackground', extend.background);
|
||||
} else {
|
||||
throw new Error('Invalid edge extension ' + extend);
|
||||
}
|
||||
return this;
|
||||
}
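
The first branch above spreads a single positive integer to all four edges. A brief usage sketch, assuming a hypothetical input file and the default (black, opaque) extension background.

// Add a uniform 10 pixel border on every edge
sharp('input.png')
  .extend(10)
  .toFile('bordered.png')
  .then(() => {
    // bordered.png is 20 pixels wider and 20 pixels taller than input.png
  });
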
|
||||
|
||||
/**
|
||||
* Extract a region of the image.
|
||||
*
|
||||
* - Use `extract` before `resize` for pre-resize extraction.
|
||||
* - Use `extract` after `resize` for post-resize extraction.
|
||||
* - Use `extract` before and after for both.
|
||||
*
|
||||
* @example
|
||||
* sharp(input)
|
||||
* .extract({ left: left, top: top, width: width, height: height })
|
||||
* .toFile(output, function(err) {
|
||||
* // Extract a region of the input image, saving in the same format.
|
||||
* });
|
||||
* @example
|
||||
* sharp(input)
|
||||
* .extract({ left: leftOffsetPre, top: topOffsetPre, width: widthPre, height: heightPre })
|
||||
* .resize(width, height)
|
||||
* .extract({ left: leftOffsetPost, top: topOffsetPost, width: widthPost, height: heightPost })
|
||||
* .toFile(output, function(err) {
|
||||
* // Extract a region, resize, then extract from the resized image
|
||||
* });
|
||||
*
|
||||
* @param {Object} options
|
||||
* @param {Number} options.left - zero-indexed offset from left edge
|
||||
* @param {Number} options.top - zero-indexed offset from top edge
|
||||
* @param {Number} options.width - dimension of extracted image
|
||||
* @param {Number} options.height - dimension of extracted image
|
||||
* @returns {Sharp}
|
||||
* @throws {Error} Invalid parameters
|
||||
*/
|
||||
function extract (options) {
|
||||
const suffix = this.options.width === -1 && this.options.height === -1 ? 'Pre' : 'Post';
|
||||
['left', 'top', 'width', 'height'].forEach(function (name) {
|
||||
const value = options[name];
|
||||
if (is.integer(value) && value >= 0) {
|
||||
this.options[name + (name === 'left' || name === 'top' ? 'Offset' : '') + suffix] = value;
|
||||
} else {
|
||||
throw new Error('Non-integer value for ' + name + ' of ' + value);
|
||||
}
|
||||
}, this);
|
||||
// Ensure existing rotation occurs before pre-resize extraction
|
||||
if (suffix === 'Pre' && ((this.options.angle % 360) !== 0 || this.options.useExifOrientation === true)) {
|
||||
this.options.rotateBeforePreExtract = true;
|
||||
}
|
||||
return this;
|
||||
}
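
Because `rotateBeforePreExtract` is set above, a rotation requested before a pre-resize extraction is applied first. A sketch of that ordering; the region values and filename are placeholders.

// Rotate 90 degrees, extract a region from the rotated image, then resize
sharp('input.jpg')
  .rotate(90)
  .extract({ left: 20, top: 20, width: 100, height: 100 })
  .resize(50)
  .toBuffer()
  .then(data => {
    // data contains the 50 pixel wide result of the rotated, extracted region
  });
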
|
||||
|
||||
/**
|
||||
* Trim "boring" pixels from all edges that contain values similar to the top-left pixel.
|
||||
* The `info` response Object will contain `trimOffsetLeft` and `trimOffsetTop` properties.
|
||||
* @param {Number} [threshold=10] the allowed difference from the top-left pixel, a number greater than zero.
|
||||
* @returns {Sharp}
|
||||
* @throws {Error} Invalid parameters
|
||||
*/
|
||||
function trim (threshold) {
|
||||
if (!is.defined(threshold)) {
|
||||
this.options.trimThreshold = 10;
|
||||
} else if (is.number(threshold) && threshold > 0) {
|
||||
this.options.trimThreshold = threshold;
|
||||
} else {
|
||||
throw is.invalidParameterError('threshold', 'number greater than zero', threshold);
|
||||
}
|
||||
return this;
|
||||
}
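
A minimal sketch of the threshold handling above, reading the `info` properties mentioned in the JSDoc; the filename is hypothetical.

// Remove edges that differ from the top-left pixel by less than 25
sharp('scanned-page.png')
  .trim(25)
  .toBuffer(function (err, data, info) {
    // info.trimOffsetLeft and info.trimOffsetTop report how much was removed
  });
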
|
||||
|
||||
// Deprecated functions
|
||||
|
||||
/**
|
||||
* @deprecated
|
||||
* @private
|
||||
*/
|
||||
function crop (crop) {
|
||||
this.options.canvas = 'crop';
|
||||
if (!is.defined(crop)) {
|
||||
// Default
|
||||
this.options.crop = gravity.center;
|
||||
this.options.position = gravity.center;
|
||||
} else if (is.integer(crop) && is.inRange(crop, 0, 8)) {
|
||||
// Gravity (numeric)
|
||||
this.options.crop = crop;
|
||||
this.options.position = crop;
|
||||
} else if (is.string(crop) && is.integer(gravity[crop])) {
|
||||
// Gravity (string)
|
||||
this.options.crop = gravity[crop];
|
||||
this.options.position = gravity[crop];
|
||||
} else if (is.integer(crop) && crop >= strategy.entropy) {
|
||||
// Strategy
|
||||
this.options.crop = crop;
|
||||
this.options.position = crop;
|
||||
} else if (is.string(crop) && is.integer(strategy[crop])) {
|
||||
// Strategy (string)
|
||||
this.options.crop = strategy[crop];
|
||||
this.options.position = strategy[crop];
|
||||
} else {
|
||||
throw is.invalidParameterError('crop', 'valid crop id/name/strategy', crop);
|
||||
}
|
||||
@@ -160,66 +408,29 @@ function crop (crop) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Preserving aspect ratio, resize the image to the maximum `width` or `height` specified
|
||||
* then embed on a background of the exact `width` and `height` specified.
|
||||
*
|
||||
* If the background contains an alpha value then WebP and PNG format output images will
|
||||
* contain an alpha channel, even when the input image does not.
|
||||
*
|
||||
* @example
|
||||
* sharp('input.gif')
|
||||
* .resize(200, 300)
|
||||
* .background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
* .embed()
|
||||
* .toFormat(sharp.format.webp)
|
||||
* .toBuffer(function(err, outputBuffer) {
|
||||
* if (err) {
|
||||
* throw err;
|
||||
* }
|
||||
* // outputBuffer contains WebP image data of a 200 pixels wide and 300 pixels high
|
||||
* // containing a scaled version, embedded on a transparent canvas, of input.gif
|
||||
* });
|
||||
* @param {String} [embed='centre'] - A member of `sharp.gravity` to embed to an edge/corner.
|
||||
* @returns {Sharp}
|
||||
* @throws {Error} Invalid parameters
|
||||
* @deprecated
|
||||
* @private
|
||||
*/
|
||||
function embed (embed) {
|
||||
this.options.canvas = 'embed';
|
||||
|
||||
if (!is.defined(embed)) {
|
||||
// Default
|
||||
this.options.embed = gravity.center;
|
||||
this.options.position = gravity.center;
|
||||
} else if (is.integer(embed) && is.inRange(embed, 0, 8)) {
|
||||
// Gravity (numeric)
|
||||
this.options.embed = embed;
|
||||
this.options.position = embed;
|
||||
} else if (is.string(embed) && is.integer(gravity[embed])) {
|
||||
// Gravity (string)
|
||||
this.options.embed = gravity[embed];
|
||||
this.options.position = gravity[embed];
|
||||
} else {
|
||||
throw is.invalidParameterError('embed', 'valid embed id/name', embed);
|
||||
}
|
||||
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Preserving aspect ratio, resize the image to be as large as possible
|
||||
* while ensuring its dimensions are less than or equal to the `width` and `height` specified.
|
||||
*
|
||||
* Both `width` and `height` must be provided via `resize` otherwise the behaviour will default to `crop`.
|
||||
*
|
||||
* @example
|
||||
* sharp(inputBuffer)
|
||||
* .resize(200, 200)
|
||||
* .max()
|
||||
* .toFormat('jpeg')
|
||||
* .toBuffer()
|
||||
* .then(function(outputBuffer) {
|
||||
* // outputBuffer contains JPEG image data no wider than 200 pixels and no higher
|
||||
* // than 200 pixels regardless of the inputBuffer image dimensions
|
||||
* });
|
||||
*
|
||||
* @returns {Sharp}
|
||||
* @deprecated
|
||||
* @private
|
||||
*/
|
||||
function max () {
|
||||
this.options.canvas = 'max';
|
||||
@@ -227,12 +438,8 @@ function max () {
|
||||
}
|
||||
|
||||
/**
|
||||
* Preserving aspect ratio, resize the image to be as small as possible
|
||||
* while ensuring its dimensions are greater than or equal to the `width` and `height` specified.
|
||||
*
|
||||
* Both `width` and `height` must be provided via `resize` otherwise the behaviour will default to `crop`.
|
||||
*
|
||||
* @returns {Sharp}
|
||||
* @deprecated
|
||||
* @private
|
||||
*/
|
||||
function min () {
|
||||
this.options.canvas = 'min';
|
||||
@@ -240,9 +447,8 @@ function min () {
|
||||
}
|
||||
|
||||
/**
|
||||
* Ignoring the aspect ratio of the input, stretch the image to
|
||||
* the exact `width` and/or `height` provided via `resize`.
|
||||
* @returns {Sharp}
|
||||
* @deprecated
|
||||
* @private
|
||||
*/
|
||||
function ignoreAspectRatio () {
|
||||
this.options.canvas = 'ignore_aspect';
|
||||
@@ -250,15 +456,8 @@ function ignoreAspectRatio () {
|
||||
}
|
||||
|
||||
/**
|
||||
* Do not enlarge the output image if the input image width *or* height are already less than the required dimensions.
|
||||
* This is equivalent to GraphicsMagick's `>` geometry option:
|
||||
* "*change the dimensions of the image only if its width or height exceeds the geometry specification*".
|
||||
* Use with `max()` to preserve the image's aspect ratio.
|
||||
*
|
||||
* The default behaviour *before* function call is `false`, meaning the image will be enlarged.
|
||||
*
|
||||
* @param {Boolean} [withoutEnlargement=true]
|
||||
* @returns {Sharp}
|
||||
* @deprecated
|
||||
* @private
|
||||
*/
|
||||
function withoutEnlargement (withoutEnlargement) {
|
||||
this.options.withoutEnlargement = is.bool(withoutEnlargement) ? withoutEnlargement : true;
|
||||
@@ -270,19 +469,23 @@ function withoutEnlargement (withoutEnlargement) {
|
||||
* @private
|
||||
*/
|
||||
module.exports = function (Sharp) {
  Object.assign(Sharp.prototype, {
    resize,
    extend,
    extract,
    trim
  });
  // Class attributes
  Sharp.gravity = gravity;
  Sharp.strategy = strategy;
  Sharp.kernel = kernel;
  Sharp.fit = fit;
  Sharp.position = position;
  // Deprecated functions, to be removed in v0.22.0
  Sharp.prototype.crop = deprecate(crop, 'crop(position) is deprecated, use resize({ fit: "cover", position }) instead');
  Sharp.prototype.embed = deprecate(embed, 'embed(position) is deprecated, use resize({ fit: "contain", position }) instead');
  Sharp.prototype.max = deprecate(max, 'max() is deprecated, use resize({ fit: "inside" }) instead');
  Sharp.prototype.min = deprecate(min, 'min() is deprecated, use resize({ fit: "outside" }) instead');
  Sharp.prototype.ignoreAspectRatio = deprecate(ignoreAspectRatio, 'ignoreAspectRatio() is deprecated, use resize({ fit: "fill" }) instead');
  Sharp.prototype.withoutEnlargement = deprecate(withoutEnlargement, 'withoutEnlargement() is deprecated, use resize({ withoutEnlargement: true }) instead');
};
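
The deprecation messages above define a direct mapping from the 0.20 helpers to 0.21 `resize` options; a sketch of the migration, with the old calls shown only as comments.

// crop(position)       -> resize(w, h, { fit: 'cover', position })
// embed(position)      -> resize(w, h, { fit: 'contain', position })
// max()                -> resize(w, h, { fit: 'inside' })
// min()                -> resize(w, h, { fit: 'outside' })
// ignoreAspectRatio()  -> resize(w, h, { fit: 'fill' })
// withoutEnlargement() -> resize(w, h, { withoutEnlargement: true })
sharp(input)
  .resize(200, 200, { fit: 'cover', position: sharp.strategy.attention })
  .toBuffer();
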
|
||||
|
||||
@@ -82,23 +82,20 @@ function counters () {
 * Improves the performance of `resize`, `blur` and `sharpen` operations
 * by taking advantage of the SIMD vector unit of the CPU, e.g. Intel SSE and ARM NEON.
 *
 * @example
 * const simd = sharp.simd();
 * // simd is `true` if the runtime use of liborc is currently enabled
 * @example
 * const simd = sharp.simd(false);
 * // prevent libvips from using liborc at runtime
 *
 * @param {Boolean} [simd=true]
 * @returns {Boolean}
 */
function simd (simd) {
  return sharp.simd(is.bool(simd) ? simd : null);
}
simd(true);
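
A short sketch of the runtime toggle documented above: SIMD via liborc is now enabled by default, and can be queried or switched off per process.

const enabled = sharp.simd();  // true when liborc is in use
if (enabled) {
  sharp.simd(false);           // opt out, e.g. to work around a suspect liborc build
}
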
|
||||
|
||||
/**
|
||||
* Decorate the Sharp class with utility-related functions.
|
||||
|
||||
@@ -5,6 +5,8 @@ site_description: High performance Node.js image processing, the fastest module
|
||||
copyright: <a href="https://pixelplumbing.com/">pixelplumbing.com</a>
|
||||
google_analytics: ['UA-13034748-12', 'sharp.pixelplumbing.com']
|
||||
theme: readthedocs
|
||||
extra_css:
|
||||
- css/extra.css
|
||||
markdown_extensions:
|
||||
- toc:
|
||||
permalink: True
|
||||
|
||||
60
package.json
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"name": "sharp",
|
||||
"description": "High performance Node.js image processing, the fastest module to resize JPEG, PNG, WebP and TIFF images",
|
||||
"version": "0.20.2",
|
||||
"version": "0.21.1",
|
||||
"author": "Lovell Fuller <npm@lovell.info>",
|
||||
"homepage": "https://github.com/lovell/sharp",
|
||||
"contributors": [
|
||||
@@ -48,15 +48,28 @@
|
||||
"Andrea Bianco <andrea.bianco@unibas.ch>",
|
||||
"Rik Heywood <rik@rik.org>",
|
||||
"Thomas Parisot <hi@oncletom.io>",
|
||||
"Nathan Graves <nathanrgraves+github@gmail.com>"
|
||||
"Nathan Graves <nathanrgraves+github@gmail.com>",
|
||||
"Tom Lokhorst <tom@lokhorst.eu>",
|
||||
"Espen Hovlandsdal <espen@hovlandsdal.com>",
|
||||
"Sylvain Dumont <sylvain.dumont35@gmail.com>",
|
||||
"Alun Davies <alun.owain.davies@googlemail.com>",
|
||||
"Aidan Hoolachan <ajhoolachan21@gmail.com>",
|
||||
"Axel Eirola <axel.eirola@iki.fi>",
|
||||
"Freezy <freezy@xbmc.org>",
|
||||
"Daiz <taneli.vatanen@gmail.com>",
|
||||
"Julian Aubourg <j@ubourg.net>",
|
||||
"Keith Belovay <keith@picthrive.com>",
|
||||
"Michael B. Klein <mbklein@gmail.com>"
|
||||
],
|
||||
"scripts": {
|
||||
"install": "(node install/libvips && node install/dll-copy && prebuild-install) || (node-gyp rebuild && node install/dll-copy)",
|
||||
"clean": "rm -rf node_modules/ build/ vendor/ coverage/ test/fixtures/output.*",
|
||||
"test": "semistandard && cc && nyc --reporter=lcov --branches=99 mocha --slow=5000 --timeout=60000 ./test/unit/*.js && prebuild-ci",
|
||||
"coverage": "./test/coverage/report.sh",
|
||||
"clean": "rm -rf node_modules/ build/ vendor/ .nyc_output/ coverage/ test/fixtures/output.*",
|
||||
"test": "semistandard && cc && npm run test-unit && npm run test-licensing && prebuild-ci",
|
||||
"test-unit": "nyc --reporter=lcov --branches=99 mocha --slow=5000 --timeout=60000 ./test/unit/*.js",
|
||||
"test-licensing": "license-checker --production --summary --onlyAllow=\"Apache-2.0;BSD;ISC;MIT\"",
|
||||
"test-coverage": "./test/coverage/report.sh",
|
||||
"test-leak": "./test/leak/leak.sh",
|
||||
"docs": "for m in constructor input resize composite operation colour channel output utility; do documentation build --shallow --format=md lib/$m.js >docs/api-$m.md; done"
|
||||
"docs": "for m in constructor input resize composite operation colour channel output utility; do documentation build --shallow --format=md --markdown-toc=false lib/$m.js >docs/api-$m.md; done"
|
||||
},
|
||||
"main": "lib/index.js",
|
||||
"repository": {
|
||||
@@ -80,37 +93,40 @@
|
||||
"vips"
|
||||
],
|
||||
"dependencies": {
|
||||
"color": "^3.0.0",
|
||||
"bindings": "^1.3.1",
|
||||
"color": "^3.1.0",
|
||||
"detect-libc": "^1.0.3",
|
||||
"nan": "^2.10.0",
|
||||
"fs-copy-file-sync": "^1.0.1",
|
||||
"fs-copy-file-sync": "^1.1.1",
|
||||
"nan": "^2.11.1",
|
||||
"npmlog": "^4.1.2",
|
||||
"prebuild-install": "^2.5.3",
|
||||
"semver": "^5.5.0",
|
||||
"simple-get": "^2.8.1",
|
||||
"tar": "^4.4.1",
|
||||
"prebuild-install": "^5.2.2",
|
||||
"semver": "^5.6.0",
|
||||
"simple-get": "^3.0.3",
|
||||
"tar": "^4.4.8",
|
||||
"tunnel-agent": "^0.6.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"async": "^2.6.0",
|
||||
"async": "^2.6.1",
|
||||
"cc": "^1.0.2",
|
||||
"decompress-zip": "^0.3.1",
|
||||
"documentation": "^6.3.2",
|
||||
"documentation": "^8.1.2",
|
||||
"exif-reader": "^1.0.2",
|
||||
"icc": "^1.0.0",
|
||||
"mocha": "^5.1.1",
|
||||
"nyc": "^11.7.1",
|
||||
"prebuild": "^7.4.0",
|
||||
"prebuild-ci": "^2.2.3",
|
||||
"license-checker": "^24.0.1",
|
||||
"mocha": "^5.2.0",
|
||||
"mock-fs": "^4.7.0",
|
||||
"nyc": "^13.1.0",
|
||||
"prebuild": "^8.1.2",
|
||||
"prebuild-ci": "^2.3.0",
|
||||
"rimraf": "^2.6.2",
|
||||
"semistandard": "^12.0.1"
|
||||
"semistandard": "^13.0.1"
|
||||
},
|
||||
"license": "Apache-2.0",
|
||||
"config": {
|
||||
"libvips": "8.6.1"
|
||||
"libvips": "8.7.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=4.5.0"
|
||||
"node": ">=6"
|
||||
},
|
||||
"semistandard": {
|
||||
"env": [
|
||||
|
||||
@@ -37,6 +37,14 @@ namespace sharp {
|
||||
std::string AttrAsStr(v8::Handle<v8::Object> obj, std::string attr) {
|
||||
return *Nan::Utf8String(Nan::Get(obj, Nan::New(attr).ToLocalChecked()).ToLocalChecked());
|
||||
}
|
||||
std::vector<double> AttrAsRgba(v8::Handle<v8::Object> obj, std::string attr) {
|
||||
v8::Local<v8::Object> background = AttrAs<v8::Object>(obj, attr);
|
||||
std::vector<double> rgba(4);
|
||||
for (unsigned int i = 0; i < 4; i++) {
|
||||
rgba[i] = AttrTo<double>(background, i);
|
||||
}
|
||||
return rgba;
|
||||
}
|
||||
|
||||
// Create an InputDescriptor instance from a v8::Object describing an input image
|
||||
InputDescriptor* CreateInputDescriptor(
|
||||
@@ -55,7 +63,7 @@ namespace sharp {
|
||||
descriptor->failOnError = AttrTo<bool>(input, "failOnError");
|
||||
// Density for vector-based input
|
||||
if (HasAttr(input, "density")) {
|
||||
descriptor->density = AttrTo<uint32_t>(input, "density");
|
||||
descriptor->density = AttrTo<double>(input, "density");
|
||||
}
|
||||
// Raw pixel input
|
||||
if (HasAttr(input, "rawChannels")) {
|
||||
@@ -72,10 +80,7 @@ namespace sharp {
|
||||
descriptor->createChannels = AttrTo<uint32_t>(input, "createChannels");
|
||||
descriptor->createWidth = AttrTo<uint32_t>(input, "createWidth");
|
||||
descriptor->createHeight = AttrTo<uint32_t>(input, "createHeight");
|
||||
v8::Local<v8::Object> createBackground = AttrAs<v8::Object>(input, "createBackground");
|
||||
for (unsigned int i = 0; i < 4; i++) {
|
||||
descriptor->createBackground[i] = AttrTo<double>(createBackground, i);
|
||||
}
|
||||
descriptor->createBackground = AttrAsRgba(input, "createBackground");
|
||||
}
|
||||
return descriptor;
|
||||
}
|
||||
@@ -228,7 +233,7 @@ namespace sharp {
|
||||
->set("access", accessMethod)
|
||||
->set("fail", descriptor->failOnError);
|
||||
if (imageType == ImageType::SVG || imageType == ImageType::PDF) {
|
||||
option->set("dpi", static_cast<double>(descriptor->density));
|
||||
option->set("dpi", descriptor->density);
|
||||
}
|
||||
if (imageType == ImageType::MAGICK) {
|
||||
option->set("density", std::to_string(descriptor->density).data());
|
||||
@@ -270,7 +275,7 @@ namespace sharp {
|
||||
->set("access", accessMethod)
|
||||
->set("fail", descriptor->failOnError);
|
||||
if (imageType == ImageType::SVG || imageType == ImageType::PDF) {
|
||||
option->set("dpi", static_cast<double>(descriptor->density));
|
||||
option->set("dpi", descriptor->density);
|
||||
}
|
||||
if (imageType == ImageType::MAGICK) {
|
||||
option->set("density", std::to_string(descriptor->density).data());
|
||||
@@ -355,8 +360,8 @@ namespace sharp {
|
||||
/*
|
||||
Set pixels/mm resolution based on a pixels/inch density.
|
||||
*/
|
||||
void SetDensity(VImage image, const int density) {
|
||||
const double pixelsPerMm = static_cast<double>(density) / 25.4;
|
||||
void SetDensity(VImage image, const double density) {
|
||||
const double pixelsPerMm = density / 25.4;
|
||||
image.set("Xres", pixelsPerMm);
|
||||
image.set("Yres", pixelsPerMm);
|
||||
image.set(VIPS_META_RESOLUTION_UNIT, "in");
|
||||
@@ -370,10 +375,6 @@ namespace sharp {
|
||||
if (image.width() > 65535 || image.height() > 65535) {
|
||||
throw vips::VError("Processed image is too large for the JPEG format");
|
||||
}
|
||||
} else if (imageType == ImageType::PNG) {
|
||||
if (image.width() > 2147483647 || image.height() > 2147483647) {
|
||||
throw vips::VError("Processed image is too large for the PNG format");
|
||||
}
|
||||
} else if (imageType == ImageType::WEBP) {
|
||||
if (image.width() > 16383 || image.height() > 16383) {
|
||||
throw vips::VError("Processed image is too large for the WebP format");
|
||||
@@ -606,4 +607,40 @@ namespace sharp {
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Apply the alpha channel to a given colour
|
||||
*/
|
||||
std::tuple<VImage, std::vector<double>> ApplyAlpha(VImage image, std::vector<double> colour) {
|
||||
// Scale up 8-bit values to match 16-bit input image
|
||||
double const multiplier = sharp::Is16Bit(image.interpretation()) ? 256.0 : 1.0;
|
||||
// Create alphaColour colour
|
||||
std::vector<double> alphaColour;
|
||||
if (image.bands() > 2) {
|
||||
alphaColour = {
|
||||
multiplier * colour[0],
|
||||
multiplier * colour[1],
|
||||
multiplier * colour[2]
|
||||
};
|
||||
} else {
|
||||
// Convert sRGB to greyscale
|
||||
alphaColour = { multiplier * (
|
||||
0.2126 * colour[0] +
|
||||
0.7152 * colour[1] +
|
||||
0.0722 * colour[2])
|
||||
};
|
||||
}
|
||||
// Add alpha channel to alphaColour colour
|
||||
if (colour[3] < 255.0 || HasAlpha(image)) {
|
||||
alphaColour.push_back(colour[3] * multiplier);
|
||||
}
|
||||
// Ensure alphaColour colour uses correct colourspace
|
||||
alphaColour = sharp::GetRgbaAsColourspace(alphaColour, image.interpretation());
|
||||
// Add non-transparent alpha channel, if required
|
||||
if (colour[3] < 255.0 && !HasAlpha(image)) {
|
||||
image = image.bandjoin(
|
||||
VImage::new_matrix(image.width(), image.height()).new_from_image(255 * multiplier));
|
||||
}
|
||||
return std::make_tuple(image, alphaColour);
|
||||
}
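
ApplyAlpha above is what lets an 8-bit RGBA background supplied from JavaScript be matched to the working image, including 16-bit and greyscale cases. At the API level a semi-transparent background simply flows through options such as `resize` and `extend`; a sketch, with hypothetical filenames.

// A 50% transparent white canvas; PNG output keeps the resulting alpha channel
sharp('input.jpg')
  .resize(200, 300, {
    fit: 'contain',
    background: { r: 255, g: 255, b: 255, alpha: 0.5 }
  })
  .png()
  .toFile('contained.png');
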
|
||||
|
||||
} // namespace sharp
|
||||
|
||||
22
src/common.h
@@ -49,7 +49,7 @@ namespace sharp {
|
||||
char *buffer;
|
||||
bool failOnError;
|
||||
size_t bufferLength;
|
||||
int density;
|
||||
double density;
|
||||
int rawChannels;
|
||||
int rawWidth;
|
||||
int rawHeight;
|
||||
@@ -57,30 +57,27 @@ namespace sharp {
|
||||
int createChannels;
|
||||
int createWidth;
|
||||
int createHeight;
|
||||
double createBackground[4];
|
||||
std::vector<double> createBackground;
|
||||
|
||||
InputDescriptor():
|
||||
buffer(nullptr),
|
||||
failOnError(FALSE),
|
||||
bufferLength(0),
|
||||
density(72),
|
||||
density(72.0),
|
||||
rawChannels(0),
|
||||
rawWidth(0),
|
||||
rawHeight(0),
|
||||
page(0),
|
||||
createChannels(0),
|
||||
createWidth(0),
|
||||
createHeight(0) {
|
||||
createBackground[0] = 0.0;
|
||||
createBackground[1] = 0.0;
|
||||
createBackground[2] = 0.0;
|
||||
createBackground[3] = 255.0;
|
||||
}
|
||||
createHeight(0),
|
||||
createBackground{ 0.0, 0.0, 0.0, 255.0 } {}
|
||||
};
|
||||
|
||||
// Convenience methods to access the attributes of a v8::Object
|
||||
bool HasAttr(v8::Handle<v8::Object> obj, std::string attr);
|
||||
std::string AttrAsStr(v8::Handle<v8::Object> obj, std::string attr);
|
||||
std::vector<double> AttrAsRgba(v8::Handle<v8::Object> obj, std::string attr);
|
||||
template<typename T> v8::Local<T> AttrAs(v8::Handle<v8::Object> obj, std::string attr) {
|
||||
return Nan::Get(obj, Nan::New(attr).ToLocalChecked()).ToLocalChecked().As<T>();
|
||||
}
|
||||
@@ -186,7 +183,7 @@ namespace sharp {
|
||||
/*
|
||||
Set pixels/mm resolution based on a pixels/inch density.
|
||||
*/
|
||||
void SetDensity(VImage image, const int density);
|
||||
void SetDensity(VImage image, const double density);
|
||||
|
||||
/*
|
||||
Check the proposed format supports the current dimensions.
|
||||
@@ -255,6 +252,11 @@ namespace sharp {
|
||||
*/
|
||||
std::vector<double> GetRgbaAsColourspace(std::vector<double> const rgba, VipsInterpretation const interpretation);
|
||||
|
||||
/*
|
||||
Apply the alpha channel to a given colour
|
||||
*/
|
||||
std::tuple<VImage, std::vector<double>> ApplyAlpha(VImage image, std::vector<double> colour);
|
||||
|
||||
} // namespace sharp
|
||||
|
||||
#endif // SRC_COMMON_H_
|
||||
|
||||
@@ -613,7 +613,7 @@ VImage::new_matrixv( int width, int height, ... )
|
||||
}
|
||||
|
||||
VImage
|
||||
VImage::write( VImage out )
|
||||
VImage::write( VImage out ) const
|
||||
{
|
||||
if( vips_image_write( this->get_image(), out.get_image() ) )
|
||||
throw VError();
|
||||
@@ -622,7 +622,7 @@ VImage::write( VImage out )
|
||||
}
|
||||
|
||||
void
|
||||
VImage::write_to_file( const char *name, VOption *options )
|
||||
VImage::write_to_file( const char *name, VOption *options ) const
|
||||
{
|
||||
char filename[VIPS_PATH_MAX];
|
||||
char option_string[VIPS_PATH_MAX];
|
||||
@@ -642,7 +642,7 @@ VImage::write_to_file( const char *name, VOption *options )
|
||||
|
||||
void
|
||||
VImage::write_to_buffer( const char *suffix, void **buf, size_t *size,
|
||||
VOption *options )
|
||||
VOption *options ) const
|
||||
{
|
||||
char filename[VIPS_PATH_MAX];
|
||||
char option_string[VIPS_PATH_MAX];
|
||||
@@ -675,7 +675,7 @@ VImage::write_to_buffer( const char *suffix, void **buf, size_t *size,
|
||||
#include "vips-operators.cpp"
|
||||
|
||||
std::vector<VImage>
|
||||
VImage::bandsplit( VOption *options )
|
||||
VImage::bandsplit( VOption *options ) const
|
||||
{
|
||||
std::vector<VImage> b;
|
||||
|
||||
@@ -686,7 +686,7 @@ VImage::bandsplit( VOption *options )
|
||||
}
|
||||
|
||||
VImage
|
||||
VImage::bandjoin( VImage other, VOption *options )
|
||||
VImage::bandjoin( VImage other, VOption *options ) const
|
||||
{
|
||||
VImage v[2] = { *this, other };
|
||||
std::vector<VImage> vec( v, v + VIPS_NUMBER( v ) );
|
||||
@@ -695,7 +695,7 @@ VImage::bandjoin( VImage other, VOption *options )
|
||||
}
|
||||
|
||||
VImage
|
||||
VImage::composite( VImage other, VipsBlendMode mode, VOption *options )
|
||||
VImage::composite( VImage other, VipsBlendMode mode, VOption *options ) const
|
||||
{
|
||||
VImage v[2] = { *this, other };
|
||||
std::vector<VImage> ivec( v, v + VIPS_NUMBER( v ) );
|
||||
@@ -706,7 +706,7 @@ VImage::composite( VImage other, VipsBlendMode mode, VOption *options )
|
||||
}
|
||||
|
||||
std::complex<double>
|
||||
VImage::minpos( VOption *options )
|
||||
VImage::minpos( VOption *options ) const
|
||||
{
|
||||
double x, y;
|
||||
|
||||
@@ -719,7 +719,7 @@ VImage::minpos( VOption *options )
|
||||
}
|
||||
|
||||
std::complex<double>
|
||||
VImage::maxpos( VOption *options )
|
||||
VImage::maxpos( VOption *options ) const
|
||||
{
|
||||
double x, y;
|
||||
|
||||
@@ -734,43 +734,43 @@ VImage::maxpos( VOption *options )
|
||||
// Operator overloads
|
||||
|
||||
VImage
|
||||
VImage::operator[]( int index )
|
||||
VImage::operator[]( int index ) const
|
||||
{
|
||||
return( this->extract_band( index ) );
|
||||
}
|
||||
|
||||
std::vector<double>
|
||||
VImage::operator()( int x, int y )
|
||||
VImage::operator()( int x, int y ) const
|
||||
{
|
||||
return( this->getpoint( x, y ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator+( VImage a, VImage b )
|
||||
operator+( const VImage a, const VImage b )
|
||||
{
|
||||
return( a.add( b ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator+( double a, VImage b )
|
||||
operator+( double a, const VImage b )
|
||||
{
|
||||
return( b.linear( 1.0, a ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator+( VImage a, double b )
|
||||
operator+( const VImage a, double b )
|
||||
{
|
||||
return( a.linear( 1.0, b ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator+( std::vector<double> a, VImage b )
|
||||
operator+( const std::vector<double> a, const VImage b )
|
||||
{
|
||||
return( b.linear( 1.0, a ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator+( VImage a, std::vector<double> b )
|
||||
operator+( const VImage a, const std::vector<double> b )
|
||||
{
|
||||
return( a.linear( 1.0, b ) );
|
||||
}
|
||||
@@ -788,37 +788,37 @@ operator+=( VImage &a, const double b )
|
||||
}
|
||||
|
||||
VImage &
|
||||
operator+=( VImage &a, std::vector<double> b )
|
||||
operator+=( VImage &a, const std::vector<double> b )
|
||||
{
|
||||
return( a = a + b );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator-( VImage a, VImage b )
|
||||
operator-( const VImage a, const VImage b )
|
||||
{
|
||||
return( a.subtract( b ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator-( double a, VImage b )
|
||||
operator-( double a, const VImage b )
|
||||
{
|
||||
return( b.linear( -1.0, a ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator-( VImage a, double b )
|
||||
operator-( const VImage a, double b )
|
||||
{
|
||||
return( a.linear( 1.0, -b ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator-( std::vector<double> a, VImage b )
|
||||
operator-( const std::vector<double> a, const VImage b )
|
||||
{
|
||||
return( b.linear( -1.0, a ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator-( VImage a, std::vector<double> b )
|
||||
operator-( const VImage a, const std::vector<double> b )
|
||||
{
|
||||
return( a.linear( 1.0, vips::negate( b ) ) );
|
||||
}
|
||||
@@ -836,43 +836,43 @@ operator-=( VImage &a, const double b )
|
||||
}
|
||||
|
||||
VImage &
|
||||
operator-=( VImage &a, std::vector<double> b )
|
||||
operator-=( VImage &a, const std::vector<double> b )
|
||||
{
|
||||
return( a = a - b );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator-( VImage a )
|
||||
operator-( const VImage a )
|
||||
{
|
||||
return( a * -1 );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator*( VImage a, VImage b )
|
||||
operator*( const VImage a, const VImage b )
|
||||
{
|
||||
return( a.multiply( b ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator*( double a, VImage b )
|
||||
operator*( double a, const VImage b )
|
||||
{
|
||||
return( b.linear( a, 0.0 ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator*( VImage a, double b )
|
||||
operator*( const VImage a, double b )
|
||||
{
|
||||
return( a.linear( b, 0.0 ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator*( std::vector<double> a, VImage b )
|
||||
operator*( const std::vector<double> a, const VImage b )
|
||||
{
|
||||
return( b.linear( a, 0.0 ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator*( VImage a, std::vector<double> b )
|
||||
operator*( const VImage a, const std::vector<double> b )
|
||||
{
|
||||
return( a.linear( b, 0.0 ) );
|
||||
}
|
||||
@@ -890,37 +890,37 @@ operator*=( VImage &a, const double b )
|
||||
}
|
||||
|
||||
VImage &
|
||||
operator*=( VImage &a, std::vector<double> b )
|
||||
operator*=( VImage &a, const std::vector<double> b )
|
||||
{
|
||||
return( a = a * b );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator/( VImage a, VImage b )
|
||||
operator/( const VImage a, const VImage b )
|
||||
{
|
||||
return( a.divide( b ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator/( double a, VImage b )
|
||||
operator/( double a, const VImage b )
|
||||
{
|
||||
return( b.pow( -1.0 ).linear( a, 0.0 ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator/( VImage a, double b )
|
||||
operator/( const VImage a, double b )
|
||||
{
|
||||
return( a.linear( 1.0 / b, 0.0 ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator/( std::vector<double> a, VImage b )
|
||||
operator/( const std::vector<double> a, const VImage b )
|
||||
{
|
||||
return( b.pow( -1.0 ).linear( a, 0.0 ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator/( VImage a, std::vector<double> b )
|
||||
operator/( const VImage a, const std::vector<double> b )
|
||||
{
|
||||
return( a.linear( vips::invert( b ), 0.0 ) );
|
||||
}
|
||||
@@ -938,25 +938,25 @@ operator/=( VImage &a, const double b )
|
||||
}
|
||||
|
||||
VImage &
|
||||
operator/=( VImage &a, std::vector<double> b )
|
||||
operator/=( VImage &a, const std::vector<double> b )
|
||||
{
|
||||
return( a = a / b );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator%( VImage a, VImage b )
|
||||
operator%( const VImage a, const VImage b )
|
||||
{
|
||||
return( a.remainder( b ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator%( VImage a, double b )
|
||||
operator%( const VImage a, const double b )
|
||||
{
|
||||
return( a.remainder_const( to_vector( b ) ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator%( VImage a, std::vector<double> b )
|
||||
operator%( const VImage a, const std::vector<double> b )
|
||||
{
|
||||
return( a.remainder_const( b ) );
|
||||
}
|
||||
@@ -974,243 +974,243 @@ operator%=( VImage &a, const double b )
|
||||
}
|
||||
|
||||
VImage &
|
||||
operator%=( VImage &a, std::vector<double> b )
|
||||
operator%=( VImage &a, const std::vector<double> b )
|
||||
{
|
||||
return( a = a % b );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator<( VImage a, VImage b )
|
||||
operator<( const VImage a, const VImage b )
|
||||
{
|
||||
return( a.relational( b, VIPS_OPERATION_RELATIONAL_LESS ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator<( double a, VImage b )
|
||||
operator<( const double a, const VImage b )
|
||||
{
|
||||
return( b.relational_const( VIPS_OPERATION_RELATIONAL_MORE,
|
||||
to_vector( a ) ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator<( VImage a, double b )
|
||||
operator<( const VImage a, const double b )
|
||||
{
|
||||
return( a.relational_const( VIPS_OPERATION_RELATIONAL_LESS,
|
||||
to_vector( b ) ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator<( std::vector<double> a, VImage b )
|
||||
operator<( const std::vector<double> a, const VImage b )
|
||||
{
|
||||
return( b.relational_const( VIPS_OPERATION_RELATIONAL_MORE,
|
||||
a ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator<( VImage a, std::vector<double> b )
|
||||
operator<( const VImage a, const std::vector<double> b )
|
||||
{
|
||||
return( a.relational_const( VIPS_OPERATION_RELATIONAL_LESS,
|
||||
b ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator<=( VImage a, VImage b )
|
||||
operator<=( const VImage a, const VImage b )
|
||||
{
|
||||
return( a.relational( b, VIPS_OPERATION_RELATIONAL_LESSEQ ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator<=( double a, VImage b )
|
||||
operator<=( const double a, const VImage b )
|
||||
{
|
||||
return( b.relational_const( VIPS_OPERATION_RELATIONAL_MOREEQ,
|
||||
to_vector( a ) ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator<=( VImage a, double b )
|
||||
operator<=( const VImage a, const double b )
|
||||
{
|
||||
return( a.relational_const( VIPS_OPERATION_RELATIONAL_LESSEQ,
|
||||
to_vector( b ) ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator<=( std::vector<double> a, VImage b )
|
||||
operator<=( const std::vector<double> a, const VImage b )
|
||||
{
|
||||
return( b.relational_const( VIPS_OPERATION_RELATIONAL_MOREEQ,
|
||||
a ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator<=( VImage a, std::vector<double> b )
|
||||
operator<=( const VImage a, const std::vector<double> b )
|
||||
{
|
||||
return( a.relational_const( VIPS_OPERATION_RELATIONAL_LESSEQ,
|
||||
b ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator>( VImage a, VImage b )
|
||||
operator>( const VImage a, const VImage b )
|
||||
{
|
||||
return( a.relational( b, VIPS_OPERATION_RELATIONAL_MORE ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator>( double a, VImage b )
|
||||
operator>( const double a, const VImage b )
|
||||
{
|
||||
return( b.relational_const( VIPS_OPERATION_RELATIONAL_LESS,
|
||||
to_vector( a ) ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator>( VImage a, double b )
|
||||
operator>( const VImage a, const double b )
|
||||
{
|
||||
return( a.relational_const( VIPS_OPERATION_RELATIONAL_MORE,
|
||||
to_vector( b ) ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator>( std::vector<double> a, VImage b )
|
||||
operator>( const std::vector<double> a, const VImage b )
|
||||
{
|
||||
return( b.relational_const( VIPS_OPERATION_RELATIONAL_LESS,
|
||||
a ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator>( VImage a, std::vector<double> b )
|
||||
operator>( const VImage a, const std::vector<double> b )
|
||||
{
|
||||
return( a.relational_const( VIPS_OPERATION_RELATIONAL_MORE,
|
||||
b ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator>=( VImage a, VImage b )
|
||||
operator>=( const VImage a, const VImage b )
|
||||
{
|
||||
return( a.relational( b, VIPS_OPERATION_RELATIONAL_MOREEQ ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator>=( double a, VImage b )
|
||||
operator>=( const double a, const VImage b )
|
||||
{
|
||||
return( b.relational_const( VIPS_OPERATION_RELATIONAL_LESSEQ,
|
||||
to_vector( a ) ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator>=( VImage a, double b )
|
||||
operator>=( const VImage a, const double b )
|
||||
{
|
||||
return( a.relational_const( VIPS_OPERATION_RELATIONAL_MOREEQ,
|
||||
to_vector( b ) ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator>=( std::vector<double> a, VImage b )
|
||||
operator>=( const std::vector<double> a, const VImage b )
|
||||
{
|
||||
return( b.relational_const( VIPS_OPERATION_RELATIONAL_LESSEQ,
|
||||
a ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator>=( VImage a, std::vector<double> b )
|
||||
operator>=( const VImage a, const std::vector<double> b )
|
||||
{
|
||||
return( a.relational_const( VIPS_OPERATION_RELATIONAL_MOREEQ,
|
||||
b ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator==( VImage a, VImage b )
|
||||
operator==( const VImage a, const VImage b )
|
||||
{
|
||||
return( a.relational( b, VIPS_OPERATION_RELATIONAL_EQUAL ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator==( double a, VImage b )
|
||||
operator==( const double a, const VImage b )
|
||||
{
|
||||
return( b.relational_const( VIPS_OPERATION_RELATIONAL_EQUAL,
|
||||
to_vector( a ) ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator==( VImage a, double b )
|
||||
operator==( const VImage a, const double b )
|
||||
{
|
||||
return( a.relational_const( VIPS_OPERATION_RELATIONAL_EQUAL,
|
||||
to_vector( b ) ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator==( std::vector<double> a, VImage b )
|
||||
operator==( const std::vector<double> a, const VImage b )
|
||||
{
|
||||
return( b.relational_const( VIPS_OPERATION_RELATIONAL_EQUAL,
|
||||
a ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator==( VImage a, std::vector<double> b )
|
||||
operator==( const VImage a, const std::vector<double> b )
|
||||
{
|
||||
return( a.relational_const( VIPS_OPERATION_RELATIONAL_EQUAL,
|
||||
b ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator!=( VImage a, VImage b )
|
||||
operator!=( const VImage a, const VImage b )
|
||||
{
|
||||
return( a.relational( b, VIPS_OPERATION_RELATIONAL_NOTEQ ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator!=( double a, VImage b )
|
||||
operator!=( const double a, const VImage b )
|
||||
{
|
||||
return( b.relational_const( VIPS_OPERATION_RELATIONAL_NOTEQ,
|
||||
to_vector( a ) ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator!=( VImage a, double b )
|
||||
operator!=( const VImage a, const double b )
|
||||
{
|
||||
return( a.relational_const( VIPS_OPERATION_RELATIONAL_NOTEQ,
|
||||
to_vector( b ) ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator!=( std::vector<double> a, VImage b )
|
||||
operator!=( const std::vector<double> a, const VImage b )
|
||||
{
|
||||
return( b.relational_const( VIPS_OPERATION_RELATIONAL_NOTEQ,
|
||||
a ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator!=( VImage a, std::vector<double> b )
|
||||
operator!=( const VImage a, const std::vector<double> b )
|
||||
{
|
||||
return( a.relational_const( VIPS_OPERATION_RELATIONAL_NOTEQ,
|
||||
b ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator&( VImage a, VImage b )
|
||||
operator&( const VImage a, const VImage b )
|
||||
{
|
||||
return( a.boolean( b, VIPS_OPERATION_BOOLEAN_AND ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator&( double a, VImage b )
|
||||
operator&( const double a, const VImage b )
|
||||
{
|
||||
return( b.boolean_const( VIPS_OPERATION_BOOLEAN_AND,
|
||||
to_vector( a ) ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator&( VImage a, double b )
|
||||
operator&( const VImage a, const double b )
|
||||
{
|
||||
return( a.boolean_const( VIPS_OPERATION_BOOLEAN_AND,
|
||||
to_vector( b ) ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator&( std::vector<double> a, VImage b )
|
||||
operator&( const std::vector<double> a, const VImage b )
|
||||
{
|
||||
return( b.boolean_const( VIPS_OPERATION_BOOLEAN_AND, a ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator&( VImage a, std::vector<double> b )
|
||||
operator&( const VImage a, const std::vector<double> b )
|
||||
{
|
||||
return( a.boolean_const( VIPS_OPERATION_BOOLEAN_AND, b ) );
|
||||
}
|
||||
@@ -1228,40 +1228,40 @@ operator&=( VImage &a, const double b )
|
||||
}
|
||||
|
||||
VImage &
|
||||
operator&=( VImage &a, std::vector<double> b )
|
||||
operator&=( VImage &a, const std::vector<double> b )
|
||||
{
|
||||
return( a = a & b );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator|( VImage a, VImage b )
|
||||
operator|( const VImage a, const VImage b )
|
||||
{
|
||||
return( a.boolean( b, VIPS_OPERATION_BOOLEAN_OR ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator|( double a, VImage b )
|
||||
operator|( const double a, const VImage b )
|
||||
{
|
||||
return( b.boolean_const( VIPS_OPERATION_BOOLEAN_OR,
|
||||
to_vector( a ) ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator|( VImage a, double b )
|
||||
operator|( const VImage a, const double b )
|
||||
{
|
||||
return( a.boolean_const( VIPS_OPERATION_BOOLEAN_OR,
|
||||
to_vector( b ) ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator|( std::vector<double> a, VImage b )
|
||||
operator|( const std::vector<double> a, const VImage b )
|
||||
{
|
||||
return( b.boolean_const( VIPS_OPERATION_BOOLEAN_OR,
|
||||
a ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator|( VImage a, std::vector<double> b )
|
||||
operator|( const VImage a, const std::vector<double> b )
|
||||
{
|
||||
return( a.boolean_const( VIPS_OPERATION_BOOLEAN_OR,
|
||||
b ) );
|
||||
@@ -1280,40 +1280,40 @@ operator|=( VImage &a, const double b )
|
||||
}
|
||||
|
||||
VImage &
|
||||
operator|=( VImage &a, std::vector<double> b )
|
||||
operator|=( VImage &a, const std::vector<double> b )
|
||||
{
|
||||
return( a = a | b );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator^( VImage a, VImage b )
|
||||
operator^( const VImage a, const VImage b )
|
||||
{
|
||||
return( a.boolean( b, VIPS_OPERATION_BOOLEAN_EOR ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator^( double a, VImage b )
|
||||
operator^( const double a, const VImage b )
|
||||
{
|
||||
return( b.boolean_const( VIPS_OPERATION_BOOLEAN_EOR,
|
||||
to_vector( a ) ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator^( VImage a, double b )
|
||||
operator^( const VImage a, const double b )
|
||||
{
|
||||
return( a.boolean_const( VIPS_OPERATION_BOOLEAN_EOR,
|
||||
to_vector( b ) ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator^( std::vector<double> a, VImage b )
|
||||
operator^( const std::vector<double> a, const VImage b )
|
||||
{
|
||||
return( b.boolean_const( VIPS_OPERATION_BOOLEAN_EOR,
|
||||
a ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator^( VImage a, std::vector<double> b )
|
||||
operator^( const VImage a, const std::vector<double> b )
|
||||
{
|
||||
return( a.boolean_const( VIPS_OPERATION_BOOLEAN_EOR,
|
||||
b ) );
|
||||
@@ -1332,26 +1332,26 @@ operator^=( VImage &a, const double b )
|
||||
}
|
||||
|
||||
VImage &
|
||||
operator^=( VImage &a, std::vector<double> b )
|
||||
operator^=( VImage &a, const std::vector<double> b )
|
||||
{
|
||||
return( a = a ^ b );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator<<( VImage a, VImage b )
|
||||
operator<<( const VImage a, const VImage b )
|
||||
{
|
||||
return( a.boolean( b, VIPS_OPERATION_BOOLEAN_LSHIFT ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator<<( VImage a, double b )
|
||||
operator<<( const VImage a, const double b )
|
||||
{
|
||||
return( a.boolean_const( VIPS_OPERATION_BOOLEAN_LSHIFT,
|
||||
to_vector( b ) ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator<<( VImage a, std::vector<double> b )
|
||||
operator<<( const VImage a, const std::vector<double> b )
|
||||
{
|
||||
return( a.boolean_const( VIPS_OPERATION_BOOLEAN_LSHIFT,
|
||||
b ) );
|
||||
@@ -1370,26 +1370,26 @@ operator<<=( VImage &a, const double b )
|
||||
}
|
||||
|
||||
VImage &
|
||||
operator<<=( VImage &a, std::vector<double> b )
|
||||
operator<<=( VImage &a, const std::vector<double> b )
|
||||
{
|
||||
return( a = a << b );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator>>( VImage a, VImage b )
|
||||
operator>>( const VImage a, const VImage b )
|
||||
{
|
||||
return( a.boolean( b, VIPS_OPERATION_BOOLEAN_RSHIFT ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator>>( VImage a, double b )
|
||||
operator>>( const VImage a, const double b )
|
||||
{
|
||||
return( a.boolean_const( VIPS_OPERATION_BOOLEAN_RSHIFT,
|
||||
to_vector( b ) ) );
|
||||
}
|
||||
|
||||
VImage
|
||||
operator>>( VImage a, std::vector<double> b )
|
||||
operator>>( const VImage a, const std::vector<double> b )
|
||||
{
|
||||
return( a.boolean_const( VIPS_OPERATION_BOOLEAN_RSHIFT,
|
||||
b ) );
|
||||
@@ -1408,7 +1408,7 @@ operator>>=( VImage &a, const double b )
|
||||
}
|
||||
|
||||
VImage &
|
||||
operator>>=( VImage &a, std::vector<double> b )
|
||||
operator>>=( VImage &a, const std::vector<double> b )
|
||||
{
|
||||
return( a = a << b );
|
||||
}
|
||||
|
||||
@@ -62,6 +62,12 @@ class MetadataWorker : public Nan::AsyncWorker {
|
||||
if (sharp::HasDensity(image)) {
|
||||
baton->density = sharp::GetDensity(image);
|
||||
}
|
||||
if (image.get_typeof("jpeg-chroma-subsample") == VIPS_TYPE_REF_STRING) {
|
||||
baton->chromaSubsampling = image.get_string("jpeg-chroma-subsample");
|
||||
}
|
||||
if (image.get_typeof("interlaced") == G_TYPE_INT) {
|
||||
baton->isProgressive = image.get_int("interlaced") == 1;
|
||||
}
|
||||
baton->hasProfile = sharp::HasProfile(image);
|
||||
// Derived attributes
|
||||
baton->hasAlpha = sharp::HasAlpha(image);
|
||||
@@ -83,9 +89,9 @@ class MetadataWorker : public Nan::AsyncWorker {
|
||||
baton->iccLength = iccLength;
|
||||
}
|
||||
// IPTC
|
||||
if (image.get_typeof(VIPS_META_IPCT_NAME) == VIPS_TYPE_BLOB) {
|
||||
if (image.get_typeof(VIPS_META_IPTC_NAME) == VIPS_TYPE_BLOB) {
|
||||
size_t iptcLength;
|
||||
void const *iptc = image.get_blob(VIPS_META_IPCT_NAME, &iptcLength);
|
||||
void const *iptc = image.get_blob(VIPS_META_IPTC_NAME, &iptcLength);
|
||||
baton->iptc = static_cast<char *>(g_malloc(iptcLength));
|
||||
memcpy(baton->iptc, iptc, iptcLength);
|
||||
baton->iptcLength = iptcLength;
|
||||
@@ -117,6 +123,9 @@ class MetadataWorker : public Nan::AsyncWorker {
|
||||
// Metadata Object
|
||||
v8::Local<v8::Object> info = New<v8::Object>();
|
||||
Set(info, New("format").ToLocalChecked(), New<v8::String>(baton->format).ToLocalChecked());
|
||||
if (baton->input->bufferLength > 0) {
|
||||
Set(info, New("size").ToLocalChecked(), New<v8::Uint32>(static_cast<uint32_t>(baton->input->bufferLength)));
|
||||
}
|
||||
Set(info, New("width").ToLocalChecked(), New<v8::Uint32>(baton->width));
|
||||
Set(info, New("height").ToLocalChecked(), New<v8::Uint32>(baton->height));
|
||||
Set(info, New("space").ToLocalChecked(), New<v8::String>(baton->space).ToLocalChecked());
|
||||
@@ -125,6 +134,12 @@ class MetadataWorker : public Nan::AsyncWorker {
|
||||
if (baton->density > 0) {
|
||||
Set(info, New("density").ToLocalChecked(), New<v8::Uint32>(baton->density));
|
||||
}
|
||||
if (!baton->chromaSubsampling.empty()) {
|
||||
Set(info,
|
||||
New("chromaSubsampling").ToLocalChecked(),
|
||||
New<v8::String>(baton->chromaSubsampling).ToLocalChecked());
|
||||
}
|
||||
Set(info, New("isProgressive").ToLocalChecked(), New<v8::Boolean>(baton->isProgressive));
|
||||
Set(info, New("hasProfile").ToLocalChecked(), New<v8::Boolean>(baton->hasProfile));
|
||||
Set(info, New("hasAlpha").ToLocalChecked(), New<v8::Boolean>(baton->hasAlpha));
|
||||
if (baton->orientation > 0) {
|
||||
|
||||
@@ -31,6 +31,8 @@ struct MetadataBaton {
|
||||
int channels;
|
||||
std::string depth;
|
||||
int density;
|
||||
std::string chromaSubsampling;
|
||||
bool isProgressive;
|
||||
bool hasProfile;
|
||||
bool hasAlpha;
|
||||
int orientation;
|
||||
@@ -50,6 +52,7 @@ struct MetadataBaton {
|
||||
height(0),
|
||||
channels(0),
|
||||
density(0),
|
||||
isProgressive(false),
|
||||
hasProfile(false),
|
||||
hasAlpha(false),
|
||||
orientation(0),
|
||||
|
||||
@@ -28,6 +28,16 @@ using vips::VError;
|
||||
|
||||
namespace sharp {
|
||||
|
||||
/*
|
||||
Removes alpha channel, if any.
|
||||
*/
|
||||
VImage RemoveAlpha(VImage image) {
|
||||
if (HasAlpha(image)) {
|
||||
image = image.extract_band(0, VImage::option()->set("n", image.bands() - 1));
|
||||
}
|
||||
return image;
|
||||
}
|
||||
|
||||
/*
|
||||
Composite overlayImage over image at given position
|
||||
Assumes alpha channels are already premultiplied and will be unpremultiplied after
|
||||
@@ -68,7 +78,7 @@ namespace sharp {
|
||||
//
|
||||
// References:
|
||||
// - http://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending
|
||||
// - https://github.com/jcupitt/ruby-vips/issues/28#issuecomment-9014826
|
||||
// - https://github.com/libvips/ruby-vips/issues/28#issuecomment-9014826
|
||||
//
|
||||
// out_a = src_a + dst_a * (1 - src_a)
|
||||
// ^^^^^^^^^^^
|
||||
@@ -161,13 +171,14 @@ namespace sharp {
|
||||
if (typeBeforeTint == VIPS_INTERPRETATION_RGB) {
|
||||
typeBeforeTint = VIPS_INTERPRETATION_sRGB;
|
||||
}
|
||||
// Create 2 band image with every pixel set to the tint chroma
|
||||
std::vector<double> chromaPixel {a, b};
|
||||
VImage chroma = image.new_from_image(chromaPixel);
|
||||
// Extract luminance
|
||||
VImage luminance = image.colourspace(VIPS_INTERPRETATION_LAB)[0];
|
||||
// Create the tinted version by combining the L from the original and the chroma from the tint
|
||||
VImage tinted = luminance.bandjoin(chroma).colourspace(typeBeforeTint);
|
||||
std::vector<double> chroma {a, b};
|
||||
VImage tinted = luminance
|
||||
.bandjoin(chroma)
|
||||
.copy(VImage::option()->set("interpretation", VIPS_INTERPRETATION_LAB))
|
||||
.colourspace(typeBeforeTint);
|
||||
// Attach original alpha channel, if any
|
||||
if (HasAlpha(image)) {
|
||||
// Extract original alpha channel
|
||||
@@ -222,10 +233,8 @@ namespace sharp {
|
||||
VImage Gamma(VImage image, double const exponent) {
|
||||
if (HasAlpha(image)) {
|
||||
// Separate alpha channel
|
||||
VImage imageWithoutAlpha = image.extract_band(0,
|
||||
VImage::option()->set("n", image.bands() - 1));
|
||||
VImage alpha = image[image.bands() - 1];
|
||||
return imageWithoutAlpha.gamma(VImage::option()->set("exponent", exponent)).bandjoin(alpha);
|
||||
return RemoveAlpha(image).gamma(VImage::option()->set("exponent", exponent)).bandjoin(alpha);
|
||||
} else {
|
||||
return image.gamma(VImage::option()->set("exponent", exponent));
|
||||
}
|
||||
@@ -269,6 +278,25 @@ namespace sharp {
return image.conv(kernel);
}

/*
* Recomb with a Matrix of the given bands/channel size.
* Eg. RGB will be a 3x3 matrix.
*/
VImage Recomb(VImage image, std::unique_ptr<double[]> const &matrix) {
double *m = matrix.get();
return image
.colourspace(VIPS_INTERPRETATION_sRGB)
.recomb(image.bands() == 3
? VImage::new_from_memory(
m, 9 * sizeof(double), 3, 3, 1, VIPS_FORMAT_DOUBLE
)
: VImage::new_matrixv(4, 4,
m[0], m[1], m[2], 0.0,
m[3], m[4], m[5], 0.0,
m[6], m[7], m[8], 0.0,
0.0, 0.0, 0.0, 1.0));
}

/*
* Sharpen flat and jagged areas. Use sigma of -1.0 for fast sharpen.
*/
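Recomb multiplies each RGB pixel by a 3x3 matrix, padded to 4x4 when an alpha band is present so the alpha channel passes through unchanged. A sketch of the matching JavaScript call, assuming `recomb()` takes the nine values used to build recombMatrix; the sepia coefficients mirror the recomb-sepia fixtures only in spirit:

```js
// Classic sepia recombination matrix (rows produce output R, G, B).
sharp('input.jpg')
  .recomb([
    [0.3588, 0.7044, 0.1368],
    [0.2990, 0.5870, 0.1140],
    [0.2392, 0.4696, 0.0912]
  ])
  .toFile('recomb-sepia.jpg')
  .catch(console.error);
```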
@@ -315,55 +343,22 @@ namespace sharp {
return image.boolean(imageR, boolean);
}

VImage Trim(VImage image, int const tolerance) {
using sharp::MaximumImageAlpha;
// An equivalent of ImageMagick's -trim in C++ ... automatically remove
// "boring" image edges.

// We use .project to sum the rows and columns of a 0/255 mask image, the first
// non-zero row or column is the object edge. We make the mask image with an
// amount-different-from-background image plus a threshold.

// find the value of the pixel at (0, 0) ... we will search for all pixels
// significantly different from this
std::vector<double> background = image(0, 0);

double const max = MaximumImageAlpha(image.interpretation());

// we need to smooth the image, subtract the background from every pixel, take
// the absolute value of the difference, then threshold
VImage mask = (image.median(3) - background).abs() > (max * tolerance / 100);

// sum mask rows and columns, then search for the first non-zero sum in each
// direction
VImage rows;
VImage columns = mask.project(&rows);

VImage profileLeftV;
VImage profileLeftH = columns.profile(&profileLeftV);

VImage profileRightV;
VImage profileRightH = columns.fliphor().profile(&profileRightV);

VImage profileTopV;
VImage profileTopH = rows.profile(&profileTopV);

VImage profileBottomV;
VImage profileBottomH = rows.flipver().profile(&profileBottomV);

int left = static_cast<int>(floor(profileLeftV.min()));
int right = columns.width() - static_cast<int>(floor(profileRightV.min()));
int top = static_cast<int>(floor(profileTopH.min()));
int bottom = rows.height() - static_cast<int>(floor(profileBottomH.min()));

int width = right - left;
int height = bottom - top;

if (width <= 0 || height <= 0) {
/*
Trim an image
*/
VImage Trim(VImage image, double const threshold) {
// Top-left pixel provides the background colour
VImage background = image.extract_area(0, 0, 1, 1);
if (HasAlpha(background)) {
background = background.flatten();
}
int top, width, height;
int const left = image.find_trim(&top, &width, &height, VImage::option()
->set("background", background(0, 0))
->set("threshold", threshold));
if (width == 0 || height == 0) {
throw VError("Unexpected error while trimming. Try to lower the tolerance");
}

// and now crop the original image
return image.extract_area(left, top, width, height);
}

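Trim now delegates to libvips find_trim: the background comes from the top-left pixel and a threshold replaces the old percentage tolerance, with the resulting crop offsets reported back as trimOffsetLeft/trimOffsetTop. A sketch assuming `trim()` accepts the threshold directly:

```js
sharp('bordered.png')
  .trim(10) // threshold passed through to find_trim
  .toBuffer({ resolveWithObject: true })
  .then(({ info }) => {
    console.log(info.trimOffsetLeft, info.trimOffsetTop);
  });
```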
@@ -373,10 +368,8 @@ namespace sharp {
VImage Linear(VImage image, double const a, double const b) {
if (HasAlpha(image)) {
// Separate alpha channel
VImage imageWithoutAlpha = image.extract_band(0,
VImage::option()->set("n", image.bands() - 1));
VImage alpha = image[image.bands() - 1];
return imageWithoutAlpha.linear(a, b).bandjoin(alpha);
return RemoveAlpha(image).linear(a, b).bandjoin(alpha);
} else {
return image.linear(a, b);
}
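Linear applies a * in + b per channel and re-attaches any alpha band untouched. A sketch of the corresponding call, assuming `linear(a, b)` maps onto the linearA/linearB options the pipeline reads:

```js
// Increase contrast slightly and lift shadows: out = 1.1 * in + 5
sharp('input.jpg')
  .linear(1.1, 5)
  .toFile('adjusted.jpg')
  .catch(console.error);
```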
@@ -25,6 +25,11 @@ using vips::VImage;

namespace sharp {

/*
Removes alpha channel, if any.
*/
VImage RemoveAlpha(VImage image);

/*
Alpha composite src over dst with given gravity.
Assumes alpha channels are already premultiplied and will be unpremultiplied after.

@@ -95,13 +100,19 @@ namespace sharp {
/*
Trim an image
*/
VImage Trim(VImage image, int const tolerance);
VImage Trim(VImage image, double const threshold);

/*
* Linear adjustment (a * in + b)
*/
VImage Linear(VImage image, double const a, double const b);

/*
* Recomb with a Matrix of the given bands/channel size.
* Eg. RGB will be a 3x3 matrix.
*/
VImage Recomb(VImage image, std::unique_ptr<double[]> const &matrix);

}  // namespace sharp

#endif  // SRC_OPERATIONS_H_
228
src/pipeline.cc
@@ -100,8 +100,10 @@ class PipelineWorker : public Nan::AsyncWorker {
}

// Trim
if (baton->trimTolerance != 0) {
image = sharp::Trim(image, baton->trimTolerance);
if (baton->trimThreshold > 0.0) {
image = sharp::Trim(image, baton->trimThreshold);
baton->trimOffsetLeft = image.xoffset();
baton->trimOffsetTop = image.yoffset();
}

// Pre extraction

@@ -233,7 +235,7 @@ class PipelineWorker : public Nan::AsyncWorker {
if (
xshrink == yshrink && xshrink >= 2 * shrink_on_load_factor &&
(inputImageType == ImageType::JPEG || inputImageType == ImageType::WEBP) &&
baton->gamma == 0 && baton->topOffsetPre == -1 && baton->trimTolerance == 0
baton->gamma == 0 && baton->topOffsetPre == -1 && baton->trimThreshold == 0.0
) {
if (xshrink >= 8 * shrink_on_load_factor) {
xfactor = xfactor / 8;

@@ -281,15 +283,17 @@ class PipelineWorker : public Nan::AsyncWorker {
}
}
// Recalculate integral shrink and double residual
int shrunkOnLoadWidth = image.width();
int shrunkOnLoadHeight = image.height();
int const shrunkOnLoadWidth = image.width();
int const shrunkOnLoadHeight = image.height();
if (!baton->rotateBeforePreExtract &&
(rotation == VIPS_ANGLE_D90 || rotation == VIPS_ANGLE_D270)) {
// Swap input output width and height when rotating by 90 or 270 degrees
std::swap(shrunkOnLoadWidth, shrunkOnLoadHeight);
// Swap when rotating by 90 or 270 degrees
xfactor = static_cast<double>(shrunkOnLoadWidth) / static_cast<double>(targetResizeHeight);
yfactor = static_cast<double>(shrunkOnLoadHeight) / static_cast<double>(targetResizeWidth);
} else {
xfactor = static_cast<double>(shrunkOnLoadWidth) / static_cast<double>(targetResizeWidth);
yfactor = static_cast<double>(shrunkOnLoadHeight) / static_cast<double>(targetResizeHeight);
}
xfactor = static_cast<double>(shrunkOnLoadWidth) / static_cast<double>(targetResizeWidth);
yfactor = static_cast<double>(shrunkOnLoadHeight) / static_cast<double>(targetResizeHeight);
}

// Ensure we're using a device-independent colour space

@@ -316,9 +320,9 @@ class PipelineWorker : public Nan::AsyncWorker {
double const multiplier = sharp::Is16Bit(image.interpretation()) ? 256.0 : 1.0;
// Background colour
std::vector<double> background {
baton->background[0] * multiplier,
baton->background[1] * multiplier,
baton->background[2] * multiplier
baton->flattenBackground[0] * multiplier,
baton->flattenBackground[1] * multiplier,
baton->flattenBackground[2] * multiplier
};
image = image.flatten(VImage::option()
->set("background", background));
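Flattening now uses a dedicated flattenBackground rather than the shared background array. The updated test/unit/alpha.js later in this diff uses the option form directly, so this usage is grounded in the change itself:

```js
// Replace transparency with an orange background, as exercised in test/unit/alpha.js.
sharp('transparent.png')
  .flatten({ background: { r: 255, g: 102, b: 0 } })
  .toBuffer((err, data, info) => {
    if (err) throw err;
    console.log(info.channels); // alpha removed during flatten
  });
```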
@@ -377,11 +381,10 @@ class PipelineWorker : public Nan::AsyncWorker {
vips_enum_from_nick(nullptr, VIPS_TYPE_KERNEL, baton->kernel.data()));
if (
kernel != VIPS_KERNEL_NEAREST && kernel != VIPS_KERNEL_CUBIC && kernel != VIPS_KERNEL_LANCZOS2 &&
kernel != VIPS_KERNEL_LANCZOS3
kernel != VIPS_KERNEL_LANCZOS3 && kernel != VIPS_KERNEL_MITCHELL
) {
throw vips::VError("Unknown kernel");
}

image = image.resize(1.0 / xfactor, VImage::option()
->set("vscale", 1.0 / yfactor)
->set("kernel", kernel));

@@ -420,35 +423,8 @@ class PipelineWorker : public Nan::AsyncWorker {
// Crop/embed
if (image.width() != baton->width || image.height() != baton->height) {
if (baton->canvas == Canvas::EMBED) {
// Scale up 8-bit values to match 16-bit input image
double const multiplier = sharp::Is16Bit(image.interpretation()) ? 256.0 : 1.0;
// Create background colour
std::vector<double> background;
if (image.bands() > 2) {
background = {
multiplier * baton->background[0],
multiplier * baton->background[1],
multiplier * baton->background[2]
};
} else {
// Convert sRGB to greyscale
background = { multiplier * (
0.2126 * baton->background[0] +
0.7152 * baton->background[1] +
0.0722 * baton->background[2])
};
}
// Add alpha channel to background colour
if (baton->background[3] < 255.0 || HasAlpha(image)) {
background.push_back(baton->background[3] * multiplier);
}
// Ensure background colour uses correct colourspace
background = sharp::GetRgbaAsColourspace(background, image.interpretation());
// Add non-transparent alpha channel, if required
if (baton->background[3] < 255.0 && !HasAlpha(image)) {
image = image.bandjoin(
VImage::new_matrix(image.width(), image.height()).new_from_image(255 * multiplier));
}
std::tie(image, background) = sharp::ApplyAlpha(image, baton->resizeBackground);

// Embed

@@ -462,7 +438,7 @@ class PipelineWorker : public Nan::AsyncWorker {
int width = std::max(image.width(), baton->width);
int height = std::max(image.height(), baton->height);
std::tie(left, top) = sharp::CalculateEmbedPosition(
image.width(), image.height(), baton->width, baton->height, baton->embed);
image.width(), image.height(), baton->width, baton->height, baton->position);

image = image.embed(left, top, width, height, VImage::option()
->set("extend", VIPS_EXTEND_BACKGROUND)

@@ -473,12 +449,12 @@ class PipelineWorker : public Nan::AsyncWorker {
(image.width() > baton->width || image.height() > baton->height)
) {
// Crop/max/min
if (baton->crop < 9) {
if (baton->position < 9) {
// Gravity-based crop
int left;
int top;
std::tie(left, top) = sharp::CalculateCrop(
image.width(), image.height(), baton->width, baton->height, baton->crop);
image.width(), image.height(), baton->width, baton->height, baton->position);
int width = std::min(image.width(), baton->width);
int height = std::min(image.height(), baton->height);
image = image.extract_area(left, top, width, height);

@@ -494,7 +470,7 @@ class PipelineWorker : public Nan::AsyncWorker {
->set("access", baton->accessMethod)
->set("threaded", TRUE));
image = image.smartcrop(baton->width, baton->height, VImage::option()
->set("interesting", baton->crop == 16 ? VIPS_INTERESTING_ENTROPY : VIPS_INTERESTING_ATTENTION));
->set("interesting", baton->position == 16 ? VIPS_INTERESTING_ENTROPY : VIPS_INTERESTING_ATTENTION));
baton->hasCropOffset = true;
baton->cropOffsetLeft = static_cast<int>(image.xoffset());
baton->cropOffsetTop = static_cast<int>(image.yoffset());
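The crop/embed decision now keys off a single position value (gravity 0-8, entropy 16, attention 17) instead of separate crop/embed fields, and the resize background goes through ApplyAlpha. The updated benchmark code further down shows the new option form; a short sketch:

```js
// Cover-fit crop using the entropy strategy, matching the updated test/bench usage.
sharp('input.jpg')
  .resize(320, 240, { fit: 'cover', position: sharp.strategy.entropy })
  .toBuffer({ resolveWithObject: true })
  .then(({ info }) => console.log(info.cropOffsetLeft, info.cropOffsetTop));
```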
@@ -502,6 +478,13 @@ class PipelineWorker : public Nan::AsyncWorker {
}
}

// Rotate by degree
if (baton->rotationAngle != 0.0) {
std::vector<double> background;
std::tie(image, background) = sharp::ApplyAlpha(image, baton->rotationBackground);
image = image.rotate(baton->rotationAngle, VImage::option()->set("background", background));
}

// Post extraction
if (baton->topOffsetPost != -1) {
image = image.extract_area(
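Arbitrary-angle rotation is new: ApplyAlpha prepares the rotationBackground, then libvips rotate fills the exposed corners with it. A sketch assuming `rotate()` accepts a background option alongside the angle, as the rotate-solid-bg/rotate-transparent-bg fixtures imply:

```js
sharp('input.jpg')
  .rotate(30, { background: { r: 255, g: 0, b: 0, alpha: 1 } })
  .toFile('rotate-solid-bg.jpg')
  .catch(console.error);
```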
@@ -510,35 +493,9 @@ class PipelineWorker : public Nan::AsyncWorker {

// Extend edges
if (baton->extendTop > 0 || baton->extendBottom > 0 || baton->extendLeft > 0 || baton->extendRight > 0) {
// Scale up 8-bit values to match 16-bit input image
double const multiplier = sharp::Is16Bit(image.interpretation()) ? 256.0 : 1.0;
// Create background colour
std::vector<double> background;
if (image.bands() > 2) {
background = {
multiplier * baton->background[0],
multiplier * baton->background[1],
multiplier * baton->background[2]
};
} else {
// Convert sRGB to greyscale
background = { multiplier * (
0.2126 * baton->background[0] +
0.7152 * baton->background[1] +
0.0722 * baton->background[2])
};
}
// Add alpha channel to background colour
if (baton->background[3] < 255.0 || HasAlpha(image)) {
background.push_back(baton->background[3] * multiplier);
}
// Ensure background colour uses correct colourspace
background = sharp::GetRgbaAsColourspace(background, image.interpretation());
// Add non-transparent alpha channel, if required
if (baton->background[3] < 255.0 && !HasAlpha(image)) {
image = image.bandjoin(
VImage::new_matrix(image.width(), image.height()).new_from_image(255 * multiplier));
}
std::tie(image, background) = sharp::ApplyAlpha(image, baton->extendBackground);

// Embed
baton->width = image.width() + baton->extendLeft + baton->extendRight;
baton->height = image.height() + baton->extendTop + baton->extendBottom;

@@ -568,6 +525,11 @@ class PipelineWorker : public Nan::AsyncWorker {
baton->convKernel);
}

// Recomb
if (baton->recombMatrix != NULL) {
image = sharp::Recomb(image, baton->recombMatrix);
}

// Sharpen
if (shouldSharpen) {
image = sharp::Sharpen(image, baton->sharpenSigma, baton->sharpenFlat, baton->sharpenJagged);

@@ -655,8 +617,8 @@ class PipelineWorker : public Nan::AsyncWorker {
baton->premultiplied = shouldPremultiplyAlpha;

// Gamma decoding (brighten)
if (baton->gamma >= 1 && baton->gamma <= 3) {
image = sharp::Gamma(image, baton->gamma);
if (baton->gammaOut >= 1 && baton->gammaOut <= 3) {
image = sharp::Gamma(image, baton->gammaOut);
}

// Linear adjustment (a * in + b)

@@ -683,7 +645,7 @@ class PipelineWorker : public Nan::AsyncWorker {
}

// Tint the image
if (baton->tintA > 0 || baton->tintB > 0) {
if (baton->tintA < 128.0 || baton->tintB < 128.0) {
image = sharp::Tint(image, baton->tintA, baton->tintB);
}

@@ -693,8 +655,19 @@ class PipelineWorker : public Nan::AsyncWorker {
(baton->err).append("Cannot extract channel from image. Too few channels in image.");
return Error();
}
image = image.extract_band(baton->extractChannel);
VipsInterpretation const interpretation = sharp::Is16Bit(image.interpretation())
? VIPS_INTERPRETATION_GREY16
: VIPS_INTERPRETATION_B_W;
image = image
.extract_band(baton->extractChannel)
.copy(VImage::option()->set("interpretation", interpretation));
}

// Remove alpha channel, if any
if (baton->removeAlpha) {
image = sharp::RemoveAlpha(image);
}

// Convert image to sRGB, if not already
if (sharp::Is16Bit(image.interpretation())) {
image = image.cast(VIPS_FORMAT_USHORT);
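Extracted channels are now tagged as single-band greyscale (GREY16 for 16-bit input), and the new removeAlpha flag drops any alpha band via the shared helper. A sketch combining both; `removeAlpha()` is grounded by the updated test/unit/alpha.js, while extracting band 3 as alpha is an assumption based on the extract-alpha-16bit.jpg fixture:

```js
// Pull the alpha channel out of a 16-bit PNG as a greyscale image...
sharp('rgba16.png')
  .extractChannel(3)
  .toFile('extract-alpha-16bit.jpg')
  .catch(console.error);

// ...or simply discard transparency and keep the colour bands.
sharp('rgba16.png')
  .removeAlpha()
  .toBuffer({ resolveWithObject: true })
  .then(({ info }) => console.log(info.channels)); // 3
```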
@@ -730,9 +703,10 @@ class PipelineWorker : public Nan::AsyncWorker {
->set("interlace", baton->jpegProgressive)
->set("no_subsample", baton->jpegChromaSubsampling == "4:4:4")
->set("trellis_quant", baton->jpegTrellisQuantisation)
->set("quant_table", baton->jpegQuantisationTable)
->set("overshoot_deringing", baton->jpegOvershootDeringing)
->set("optimize_scans", baton->jpegOptimiseScans)
->set("optimize_coding", TRUE)));
->set("optimize_coding", baton->jpegOptimiseCoding)));
baton->bufferOut = static_cast<char*>(area->data);
baton->bufferOutLength = area->length;
area->free_fn = nullptr;
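optimize_coding was previously hard-coded to TRUE; it is now driven by jpegOptimiseCoding, which defaults to true in the baton. A sketch assuming the JavaScript-side option on `jpeg()` is named optimiseCoding:

```js
// Opt out of Huffman table optimisation (assumed option name, mirrors jpegOptimiseCoding).
sharp('input.jpg')
  .jpeg({ optimiseCoding: false })
  .toBuffer()
  .catch(console.error);
```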
@@ -789,6 +763,10 @@ class PipelineWorker : public Nan::AsyncWorker {
->set("squash", baton->tiffSquash)
->set("compression", baton->tiffCompression)
->set("predictor", baton->tiffPredictor)
->set("pyramid", baton->tiffPyramid)
->set("tile", baton->tiffTile)
->set("tile_height", baton->tiffTileHeight)
->set("tile_width", baton->tiffTileWidth)
->set("xres", baton->tiffXres)
->set("yres", baton->tiffYres)));
baton->bufferOut = static_cast<char*>(area->data);

@@ -802,6 +780,7 @@ class PipelineWorker : public Nan::AsyncWorker {
if (baton->greyscale || image.interpretation() == VIPS_INTERPRETATION_B_W) {
// Extract first band for greyscale image
image = image[0];
baton->channels = 1;
}
if (image.format() != VIPS_FORMAT_UCHAR) {
// Cast pixels to uint8 (unsigned char)

@@ -845,9 +824,10 @@ class PipelineWorker : public Nan::AsyncWorker {
->set("interlace", baton->jpegProgressive)
->set("no_subsample", baton->jpegChromaSubsampling == "4:4:4")
->set("trellis_quant", baton->jpegTrellisQuantisation)
->set("quant_table", baton->jpegQuantisationTable)
->set("overshoot_deringing", baton->jpegOvershootDeringing)
->set("optimize_scans", baton->jpegOptimiseScans)
->set("optimize_coding", TRUE));
->set("optimize_coding", baton->jpegOptimiseCoding));
baton->formatOut = "jpeg";
baton->channels = std::min(baton->channels, 3);
} else if (baton->formatOut == "png" || (mightMatchInput && isPng) || (willMatchInput &&

@@ -880,16 +860,16 @@ class PipelineWorker : public Nan::AsyncWorker {
if (baton->tiffCompression == VIPS_FOREIGN_TIFF_COMPRESSION_JPEG) {
sharp::AssertImageTypeDimensions(image, ImageType::JPEG);
}
// Cast pixel values to float, if required
if (baton->tiffPredictor == VIPS_FOREIGN_TIFF_PREDICTOR_FLOAT) {
image = image.cast(VIPS_FORMAT_FLOAT);
}
image.tiffsave(const_cast<char*>(baton->fileOut.data()), VImage::option()
->set("strip", !baton->withMetadata)
->set("Q", baton->tiffQuality)
->set("squash", baton->tiffSquash)
->set("compression", baton->tiffCompression)
->set("predictor", baton->tiffPredictor)
->set("pyramid", baton->tiffPyramid)
->set("tile", baton->tiffTile)
->set("tile_height", baton->tiffTileHeight)
->set("tile_width", baton->tiffTileWidth)
->set("xres", baton->tiffXres)
->set("yres", baton->tiffYres));
baton->formatOut = "tiff";

@@ -924,21 +904,30 @@ class PipelineWorker : public Nan::AsyncWorker {
{"interlace", baton->jpegProgressive ? "TRUE" : "FALSE"},
{"no_subsample", baton->jpegChromaSubsampling == "4:4:4" ? "TRUE": "FALSE"},
{"trellis_quant", baton->jpegTrellisQuantisation ? "TRUE" : "FALSE"},
{"quant_table", std::to_string(baton->jpegQuantisationTable)},
{"overshoot_deringing", baton->jpegOvershootDeringing ? "TRUE": "FALSE"},
{"optimize_scans", baton->jpegOptimiseScans ? "TRUE": "FALSE"},
{"optimize_coding", "TRUE"}
{"optimize_coding", baton->jpegOptimiseCoding ? "TRUE": "FALSE"}
};
suffix = AssembleSuffixString(extname, options);
}
// Write DZ to file
image.dzsave(const_cast<char*>(baton->fileOut.data()), VImage::option()
->set("strip", !baton->withMetadata)
->set("tile_size", baton->tileSize)
->set("overlap", baton->tileOverlap)
->set("container", baton->tileContainer)
->set("layout", baton->tileLayout)
->set("suffix", const_cast<char*>(suffix.data()))
->set("angle", CalculateAngleRotation(baton->tileAngle)));
vips::VOption *options = VImage::option()
->set("strip", !baton->withMetadata)
->set("tile_size", baton->tileSize)
->set("overlap", baton->tileOverlap)
->set("container", baton->tileContainer)
->set("layout", baton->tileLayout)
->set("suffix", const_cast<char*>(suffix.data()))
->set("angle", CalculateAngleRotation(baton->tileAngle));

// libvips chooses a default depth based on layout. Instead of replicating that logic here by
// not passing anything - libvips will handle choice
if (baton->tileDepth < VIPS_FOREIGN_DZ_DEPTH_LAST) {
options->set("depth", baton->tileDepth);
}

image.dzsave(const_cast<char*>(baton->fileOut.data()), options);
baton->formatOut = "dz";
} else if (baton->formatOut == "v" || (mightMatchInput && isV) ||
(willMatchInput && inputImageType == ImageType::VIPS)) {
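The dzsave options are now assembled in a VOption so that depth is only passed when the caller chose one; otherwise libvips picks its own default per layout. A sketch assuming the JavaScript `tile()` option is called depth and takes the same 'onetile' | 'one' | 'onepixel' values parsed further down in the pipeline:

```js
sharp('input.jpg')
  .tile({ size: 512, overlap: 8, depth: 'onetile' })
  .toFile('output.dz')
  .catch(console.error);
```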
@@ -993,6 +982,12 @@ class PipelineWorker : public Nan::AsyncWorker {
Set(info, New("cropOffsetTop").ToLocalChecked(),
New<v8::Int32>(static_cast<int32_t>(baton->cropOffsetTop)));
}
if (baton->trimThreshold > 0.0) {
Set(info, New("trimOffsetLeft").ToLocalChecked(),
New<v8::Int32>(static_cast<int32_t>(baton->trimOffsetLeft)));
Set(info, New("trimOffsetTop").ToLocalChecked(),
New<v8::Int32>(static_cast<int32_t>(baton->trimOffsetTop)));
}

if (baton->bufferOutLength > 0) {
// Pass ownership of output data to Buffer instance

@@ -1124,6 +1119,7 @@ NAN_METHOD(pipeline) {
using sharp::AttrTo;
using sharp::AttrAs;
using sharp::AttrAsStr;
using sharp::AttrAsRgba;
using sharp::CreateInputDescriptor;

// Input Buffers must not undergo GC compaction during processing

@@ -1167,11 +1163,6 @@ NAN_METHOD(pipeline) {
} else if (canvas == "ignore_aspect") {
baton->canvas = Canvas::IGNORE_ASPECT;
}
// Background colour
v8::Local<v8::Object> background = AttrAs<v8::Object>(options, "background");
for (unsigned int i = 0; i < 4; i++) {
baton->background[i] = AttrTo<double>(background, i);
}
// Tint chroma
baton->tintA = AttrTo<double>(options, "tintA");
baton->tintB = AttrTo<double>(options, "tintB");

@@ -1186,8 +1177,8 @@ NAN_METHOD(pipeline) {
}
// Resize options
baton->withoutEnlargement = AttrTo<bool>(options, "withoutEnlargement");
baton->crop = AttrTo<int32_t>(options, "crop");
baton->embed = AttrTo<int32_t>(options, "embed");
baton->position = AttrTo<int32_t>(options, "position");
baton->resizeBackground = AttrAsRgba(options, "resizeBackground");
baton->kernel = AttrAsStr(options, "kernel");
baton->fastShrinkOnLoad = AttrTo<bool>(options, "fastShrinkOnLoad");
// Join Channel Options

@@ -1205,6 +1196,7 @@ NAN_METHOD(pipeline) {
}
// Operators
baton->flatten = AttrTo<bool>(options, "flatten");
baton->flattenBackground = AttrAsRgba(options, "flattenBackground");
baton->negate = AttrTo<bool>(options, "negate");
baton->blurSigma = AttrTo<double>(options, "blurSigma");
baton->medianSize = AttrTo<uint32_t>(options, "medianSize");

@@ -1213,14 +1205,17 @@ NAN_METHOD(pipeline) {
baton->sharpenJagged = AttrTo<double>(options, "sharpenJagged");
baton->threshold = AttrTo<int32_t>(options, "threshold");
baton->thresholdGrayscale = AttrTo<bool>(options, "thresholdGrayscale");
baton->trimTolerance = AttrTo<int32_t>(options, "trimTolerance");
baton->trimThreshold = AttrTo<double>(options, "trimThreshold");
baton->gamma = AttrTo<double>(options, "gamma");
baton->gammaOut = AttrTo<double>(options, "gammaOut");
baton->linearA = AttrTo<double>(options, "linearA");
baton->linearB = AttrTo<double>(options, "linearB");
baton->greyscale = AttrTo<bool>(options, "greyscale");
baton->normalise = AttrTo<bool>(options, "normalise");
baton->useExifOrientation = AttrTo<bool>(options, "useExifOrientation");
baton->angle = AttrTo<int32_t>(options, "angle");
baton->rotationAngle = AttrTo<double>(options, "rotationAngle");
baton->rotationBackground = AttrAsRgba(options, "rotationBackground");
baton->rotateBeforePreExtract = AttrTo<bool>(options, "rotateBeforePreExtract");
baton->flip = AttrTo<bool>(options, "flip");
baton->flop = AttrTo<bool>(options, "flop");

@@ -1228,7 +1223,10 @@ NAN_METHOD(pipeline) {
baton->extendBottom = AttrTo<int32_t>(options, "extendBottom");
baton->extendLeft = AttrTo<int32_t>(options, "extendLeft");
baton->extendRight = AttrTo<int32_t>(options, "extendRight");
baton->extendBackground = AttrAsRgba(options, "extendBackground");
baton->extractChannel = AttrTo<int32_t>(options, "extractChannel");

baton->removeAlpha = AttrTo<bool>(options, "removeAlpha");
if (HasAttr(options, "boolean")) {
baton->boolean = CreateInputDescriptor(AttrAs<v8::Object>(options, "boolean"), buffersToPersist);
baton->booleanOp = sharp::GetBooleanOperation(AttrAsStr(options, "booleanOp"));

@@ -1249,6 +1247,13 @@ NAN_METHOD(pipeline) {
baton->convKernel[i] = AttrTo<double>(kdata, i);
}
}
if (HasAttr(options, "recombMatrix")) {
baton->recombMatrix = std::unique_ptr<double[]>(new double[9]);
v8::Local<v8::Array> recombMatrix = AttrAs<v8::Array>(options, "recombMatrix");
for (unsigned int i = 0; i < 9; i++) {
baton->recombMatrix[i] = AttrTo<double>(recombMatrix, i);
}
}
baton->colourspace = sharp::GetInterpretation(AttrAsStr(options, "colourspace"));
if (baton->colourspace == VIPS_INTERPRETATION_ERROR) {
baton->colourspace = VIPS_INTERPRETATION_sRGB;

@@ -1263,8 +1268,10 @@ NAN_METHOD(pipeline) {
baton->jpegProgressive = AttrTo<bool>(options, "jpegProgressive");
baton->jpegChromaSubsampling = AttrAsStr(options, "jpegChromaSubsampling");
baton->jpegTrellisQuantisation = AttrTo<bool>(options, "jpegTrellisQuantisation");
baton->jpegQuantisationTable = AttrTo<uint32_t>(options, "jpegQuantisationTable");
baton->jpegOvershootDeringing = AttrTo<bool>(options, "jpegOvershootDeringing");
baton->jpegOptimiseScans = AttrTo<bool>(options, "jpegOptimiseScans");
baton->jpegOptimiseCoding = AttrTo<bool>(options, "jpegOptimiseCoding");
baton->pngProgressive = AttrTo<bool>(options, "pngProgressive");
baton->pngCompressionLevel = AttrTo<uint32_t>(options, "pngCompressionLevel");
baton->pngAdaptiveFiltering = AttrTo<bool>(options, "pngAdaptiveFiltering");

@@ -1273,7 +1280,11 @@ NAN_METHOD(pipeline) {
baton->webpLossless = AttrTo<bool>(options, "webpLossless");
baton->webpNearLossless = AttrTo<bool>(options, "webpNearLossless");
baton->tiffQuality = AttrTo<uint32_t>(options, "tiffQuality");
baton->tiffPyramid = AttrTo<bool>(options, "tiffPyramid");
baton->tiffSquash = AttrTo<bool>(options, "tiffSquash");
baton->tiffTile = AttrTo<bool>(options, "tiffTile");
baton->tiffTileWidth = AttrTo<uint32_t>(options, "tiffTileWidth");
baton->tiffTileHeight = AttrTo<uint32_t>(options, "tiffTileHeight");
baton->tiffXres = AttrTo<double>(options, "tiffXres");
baton->tiffYres = AttrTo<double>(options, "tiffYres");
// tiff compression options

@@ -1303,10 +1314,21 @@ NAN_METHOD(pipeline) {
baton->tileLayout = VIPS_FOREIGN_DZ_LAYOUT_DZ;
}
baton->tileFormat = AttrAsStr(options, "tileFormat");
std::string tileDepth = AttrAsStr(options, "tileDepth");
if (tileDepth == "onetile") {
baton->tileDepth = VIPS_FOREIGN_DZ_DEPTH_ONETILE;
} else if (tileDepth == "one") {
baton->tileDepth = VIPS_FOREIGN_DZ_DEPTH_ONE;
} else if (tileDepth == "onepixel") {
baton->tileDepth = VIPS_FOREIGN_DZ_DEPTH_ONEPIXEL;
} else {
// signal that we do not want to pass any value to dzSave
baton->tileDepth = VIPS_FOREIGN_DZ_DEPTH_LAST;
}
// Force random access for certain operations
if (baton->accessMethod == VIPS_ACCESS_SEQUENTIAL && (
baton->trimTolerance != 0 || baton->normalise ||
baton->crop == 16 || baton->crop == 17)) {
baton->trimThreshold > 0.0 || baton->normalise ||
baton->position == 16 || baton->position == 17)) {
baton->accessMethod = VIPS_ACCESS_RANDOM;
}

@@ -61,18 +61,18 @@ struct PipelineBaton {
int height;
int channels;
Canvas canvas;
int crop;
int embed;
int position;
std::vector<double> resizeBackground;
bool hasCropOffset;
int cropOffsetLeft;
int cropOffsetTop;
bool premultiplied;
std::string kernel;
bool fastShrinkOnLoad;
double background[4];
double tintA;
double tintB;
bool flatten;
std::vector<double> flattenBackground;
bool negate;
double blurSigma;
int medianSize;

@@ -81,14 +81,19 @@ struct PipelineBaton {
double sharpenJagged;
int threshold;
bool thresholdGrayscale;
int trimTolerance;
double trimThreshold;
int trimOffsetLeft;
int trimOffsetTop;
double linearA;
double linearB;
double gamma;
double gammaOut;
bool greyscale;
bool normalise;
bool useExifOrientation;
int angle;
double rotationAngle;
std::vector<double> rotationBackground;
bool rotateBeforePreExtract;
bool flip;
bool flop;

@@ -96,14 +101,17 @@ struct PipelineBaton {
int extendBottom;
int extendLeft;
int extendRight;
std::vector<double> extendBackground;
bool withoutEnlargement;
VipsAccess accessMethod;
int jpegQuality;
bool jpegProgressive;
std::string jpegChromaSubsampling;
bool jpegTrellisQuantisation;
int jpegQuantisationTable;
bool jpegOvershootDeringing;
bool jpegOptimiseScans;
bool jpegOptimiseCoding;
bool pngProgressive;
int pngCompressionLevel;
bool pngAdaptiveFiltering;

@@ -114,7 +122,11 @@ struct PipelineBaton {
int tiffQuality;
VipsForeignTiffCompression tiffCompression;
VipsForeignTiffPredictor tiffPredictor;
bool tiffPyramid;
bool tiffSquash;
bool tiffTile;
int tiffTileHeight;
int tiffTileWidth;
double tiffXres;
double tiffYres;
std::string err;

@@ -129,6 +141,7 @@ struct PipelineBaton {
VipsOperationBoolean booleanOp;
VipsOperationBoolean bandBoolOp;
int extractChannel;
bool removeAlpha;
VipsInterpretation colourspace;
int tileSize;
int tileOverlap;

@@ -136,6 +149,8 @@ struct PipelineBaton {
VipsForeignDzLayout tileLayout;
std::string tileFormat;
int tileAngle;
VipsForeignDzDepth tileDepth;
std::unique_ptr<double[]> recombMatrix;

PipelineBaton():
input(nullptr),

@@ -151,15 +166,16 @@ struct PipelineBaton {
topOffsetPost(-1),
channels(0),
canvas(Canvas::CROP),
crop(0),
embed(0),
position(0),
resizeBackground{ 0.0, 0.0, 0.0, 255.0 },
hasCropOffset(false),
cropOffsetLeft(0),
cropOffsetTop(0),
premultiplied(false),
tintA(0.0),
tintB(0.0),
tintA(128.0),
tintB(128.0),
flatten(false),
flattenBackground{ 0.0, 0.0, 0.0 },
negate(false),
blurSigma(0.0),
medianSize(0),

@@ -168,7 +184,9 @@ struct PipelineBaton {
sharpenJagged(2.0),
threshold(0),
thresholdGrayscale(true),
trimTolerance(0),
trimThreshold(0.0),
trimOffsetLeft(0),
trimOffsetTop(0),
linearA(1.0),
linearB(0.0),
gamma(0.0),

@@ -176,19 +194,24 @@ struct PipelineBaton {
normalise(false),
useExifOrientation(false),
angle(0),
rotationAngle(0.0),
rotationBackground{ 0.0, 0.0, 0.0, 255.0 },
flip(false),
flop(false),
extendTop(0),
extendBottom(0),
extendLeft(0),
extendRight(0),
extendBackground{ 0.0, 0.0, 0.0, 255.0 },
withoutEnlargement(false),
jpegQuality(80),
jpegProgressive(false),
jpegChromaSubsampling("4:2:0"),
jpegTrellisQuantisation(false),
jpegQuantisationTable(0),
jpegOvershootDeringing(false),
jpegOptimiseScans(false),
jpegOptimiseCoding(true),
pngProgressive(false),
pngCompressionLevel(9),
pngAdaptiveFiltering(false),

@@ -196,7 +219,11 @@ struct PipelineBaton {
tiffQuality(80),
tiffCompression(VIPS_FOREIGN_TIFF_COMPRESSION_JPEG),
tiffPredictor(VIPS_FOREIGN_TIFF_PREDICTOR_HORIZONTAL),
tiffPyramid(false),
tiffSquash(false),
tiffTile(false),
tiffTileHeight(256),
tiffTileWidth(256),
tiffXres(1.0),
tiffYres(1.0),
withMetadata(false),

@@ -209,17 +236,14 @@ struct PipelineBaton {
booleanOp(VIPS_OPERATION_BOOLEAN_LAST),
bandBoolOp(VIPS_OPERATION_BOOLEAN_LAST),
extractChannel(-1),
removeAlpha(false),
colourspace(VIPS_INTERPRETATION_LAST),
tileSize(256),
tileOverlap(0),
tileContainer(VIPS_FOREIGN_DZ_CONTAINER_FS),
tileLayout(VIPS_FOREIGN_DZ_LAYOUT_DZ),
tileAngle(0){
background[0] = 0.0;
background[1] = 0.0;
background[2] = 0.0;
background[3] = 255.0;
}
tileAngle(0),
tileDepth(VIPS_FOREIGN_DZ_DEPTH_LAST) {}
};

#endif  // SRC_PIPELINE_H_
19
src/stats.cc
@@ -59,7 +59,6 @@ class StatsWorker : public Nan::AsyncWorker {
using sharp::MaximumImageAlpha;

vips::VImage image;
vips::VImage stats;
sharp::ImageType imageType = sharp::ImageType::UNKNOWN;

try {

@@ -69,9 +68,8 @@ class StatsWorker : public Nan::AsyncWorker {
}
if (imageType != sharp::ImageType::UNKNOWN) {
try {
stats = image.stats();
int bands = image.bands();
double const max = MaximumImageAlpha(image.interpretation());
vips::VImage stats = image.stats();
int const bands = image.bands();
for (int b = 1; b <= bands; b++) {
ChannelStats cStats(static_cast<int>(stats.getpoint(STAT_MIN_INDEX, b).front()),
static_cast<int>(stats.getpoint(STAT_MAX_INDEX, b).front()),

@@ -83,11 +81,15 @@ class StatsWorker : public Nan::AsyncWorker {
static_cast<int>(stats.getpoint(STAT_MAXY_INDEX, b).front()));
baton->channelStats.push_back(cStats);
}

// alpha layer is there and the last band i.e. alpha has its max value greater than 0)
if (sharp::HasAlpha(image) && stats.getpoint(STAT_MIN_INDEX, bands).front() != max) {
baton->isOpaque = false;
// Image is not opaque when alpha layer is present and contains a non-maxima value
if (sharp::HasAlpha(image)) {
double const minAlpha = static_cast<double>(stats.getpoint(STAT_MIN_INDEX, bands).front());
if (minAlpha != MaximumImageAlpha(image.interpretation())) {
baton->isOpaque = false;
}
}
// Estimate entropy via histogram of greyscale value frequency
baton->entropy = std::abs(image.colourspace(VIPS_INTERPRETATION_B_W)[0].hist_find().hist_entropy());
} catch (vips::VError const &err) {
(baton->err).append(err.what());
}

@@ -130,6 +132,7 @@ class StatsWorker : public Nan::AsyncWorker {

Set(info, New("channels").ToLocalChecked(), channels);
Set(info, New("isOpaque").ToLocalChecked(), New<v8::Boolean>(baton->isOpaque));
Set(info, New("entropy").ToLocalChecked(), New<v8::Number>(baton->entropy));
argv[1] = info;
}

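stats() now also estimates greyscale entropy via a histogram and reports isOpaque based on the minimum alpha value. A short usage sketch of the extended result object:

```js
sharp('input.png')
  .stats()
  .then(({ channels, isOpaque, entropy }) => {
    console.log(channels.length, isOpaque, entropy);
  });
```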
@@ -51,12 +51,14 @@ struct StatsBaton {
// Output
std::vector<ChannelStats> channelStats;
bool isOpaque;
double entropy;

std::string err;

StatsBaton():
input(nullptr),
isOpaque(true)
isOpaque(true),
entropy(0.0)
{}
};

@@ -259,7 +259,7 @@ NAN_METHOD(_maxColourDistance) {
}
// Calculate colour distance
maxColourDistance = image1.dE00(image2).max();
} catch (VError err) {
} catch (VError const &err) {
return ThrowError(err.what());
}

@@ -8,19 +8,17 @@
"test": "node perf && node random && node parallel"
},
"devDependencies": {
"async": "^2.6.0",
"async": "^2.6.1",
"benchmark": "^2.1.4",
"gm": "^1.23.1",
"imagemagick": "^0.1.3",
"imagemagick-native": "^1.9.3",
"images": "^3.0.1",
"jimp": "^0.2.28",
"mapnik": "^3.6.2",
"pajk-lwip": "^0.2.0",
"semver": "^5.4.1"
"jimp": "^0.5.3",
"mapnik": "^4.0.1",
"semver": "^5.5.1"
},
"license": "Apache-2.0",
"engines": {
"node": ">=4"
"node": ">=6"
}
}

@@ -12,24 +12,12 @@ const gm = require('gm');
|
||||
const imagemagick = require('imagemagick');
|
||||
const mapnik = require('mapnik');
|
||||
const jimp = require('jimp');
|
||||
let images;
|
||||
try {
|
||||
images = require('images');
|
||||
} catch (err) {
|
||||
console.log('Excluding node-images');
|
||||
}
|
||||
let imagemagickNative;
|
||||
try {
|
||||
imagemagickNative = require('imagemagick-native');
|
||||
} catch (err) {
|
||||
console.log('Excluding imagemagick-native');
|
||||
}
|
||||
let lwip;
|
||||
try {
|
||||
lwip = require('pajk-lwip');
|
||||
} catch (err) {
|
||||
console.log('Excluding lwip');
|
||||
}
|
||||
|
||||
const fixtures = require('../fixtures');
|
||||
|
||||
@@ -38,8 +26,6 @@ const height = 588;
|
||||
|
||||
// Disable libvips cache to ensure tests are as fair as they can be
|
||||
sharp.cache(false);
|
||||
// Enable use of SIMD
|
||||
sharp.simd(true);
|
||||
|
||||
async.series({
|
||||
'jpeg': function (callback) {
|
||||
@@ -54,7 +40,7 @@ async.series({
|
||||
throw err;
|
||||
} else {
|
||||
image
|
||||
.resize(width, height)
|
||||
.resize(width, height, jimp.RESIZE_BICUBIC)
|
||||
.quality(80)
|
||||
.getBuffer(jimp.MIME_JPEG, function (err) {
|
||||
if (err) {
|
||||
@@ -74,7 +60,7 @@ async.series({
|
||||
throw err;
|
||||
} else {
|
||||
image
|
||||
.resize(width, height)
|
||||
.resize(width, height, jimp.RESIZE_BICUBIC)
|
||||
.quality(80)
|
||||
.write(fixtures.outputJpg, function (err) {
|
||||
if (err) {
|
||||
@@ -87,51 +73,6 @@ async.series({
|
||||
});
|
||||
}
|
||||
});
|
||||
// lwip
|
||||
if (typeof lwip !== 'undefined') {
|
||||
jpegSuite.add('lwip-file-file', {
|
||||
defer: true,
|
||||
fn: function (deferred) {
|
||||
lwip.open(fixtures.inputJpg, function (err, image) {
|
||||
if (err) {
|
||||
throw err;
|
||||
}
|
||||
image.resize(width, height, 'lanczos', function (err, image) {
|
||||
if (err) {
|
||||
throw err;
|
||||
}
|
||||
image.writeFile(fixtures.outputJpg, {quality: 80}, function (err) {
|
||||
if (err) {
|
||||
throw err;
|
||||
}
|
||||
deferred.resolve();
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
}).add('lwip-buffer-buffer', {
|
||||
defer: true,
|
||||
fn: function (deferred) {
|
||||
lwip.open(inputJpgBuffer, 'jpg', function (err, image) {
|
||||
if (err) {
|
||||
throw err;
|
||||
}
|
||||
image.resize(width, height, 'lanczos', function (err, image) {
|
||||
if (err) {
|
||||
throw err;
|
||||
}
|
||||
image.toBuffer('jpg', {quality: 80}, function (err, buffer) {
|
||||
if (err) {
|
||||
throw err;
|
||||
}
|
||||
assert.notStrictEqual(null, buffer);
|
||||
deferred.resolve();
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
// mapnik
|
||||
jpegSuite.add('mapnik-file-file', {
|
||||
defer: true,
|
||||
@@ -272,14 +213,6 @@ async.series({
|
||||
});
|
||||
}
|
||||
});
|
||||
// images
|
||||
if (typeof images !== 'undefined') {
|
||||
jpegSuite.add('images-file-file', function () {
|
||||
images(fixtures.inputJpg)
|
||||
.resize(width, height)
|
||||
.save(fixtures.outputJpg, { quality: 80 });
|
||||
});
|
||||
}
|
||||
// sharp
|
||||
jpegSuite.add('sharp-buffer-file', {
|
||||
defer: true,
|
||||
@@ -569,8 +502,10 @@ async.series({
|
||||
defer: true,
|
||||
fn: function (deferred) {
|
||||
sharp(inputJpgBuffer)
|
||||
.resize(width, height)
|
||||
.crop(sharp.strategy.entropy)
|
||||
.resize(width, height, {
|
||||
fit: 'cover',
|
||||
position: sharp.strategy.entropy
|
||||
})
|
||||
.toBuffer(function (err, buffer) {
|
||||
if (err) {
|
||||
throw err;
|
||||
@@ -584,8 +519,10 @@ async.series({
|
||||
defer: true,
|
||||
fn: function (deferred) {
|
||||
sharp(inputJpgBuffer)
|
||||
.resize(width, height)
|
||||
.crop(sharp.strategy.attention)
|
||||
.resize(width, height, {
|
||||
fit: 'cover',
|
||||
position: sharp.strategy.attention
|
||||
})
|
||||
.toBuffer(function (err, buffer) {
|
||||
if (err) {
|
||||
throw err;
|
||||
@@ -696,31 +633,6 @@ async.series({
|
||||
});
|
||||
}
|
||||
});
|
||||
// lwip
|
||||
if (typeof lwip !== 'undefined') {
|
||||
pngSuite.add('lwip-buffer-buffer', {
|
||||
defer: true,
|
||||
fn: function (deferred) {
|
||||
lwip.open(inputPngBuffer, 'png', function (err, image) {
|
||||
if (err) {
|
||||
throw err;
|
||||
}
|
||||
image.resize(width, height, 'lanczos', function (err, image) {
|
||||
if (err) {
|
||||
throw err;
|
||||
}
|
||||
image.toBuffer('png', function (err, buffer) {
|
||||
if (err) {
|
||||
throw err;
|
||||
}
|
||||
assert.notStrictEqual(null, buffer);
|
||||
deferred.resolve();
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
// mapnik
|
||||
pngSuite.add('mapnik-file-file', {
|
||||
defer: true,
|
||||
@@ -833,14 +745,6 @@ async.series({
|
||||
});
|
||||
}
|
||||
});
|
||||
// images
|
||||
if (typeof images !== 'undefined') {
|
||||
pngSuite.add('images-file-file', function () {
|
||||
images(fixtures.inputPng)
|
||||
.resize(width, height)
|
||||
.save(fixtures.outputPng);
|
||||
});
|
||||
}
|
||||
// sharp
|
||||
pngSuite.add('sharp-buffer-file', {
|
||||
defer: true,
|
||||
|
||||
BIN  test/fixtures/expected/Landscape_1-recomb-saturation.jpg  (vendored, Normal file, After: 82 KiB)
BIN  test/fixtures/expected/Landscape_1-recomb-sepia.jpg  (vendored, Normal file, After: 77 KiB)
BIN  test/fixtures/expected/Landscape_1-recomb-sepia2.jpg  (vendored, Normal file, After: 85 KiB)
BIN  test/fixtures/expected/alpha-recomb-sepia.png  (vendored, Normal file, After: 209 KiB)
BIN  test/fixtures/expected/extract-alpha-16bit.jpg  (vendored, Normal file, After: 685 B)
BIN  test/fixtures/expected/extract-lch.jpg  (vendored, Normal file, After: 13 KiB)
BIN  test/fixtures/expected/fast-shrink-on-load-false.png  (vendored, Before: 258 B, After: 270 B)
BIN  test/fixtures/expected/fast-shrink-on-load-true.png  (vendored, Before: 263 B, After: 265 B)
BIN  test/fixtures/expected/gamma-in-2.2-out-3.0.jpg  (vendored, Normal file, After: 1.8 KiB)
BIN  test/fixtures/expected/rotate-solid-bg.jpg  (vendored, Normal file, After: 19 KiB)
BIN  test/fixtures/expected/rotate-transparent-bg.png  (vendored, Normal file, After: 238 KiB)
BIN  test/fixtures/expected/svg14.4.png  (vendored, Normal file, After: 340 B)
BIN  test/fixtures/expected/tint-blue.jpg  (vendored, Normal file, After: 15 KiB)
BIN  test/fixtures/expected/tint-cmyk.jpg  (vendored, Normal file, After: 26 KiB)
BIN  test/fixtures/expected/tint-green.jpg  (vendored, Normal file, After: 13 KiB)
BIN  test/fixtures/expected/tint-red.jpg  (vendored, Before: 12 KiB, After: 12 KiB)
BIN  test/fixtures/expected/tint-sepia.jpg  (vendored, Before: 14 KiB, After: 14 KiB)
3
test/fixtures/index.js
vendored
@@ -15,8 +15,7 @@ const fingerprint = function (image, callback) {
sharp(image)
.greyscale()
.normalise()
.resize(9, 8)
.ignoreAspectRatio()
.resize(9, 8, { fit: sharp.fit.fill })
.raw()
.toBuffer(function (err, data) {
if (err) {

@@ -5,7 +5,7 @@ if ! type valgrind >/dev/null; then
exit 1
fi

curl -o ./test/leak/libvips.supp https://raw.githubusercontent.com/jcupitt/libvips/master/libvips.supp
curl -s -o ./test/leak/libvips.supp https://raw.githubusercontent.com/libvips/libvips/master/libvips.supp

for test in ./test/unit/*.js; do
G_SLICE=always-malloc G_DEBUG=gc-friendly valgrind \

@@ -16,5 +16,5 @@ for test in ./test/unit/*.js; do
--show-leak-kinds=definite,indirect,possible \
--num-callers=20 \
--trace-children=yes \
mocha --slow=60000 --timeout=120000 "$test";
node node_modules/.bin/mocha --slow=60000 --timeout=120000 --file test/unit/beforeEach.js "$test";
done

@@ -233,7 +233,13 @@
...
fun:uv__work_done
}

{
leak_libuv_FlushForegroundTasks
Memcheck:Leak
match-leak-kinds: possible
...
fun:_ZN4node12NodePlatform28FlushForegroundTasksInternalEv
}
# nodejs warnings
{
param_nodejs_write_buffer

@@ -360,6 +366,17 @@
...
fun:_ZN2v84base6Thread5StartEv
}
{
leak_nodejs_thread_TracingController
Memcheck:Leak
match-leak-kinds: possible
fun:calloc
fun:allocate_dtv
fun:_dl_allocate_tls
fun:allocate_stack
...
fun:_ZN4node12NodePlatformC1EiPN2v817TracingControllerE
}
{
leak_nan_FunctionCallbackInfo
Memcheck:Leak

@@ -7,7 +7,6 @@ const async = require('async');
|
||||
const sharp = require('../../');
|
||||
|
||||
const crops = {
|
||||
centre: sharp.gravity.centre,
|
||||
entropy: sharp.strategy.entropy,
|
||||
attention: sharp.strategy.attention
|
||||
};
|
||||
@@ -34,23 +33,35 @@ async.eachLimit(files, concurrency, function (file, done) {
|
||||
const salientHeight = userData[file].bottom - userData[file].top;
|
||||
sharp(filename).metadata(function (err, metadata) {
|
||||
if (err) console.log(err);
|
||||
const marginWidth = metadata.width - salientWidth;
|
||||
const marginHeight = metadata.height - salientHeight;
|
||||
async.each(Object.keys(crops), function (crop, done) {
|
||||
async.parallel([
|
||||
// Left edge accuracy
|
||||
function (done) {
|
||||
sharp(filename).resize(salientWidth, metadata.height).crop(crops[crop]).toBuffer(function (err, data, info) {
|
||||
const accuracy = Math.round(Math.abs(userData[file].left - info.cropCalcLeft) / (metadata.width - salientWidth) * 100);
|
||||
incrementScore(accuracy, crop);
|
||||
done(err);
|
||||
});
|
||||
if (marginWidth) {
|
||||
sharp(filename).resize(salientWidth, metadata.height).crop(crops[crop]).toBuffer(function (err, data, info) {
|
||||
const delta = Math.abs(userData[file].left + info.cropOffsetLeft);
|
||||
const accuracy = Math.round(marginWidth / (marginWidth + delta) * 100);
|
||||
incrementScore(accuracy, crop);
|
||||
done(err);
|
||||
});
|
||||
} else {
|
||||
done();
|
||||
}
|
||||
},
|
||||
// Top edge accuracy
|
||||
function (done) {
|
||||
sharp(filename).resize(metadata.width, salientHeight).crop(crops[crop]).toBuffer(function (err, data, info) {
|
||||
const accuracy = Math.round(Math.abs(userData[file].top - info.cropCalcTop) / (metadata.height - salientHeight) * 100);
|
||||
incrementScore(accuracy, crop);
|
||||
done(err);
|
||||
});
|
||||
if (marginHeight) {
|
||||
sharp(filename).resize(metadata.width, salientHeight).crop(crops[crop]).toBuffer(function (err, data, info) {
|
||||
const delta = Math.abs(userData[file].top + info.cropOffsetTop);
|
||||
const accuracy = Math.round(marginHeight / (marginHeight + delta) * 100);
|
||||
incrementScore(accuracy, crop);
|
||||
done(err);
|
||||
});
|
||||
} else {
|
||||
done();
|
||||
}
|
||||
}
|
||||
], done);
|
||||
}, done);
|
||||
@@ -60,7 +71,7 @@ async.eachLimit(files, concurrency, function (file, done) {
|
||||
Object.keys(scores).forEach(function (accuracy) {
|
||||
report.push(
|
||||
Object.assign({
|
||||
accuracy: parseInt(accuracy, 10)
|
||||
accuracy: Number(accuracy)
|
||||
}, scores[accuracy])
|
||||
);
|
||||
});
|
||||
|
||||
@@ -22,7 +22,7 @@ const median = function (values) {
|
||||
// List of files
|
||||
fs.readdirSync(userDataDir).forEach(function (file) {
|
||||
// Contents of file
|
||||
const lines = fs.readFileSync(path.join(userDataDir, file), {encoding: 'utf-8'}).split(/\r\n/);
|
||||
const lines = fs.readFileSync(path.join(userDataDir, file), { encoding: 'utf-8' }).split(/\r\n/);
|
||||
// First line = number of entries
|
||||
const entries = parseInt(lines[0], 10);
|
||||
// Verify number of entries
|
||||
|
||||
@@ -19,9 +19,10 @@ describe('Alpha transparency', function () {
|
||||
|
||||
it('Flatten to RGB orange', function (done) {
|
||||
sharp(fixtures.inputPngWithTransparency)
|
||||
.flatten()
|
||||
.background({r: 255, g: 102, b: 0})
|
||||
.resize(400, 300)
|
||||
.flatten({
|
||||
background: { r: 255, g: 102, b: 0 }
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(400, info.width);
|
||||
@@ -32,9 +33,8 @@ describe('Alpha transparency', function () {
|
||||
|
||||
it('Flatten to CSS/hex orange', function (done) {
|
||||
sharp(fixtures.inputPngWithTransparency)
|
||||
.flatten()
|
||||
.background('#ff6600')
|
||||
.resize(400, 300)
|
||||
.flatten({ background: '#ff6600' })
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(400, info.width);
|
||||
@@ -46,8 +46,9 @@ describe('Alpha transparency', function () {
|
||||
it('Flatten 16-bit PNG with transparency to orange', function (done) {
|
||||
const output = fixtures.path('output.flatten-rgb16-orange.jpg');
|
||||
sharp(fixtures.inputPngWithTransparency16bit)
|
||||
.flatten()
|
||||
.background({r: 255, g: 102, b: 0})
|
||||
.flatten({
|
||||
background: { r: 255, g: 102, b: 0 }
|
||||
})
|
||||
.toFile(output, function (err, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, info.size > 0);
|
||||
@@ -71,8 +72,7 @@ describe('Alpha transparency', function () {
|
||||
|
||||
it('Ignored for JPEG', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.background('#ff0000')
|
||||
.flatten()
|
||||
.flatten({ background: '#ff0000' })
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
@@ -81,35 +81,45 @@ describe('Alpha transparency', function () {
|
||||
});
|
||||
});
|
||||
|
||||
it('Enlargement with non-nearest neighbor interpolation shouldn’t cause dark edges', function (done) {
|
||||
it('Enlargement with non-nearest neighbor interpolation shouldn’t cause dark edges', function () {
|
||||
const base = 'alpha-premultiply-enlargement-2048x1536-paper.png';
|
||||
const actual = fixtures.path('output.' + base);
|
||||
const expected = fixtures.expected(base);
|
||||
sharp(fixtures.inputPngAlphaPremultiplicationSmall)
|
||||
return sharp(fixtures.inputPngAlphaPremultiplicationSmall)
|
||||
.resize(2048, 1536)
|
||||
.toFile(actual, function (err) {
|
||||
if (err) {
|
||||
done(err);
|
||||
} else {
|
||||
fixtures.assertMaxColourDistance(actual, expected, 102);
|
||||
done();
|
||||
}
|
||||
.toFile(actual)
|
||||
.then(function () {
|
||||
fixtures.assertMaxColourDistance(actual, expected, 102);
|
||||
});
|
||||
});
|
||||
|
||||
it('Reduction with non-nearest neighbor interpolation shouldn’t cause dark edges', function (done) {
|
||||
it('Reduction with non-nearest neighbor interpolation shouldn’t cause dark edges', function () {
|
||||
const base = 'alpha-premultiply-reduction-1024x768-paper.png';
|
||||
const actual = fixtures.path('output.' + base);
|
||||
const expected = fixtures.expected(base);
|
||||
sharp(fixtures.inputPngAlphaPremultiplicationLarge)
|
||||
return sharp(fixtures.inputPngAlphaPremultiplicationLarge)
|
||||
.resize(1024, 768)
|
||||
.toFile(actual, function (err) {
|
||||
if (err) {
|
||||
done(err);
|
||||
} else {
|
||||
fixtures.assertMaxColourDistance(actual, expected, 102);
|
||||
done();
|
||||
}
|
||||
.toFile(actual)
|
||||
.then(function () {
|
||||
fixtures.assertMaxColourDistance(actual, expected, 102);
|
||||
});
|
||||
});
|
||||
|
||||
it('Removes alpha from fixtures with transparency, ignores those without', function () {
|
||||
return Promise.all([
|
||||
fixtures.inputPngWithTransparency,
|
||||
fixtures.inputPngWithTransparency16bit,
|
||||
fixtures.inputWebPWithTransparency,
|
||||
fixtures.inputJpg,
|
||||
fixtures.inputPng,
|
||||
fixtures.inputWebP
|
||||
].map(function (input) {
|
||||
return sharp(input)
|
||||
.removeAlpha()
|
||||
.toBuffer({ resolveWithObject: true })
|
||||
.then(function (result) {
|
||||
assert.strictEqual(3, result.info.channels);
|
||||
});
|
||||
}));
|
||||
});
|
||||
});
|
||||
|
||||
12
test/unit/beforeEach.js
Normal file
@@ -0,0 +1,12 @@
'use strict';

const detectLibc = require('detect-libc');
const sharp = require('../../');

const usingCache = detectLibc.family !== detectLibc.MUSL;
const usingSimd = !process.env.G_DEBUG;

beforeEach(function () {
  sharp.cache(usingCache);
  sharp.simd(usingSimd);
});

@@ -1,9 +0,0 @@
'use strict';

const sharp = require('../../');

// Define SHARP_TEST_WITHOUT_CACHE environment variable to prevent use of libvips' cache

beforeEach(function () {
  sharp.cache(!process.env.SHARP_TEST_WITHOUT_CACHE);
});

@@ -69,9 +69,10 @@ describe('Colour space conversion', function () {

it('From CMYK to sRGB with white background, not yellow', function (done) {
sharp(fixtures.inputJpgWithCmykProfile)
.resize(320, 240)
.background('white')
.embed()
.resize(320, 240, {
fit: sharp.fit.contain,
background: 'white'
})
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('jpeg', info.format);

test/unit/deprecated-background.js (new file, 73 lines)
@@ -0,0 +1,73 @@
'use strict';

const assert = require('assert');
const fixtures = require('../fixtures');
const sharp = require('../../');

describe('Deprecated background', function () {
  it('Flatten to RGB orange', function (done) {
    sharp(fixtures.inputPngWithTransparency)
      .flatten()
      .background({ r: 255, g: 102, b: 0 })
      .resize(400, 300)
      .toBuffer(function (err, data, info) {
        if (err) throw err;
        assert.strictEqual(400, info.width);
        assert.strictEqual(300, info.height);
        fixtures.assertSimilar(fixtures.expected('flatten-orange.jpg'), data, done);
      });
  });

  it('Flatten to CSS/hex orange', function (done) {
    sharp(fixtures.inputPngWithTransparency)
      .flatten()
      .background('#ff6600')
      .resize(400, 300)
      .toBuffer(function (err, data, info) {
        if (err) throw err;
        assert.strictEqual(400, info.width);
        assert.strictEqual(300, info.height);
        fixtures.assertSimilar(fixtures.expected('flatten-orange.jpg'), data, done);
      });
  });

  it('Flatten 16-bit PNG with transparency to orange', function (done) {
    const output = fixtures.path('output.flatten-rgb16-orange.jpg');
    sharp(fixtures.inputPngWithTransparency16bit)
      .flatten()
      .background({ r: 255, g: 102, b: 0 })
      .toFile(output, function (err, info) {
        if (err) throw err;
        assert.strictEqual(true, info.size > 0);
        assert.strictEqual(32, info.width);
        assert.strictEqual(32, info.height);
        fixtures.assertMaxColourDistance(output, fixtures.expected('flatten-rgb16-orange.jpg'), 25);
        done();
      });
  });

  it('Ignored for JPEG', function (done) {
    sharp(fixtures.inputJpg)
      .background('#ff0000')
      .flatten()
      .toBuffer(function (err, data, info) {
        if (err) throw err;
        assert.strictEqual('jpeg', info.format);
        assert.strictEqual(3, info.channels);
        done();
      });
  });

  it('extend all sides equally with RGB', function (done) {
    sharp(fixtures.inputJpg)
      .resize(120)
      .background({ r: 255, g: 0, b: 0 })
      .extend(10)
      .toBuffer(function (err, data, info) {
        if (err) throw err;
        assert.strictEqual(140, info.width);
        assert.strictEqual(118, info.height);
        fixtures.assertSimilar(fixtures.expected('extend-equal.jpg'), data, done);
      });
  });
});
@@ -5,7 +5,7 @@ const assert = require('assert');
const sharp = require('../../');
const fixtures = require('../fixtures');

describe('Crop', function () {
describe('Deprecated crop', function () {
  [
    {
      name: 'North',
@@ -177,25 +177,6 @@ describe('Crop', function () {
    });
  });

  it('Clamp before crop when one post-resize dimension is below target', function () {
    return sharp(fixtures.inputJpg)
      .resize(1024, 1034)
      .toBuffer()
      .then(function (input) {
        return sharp(input)
          .rotate(270)
          .resize(256)
          .crop(sharp.strategy.entropy)
          .toBuffer({ resolveWithObject: true })
          .then(function (result) {
            assert.strictEqual(256, result.info.width);
            assert.strictEqual(253, result.info.height);
            assert.strictEqual(0, result.info.cropOffsetLeft);
            assert.strictEqual(0, result.info.cropOffsetTop);
          });
      });
  });

  describe('Entropy-based strategy', function () {
    it('JPEG', function (done) {
      sharp(fixtures.inputJpg)
@@ -5,7 +5,7 @@ const assert = require('assert');
|
||||
const sharp = require('../../');
|
||||
const fixtures = require('../fixtures');
|
||||
|
||||
describe('Embed', function () {
|
||||
describe('Deprecated embed', function () {
|
||||
it('Allows specifying the gravity as a string', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(320, 240)
|
||||
@@ -38,7 +38,7 @@ describe('Embed', function () {
|
||||
it('JPEG within WebP, to include alpha channel', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(320, 240)
|
||||
.background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
.background({ r: 0, g: 0, b: 0, alpha: 0 })
|
||||
.embed()
|
||||
.webp()
|
||||
.toBuffer(function (err, data, info) {
|
||||
@@ -86,7 +86,7 @@ describe('Embed', function () {
|
||||
sharp(fixtures.inputPngWithTransparency16bit)
|
||||
.resize(32, 16)
|
||||
.embed()
|
||||
.background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
.background({ r: 0, g: 0, b: 0, alpha: 0 })
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
@@ -102,7 +102,7 @@ describe('Embed', function () {
|
||||
sharp(fixtures.inputPngWithGreyAlpha)
|
||||
.resize(32, 16)
|
||||
.embed()
|
||||
.background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
.background({ r: 0, g: 0, b: 0, alpha: 0 })
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
@@ -114,23 +114,6 @@ describe('Embed', function () {
|
||||
});
|
||||
});
|
||||
|
||||
it.skip('embed TIFF in LAB colourspace onto RGBA background', function (done) {
|
||||
sharp(fixtures.inputTiffCielab)
|
||||
.resize(64, 128)
|
||||
.embed()
|
||||
.background({r: 255, g: 102, b: 0, alpha: 0.5})
|
||||
.png()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(64, info.width);
|
||||
assert.strictEqual(128, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('embed-lab-into-rgba.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Enlarge and embed', function (done) {
|
||||
sharp(fixtures.inputPngWithOneColor)
|
||||
.embed()
|
||||
@@ -170,7 +153,7 @@ describe('Embed', function () {
|
||||
it('Embed gravity horizontal northwest', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100)
|
||||
.background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
.background({ r: 0, g: 0, b: 0, alpha: 0 })
|
||||
.embed(sharp.gravity.northwest)
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
@@ -186,7 +169,7 @@ describe('Embed', function () {
|
||||
it('Embed gravity horizontal north', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100)
|
||||
.background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
.background({ r: 0, g: 0, b: 0, alpha: 0 })
|
||||
.embed(sharp.gravity.north)
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
@@ -202,7 +185,7 @@ describe('Embed', function () {
|
||||
it('Embed gravity horizontal northeast', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100)
|
||||
.background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
.background({ r: 0, g: 0, b: 0, alpha: 0 })
|
||||
.embed(sharp.gravity.northeast)
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
@@ -218,7 +201,7 @@ describe('Embed', function () {
|
||||
it('Embed gravity horizontal east', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100)
|
||||
.background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
.background({ r: 0, g: 0, b: 0, alpha: 0 })
|
||||
.embed(sharp.gravity.east)
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
@@ -234,7 +217,7 @@ describe('Embed', function () {
|
||||
it('Embed gravity horizontal southeast', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100)
|
||||
.background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
.background({ r: 0, g: 0, b: 0, alpha: 0 })
|
||||
.embed(sharp.gravity.southeast)
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
@@ -250,7 +233,7 @@ describe('Embed', function () {
|
||||
it('Embed gravity horizontal south', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100)
|
||||
.background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
.background({ r: 0, g: 0, b: 0, alpha: 0 })
|
||||
.embed(sharp.gravity.south)
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
@@ -266,7 +249,7 @@ describe('Embed', function () {
|
||||
it('Embed gravity horizontal southwest', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100)
|
||||
.background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
.background({ r: 0, g: 0, b: 0, alpha: 0 })
|
||||
.embed(sharp.gravity.southwest)
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
@@ -282,7 +265,7 @@ describe('Embed', function () {
|
||||
it('Embed gravity horizontal west', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100)
|
||||
.background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
.background({ r: 0, g: 0, b: 0, alpha: 0 })
|
||||
.embed(sharp.gravity.west)
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
@@ -298,7 +281,7 @@ describe('Embed', function () {
|
||||
it('Embed gravity horizontal center', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100)
|
||||
.background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
.background({ r: 0, g: 0, b: 0, alpha: 0 })
|
||||
.embed(sharp.gravity.center)
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
@@ -314,7 +297,7 @@ describe('Embed', function () {
|
||||
it('Embed gravity vertical northwest', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200)
|
||||
.background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
.background({ r: 0, g: 0, b: 0, alpha: 0 })
|
||||
.embed(sharp.gravity.northwest)
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
@@ -330,7 +313,7 @@ describe('Embed', function () {
|
||||
it('Embed gravity vertical north', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200)
|
||||
.background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
.background({ r: 0, g: 0, b: 0, alpha: 0 })
|
||||
.embed(sharp.gravity.north)
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
@@ -346,7 +329,7 @@ describe('Embed', function () {
|
||||
it('Embed gravity vertical northeast', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200)
|
||||
.background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
.background({ r: 0, g: 0, b: 0, alpha: 0 })
|
||||
.embed(sharp.gravity.northeast)
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
@@ -362,7 +345,7 @@ describe('Embed', function () {
|
||||
it('Embed gravity vertical east', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200)
|
||||
.background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
.background({ r: 0, g: 0, b: 0, alpha: 0 })
|
||||
.embed(sharp.gravity.east)
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
@@ -378,7 +361,7 @@ describe('Embed', function () {
|
||||
it('Embed gravity vertical southeast', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200)
|
||||
.background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
.background({ r: 0, g: 0, b: 0, alpha: 0 })
|
||||
.embed(sharp.gravity.southeast)
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
@@ -394,7 +377,7 @@ describe('Embed', function () {
|
||||
it('Embed gravity vertical south', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200)
|
||||
.background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
.background({ r: 0, g: 0, b: 0, alpha: 0 })
|
||||
.embed(sharp.gravity.south)
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
@@ -410,7 +393,7 @@ describe('Embed', function () {
|
||||
it('Embed gravity vertical southwest', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200)
|
||||
.background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
.background({ r: 0, g: 0, b: 0, alpha: 0 })
|
||||
.embed(sharp.gravity.southwest)
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
@@ -426,7 +409,7 @@ describe('Embed', function () {
|
||||
it('Embed gravity vertical west', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200)
|
||||
.background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
.background({ r: 0, g: 0, b: 0, alpha: 0 })
|
||||
.embed(sharp.gravity.west)
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
@@ -442,7 +425,7 @@ describe('Embed', function () {
|
||||
it('Embed gravity vertical center', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200)
|
||||
.background({r: 0, g: 0, b: 0, alpha: 0})
|
||||
.background({ r: 0, g: 0, b: 0, alpha: 0 })
|
||||
.embed(sharp.gravity.center)
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
261
test/unit/deprecated-resize.js
Normal file
@@ -0,0 +1,261 @@
|
||||
'use strict';
|
||||
|
||||
const assert = require('assert');
|
||||
|
||||
const sharp = require('../../');
|
||||
const fixtures = require('../fixtures');
|
||||
|
||||
describe('Deprecated resize-related functions', function () {
|
||||
it('Max width or height considering ratio (portrait)', function (done) {
|
||||
sharp(fixtures.inputTiff)
|
||||
.resize(320, 320)
|
||||
.max()
|
||||
.jpeg()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(243, info.width);
|
||||
assert.strictEqual(320, info.height);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('Min width or height considering ratio (portrait)', function (done) {
|
||||
sharp(fixtures.inputTiff)
|
||||
.resize(320, 320)
|
||||
.min()
|
||||
.jpeg()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(422, info.height);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('Max width or height considering ratio (landscape)', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(320, 320)
|
||||
.max()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(261, info.height);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('Provide only one dimension with max, should default to crop', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(320)
|
||||
.max()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(261, info.height);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('Min width or height considering ratio (landscape)', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(320, 320)
|
||||
.min()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(392, info.width);
|
||||
assert.strictEqual(320, info.height);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('Provide only one dimension with min, should default to crop', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(320)
|
||||
.min()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(261, info.height);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('Do not enlarge when input width is already less than output width', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(2800)
|
||||
.withoutEnlargement()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(2725, info.width);
|
||||
assert.strictEqual(2225, info.height);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('Do not enlarge when input height is already less than output height', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(null, 2300)
|
||||
.withoutEnlargement()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(2725, info.width);
|
||||
assert.strictEqual(2225, info.height);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('Do enlarge when input width is less than output width', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(2800)
|
||||
.withoutEnlargement(false)
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(2800, info.width);
|
||||
assert.strictEqual(2286, info.height);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('Downscale width and height, ignoring aspect ratio', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(320, 320)
|
||||
.ignoreAspectRatio()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(320, info.height);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('Downscale width, ignoring aspect ratio', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(320)
|
||||
.ignoreAspectRatio()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(2225, info.height);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('Downscale height, ignoring aspect ratio', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(null, 320)
|
||||
.ignoreAspectRatio()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(2725, info.width);
|
||||
assert.strictEqual(320, info.height);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('Upscale width and height, ignoring aspect ratio', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(3000, 3000)
|
||||
.ignoreAspectRatio()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(3000, info.width);
|
||||
assert.strictEqual(3000, info.height);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('Upscale width, ignoring aspect ratio', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(3000)
|
||||
.ignoreAspectRatio()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(3000, info.width);
|
||||
assert.strictEqual(2225, info.height);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('Upscale height, ignoring aspect ratio', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(null, 3000)
|
||||
.ignoreAspectRatio()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(2725, info.width);
|
||||
assert.strictEqual(3000, info.height);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('Downscale width, upscale height, ignoring aspect ratio', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(320, 3000)
|
||||
.ignoreAspectRatio()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(3000, info.height);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('Upscale width, downscale height, ignoring aspect ratio', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(3000, 320)
|
||||
.ignoreAspectRatio()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(3000, info.width);
|
||||
assert.strictEqual(320, info.height);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('Identity transform, ignoring aspect ratio', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.ignoreAspectRatio()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(2725, info.width);
|
||||
assert.strictEqual(2225, info.height);
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -9,8 +9,13 @@ describe('Extend', function () {
  it('extend all sides equally with RGB', function (done) {
    sharp(fixtures.inputJpg)
      .resize(120)
      .background({r: 255, g: 0, b: 0})
      .extend(10)
      .extend({
        top: 10,
        bottom: 10,
        left: 10,
        right: 10,
        background: { r: 255, g: 0, b: 0 }
      })
      .toBuffer(function (err, data, info) {
        if (err) throw err;
        assert.strictEqual(140, info.width);
@@ -22,8 +27,13 @@ describe('Extend', function () {
  it('extend sides unequally with RGBA', function (done) {
    sharp(fixtures.inputPngWithTransparency16bit)
      .resize(120)
      .background({r: 0, g: 0, b: 0, alpha: 0})
      .extend({top: 50, bottom: 0, left: 10, right: 35})
      .extend({
        top: 50,
        bottom: 0,
        left: 10,
        right: 35,
        background: { r: 0, g: 0, b: 0, alpha: 0 }
      })
      .toBuffer(function (err, data, info) {
        if (err) throw err;
        assert.strictEqual(165, info.width);
@@ -44,15 +54,20 @@ describe('Extend', function () {
  });
  it('partial object fails', function () {
    assert.throws(function () {
      sharp().extend({top: 1});
      sharp().extend({ top: 1 });
    });
  });

  it('should add alpha channel before extending with a transparent Background', function (done) {
    sharp(fixtures.inputJpgWithLandscapeExif1)
      .background({r: 0, g: 0, b: 0, alpha: 0})
      .extend({
        top: 0,
        bottom: 10,
        left: 0,
        right: 10,
        background: { r: 0, g: 0, b: 0, alpha: 0 }
      })
      .toFormat(sharp.format.png)
      .extend({top: 0, bottom: 10, left: 0, right: 10})
      .toBuffer(function (err, data, info) {
        if (err) throw err;
        assert.strictEqual(610, info.width);
@@ -63,8 +78,13 @@ describe('Extend', function () {

  it('PNG with 2 channels', function (done) {
    sharp(fixtures.inputPngWithGreyAlpha)
      .background('transparent')
      .extend({top: 0, bottom: 20, left: 0, right: 20})
      .extend({
        top: 0,
        bottom: 20,
        left: 0,
        right: 20,
        background: 'transparent'
      })
      .toBuffer(function (err, data, info) {
        if (err) throw err;
        assert.strictEqual(true, data.length > 0);
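As these hunks show, extend() now carries the background colour itself instead of relying on a preceding background() call. A minimal sketch of the per-side form used in the updated tests (placeholder input path):

const sharp = require('sharp');

sharp('input.png') // placeholder path
  .extend({
    top: 50,
    bottom: 0,
    left: 10,
    right: 35,
    background: { r: 0, g: 0, b: 0, alpha: 0 } // transparent padding
  })
  .png()
  .toBuffer()
  .then(function (data) { console.log('padded bytes:', data.length); })
  .catch(console.error);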
@@ -69,8 +69,9 @@ describe('Partial image extraction', function () {

  it('After resize and crop', function (done) {
    sharp(fixtures.inputJpg)
      .resize(500, 500)
      .crop(sharp.gravity.north)
      .resize(500, 500, {
        position: sharp.gravity.north
      })
      .extract({ left: 10, top: 10, width: 100, height: 100 })
      .toBuffer(function (err, data, info) {
        if (err) throw err;
@@ -83,8 +84,9 @@ describe('Partial image extraction', function () {
  it('Before and after resize and crop', function (done) {
    sharp(fixtures.inputJpg)
      .extract({ left: 0, top: 0, width: 700, height: 700 })
      .resize(500, 500)
      .crop(sharp.gravity.north)
      .resize(500, 500, {
        position: sharp.gravity.north
      })
      .extract({ left: 10, top: 10, width: 100, height: 100 })
      .toBuffer(function (err, data, info) {
        if (err) throw err;
@@ -115,7 +117,7 @@ describe('Partial image extraction', function () {
        if (err) throw err;
        assert.strictEqual(280, info.width);
        assert.strictEqual(380, info.height);
        fixtures.assertSimilar(fixtures.expected('rotate-extract.jpg'), data, { threshold: 6 }, done);
        fixtures.assertSimilar(fixtures.expected('rotate-extract.jpg'), data, { threshold: 7 }, done);
      });
  });

@@ -54,6 +54,32 @@ describe('Image channel extraction', function () {
    });
  });

  it('With colorspace conversion', function (done) {
    const output = fixtures.path('output.extract-lch.jpg');
    sharp(fixtures.inputJpg)
      .toColourspace('lch')
      .extractChannel(1)
      .resize(320, 240, { fastShrinkOnLoad: false })
      .toFile(output, function (err, info) {
        if (err) throw err;
        assert.strictEqual(320, info.width);
        assert.strictEqual(240, info.height);
        fixtures.assertMaxColourDistance(output, fixtures.expected('extract-lch.jpg'), 9);
        done();
      });
  });

  it('Alpha from 16-bit PNG', function (done) {
    const output = fixtures.path('output.extract-alpha-16bit.jpg');
    sharp(fixtures.inputPngWithTransparency16bit)
      .extractChannel(3)
      .toFile(output, function (err, info) {
        if (err) throw err;
        fixtures.assertMaxColourDistance(output, fixtures.expected('extract-alpha-16bit.jpg'));
        done();
      });
  });

  it('Invalid channel number', function () {
    assert.throws(function () {
      sharp(fixtures.inputJpg)
@@ -49,8 +49,8 @@ describe('failOnError', function () {
  it('returns errors to callback for truncated JPEG when failOnError is set', function (done) {
    sharp(fixtures.inputJpgTruncated, { failOnError: true }).toBuffer(function (err, data, info) {
      assert.ok(err.message.includes('VipsJpeg: Premature end of JPEG file'), err);
      assert.equal(data, null);
      assert.equal(info, null);
      assert.strictEqual(data, null);
      assert.strictEqual(info, null);
      done();
    });
  });
@@ -58,8 +58,8 @@ describe('failOnError', function () {
  it('returns errors to callback for truncated PNG when failOnError is set', function (done) {
    sharp(fixtures.inputPngTruncated, { failOnError: true }).toBuffer(function (err, data, info) {
      assert.ok(err.message.includes('vipspng: libpng read error'), err);
      assert.equal(data, null);
      assert.equal(info, null);
      assert.strictEqual(data, null);
      assert.strictEqual(info, null);
      done();
    });
  });
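The only change here is tightening assert.equal to assert.strictEqual; the behaviour under test is unchanged. For reference, a minimal sketch of the failOnError constructor option these tests exercise (placeholder input path):

const sharp = require('sharp');

// With failOnError: true, truncated or corrupt input is surfaced as an error
// rather than being decoded as far as possible.
sharp('truncated.jpg', { failOnError: true }) // placeholder path
  .toBuffer()
  .then(function () { console.log('decoded cleanly'); })
  .catch(function (err) { console.error('rejected:', err.message); });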
@@ -44,6 +44,19 @@ describe('Gamma correction', function () {
    });
  });

  it('input value of 2.2, output value of 3.0', function (done) {
    sharp(fixtures.inputJpgWithGammaHoliness)
      .resize(129, 111)
      .gamma(2.2, 3.0)
      .toBuffer(function (err, data, info) {
        if (err) throw err;
        assert.strictEqual('jpeg', info.format);
        assert.strictEqual(129, info.width);
        assert.strictEqual(111, info.height);
        fixtures.assertSimilar(fixtures.expected('gamma-in-2.2-out-3.0.jpg'), data, { threshold: 6 }, done);
      });
  });

  it('alpha transparency', function (done) {
    sharp(fixtures.inputPngOverlayLayer1)
      .resize(320)
@@ -57,9 +70,15 @@ describe('Gamma correction', function () {
    });
  });

  it('invalid value', function () {
  it('invalid first parameter value', function () {
    assert.throws(function () {
      sharp(fixtures.inputJpgWithGammaHoliness).gamma(4);
    });
  });

  it('invalid second parameter value', function () {
    assert.throws(function () {
      sharp(fixtures.inputJpgWithGammaHoliness).gamma(2.2, 4);
    });
  });
});
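The new test exercises the two-argument form of gamma(): the first value is used to reverse the input gamma before resizing and the second is applied when re-encoding afterwards (the invalid-value tests pass 4, which falls outside the accepted 1.0 to 3.0 range). A minimal usage sketch, with a placeholder input path:

const sharp = require('sharp');

sharp('photo.jpg') // placeholder path
  .resize(129, 111)
  .gamma(2.2, 3.0) // gamma-in 2.2, gamma-out 3.0
  .toBuffer()
  .then(function (data) { console.log('bytes:', data.length); })
  .catch(console.error);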
276
test/unit/io.js
@@ -2,6 +2,7 @@
|
||||
|
||||
const fs = require('fs');
|
||||
const assert = require('assert');
|
||||
const rimraf = require('rimraf');
|
||||
|
||||
const sharp = require('../../');
|
||||
const fixtures = require('../fixtures');
|
||||
@@ -16,7 +17,7 @@ describe('Input/output', function () {
|
||||
|
||||
it('Read from File and write to Stream', function (done) {
|
||||
const writable = fs.createWriteStream(fixtures.outputJpg);
|
||||
writable.on('finish', function () {
|
||||
writable.on('close', function () {
|
||||
sharp(fixtures.outputJpg).toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
@@ -24,8 +25,7 @@ describe('Input/output', function () {
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(240, info.height);
|
||||
fs.unlinkSync(fixtures.outputJpg);
|
||||
done();
|
||||
rimraf(fixtures.outputJpg, done);
|
||||
});
|
||||
});
|
||||
sharp(fixtures.inputJpg).resize(320, 240).pipe(writable);
|
||||
@@ -34,7 +34,7 @@ describe('Input/output', function () {
|
||||
it('Read from Buffer and write to Stream', function (done) {
|
||||
const inputJpgBuffer = fs.readFileSync(fixtures.inputJpg);
|
||||
const writable = fs.createWriteStream(fixtures.outputJpg);
|
||||
writable.on('finish', function () {
|
||||
writable.on('close', function () {
|
||||
sharp(fixtures.outputJpg).toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
@@ -42,8 +42,7 @@ describe('Input/output', function () {
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(240, info.height);
|
||||
fs.unlinkSync(fixtures.outputJpg);
|
||||
done();
|
||||
rimraf(fixtures.outputJpg, done);
|
||||
});
|
||||
});
|
||||
sharp(inputJpgBuffer).resize(320, 240).pipe(writable);
|
||||
@@ -57,8 +56,7 @@ describe('Input/output', function () {
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(240, info.height);
|
||||
fs.unlinkSync(fixtures.outputJpg);
|
||||
done();
|
||||
rimraf(fixtures.outputJpg, done);
|
||||
});
|
||||
readable.pipe(pipeline);
|
||||
});
|
||||
@@ -81,7 +79,7 @@ describe('Input/output', function () {
|
||||
const pipeline = sharp().resize(1, 1);
|
||||
fs.createReadStream(fixtures.inputJpg).pipe(pipeline);
|
||||
return pipeline
|
||||
.toBuffer({resolveWithObject: false})
|
||||
.toBuffer({ resolveWithObject: false })
|
||||
.then(function (data) {
|
||||
assert.strictEqual(true, data instanceof Buffer);
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
@@ -92,7 +90,7 @@ describe('Input/output', function () {
|
||||
const pipeline = sharp().resize(1, 1);
|
||||
fs.createReadStream(fixtures.inputJpg).pipe(pipeline);
|
||||
return pipeline
|
||||
.toBuffer({resolveWithObject: true})
|
||||
.toBuffer({ resolveWithObject: true })
|
||||
.then(function (object) {
|
||||
assert.strictEqual('object', typeof object);
|
||||
assert.strictEqual('object', typeof object.info);
|
||||
@@ -108,7 +106,7 @@ describe('Input/output', function () {
|
||||
it('Read from File and write to Buffer via Promise resolved with Buffer', function () {
|
||||
return sharp(fixtures.inputJpg)
|
||||
.resize(1, 1)
|
||||
.toBuffer({resolveWithObject: false})
|
||||
.toBuffer({ resolveWithObject: false })
|
||||
.then(function (data) {
|
||||
assert.strictEqual(true, data instanceof Buffer);
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
@@ -118,7 +116,7 @@ describe('Input/output', function () {
|
||||
it('Read from File and write to Buffer via Promise resolved with Object', function () {
|
||||
return sharp(fixtures.inputJpg)
|
||||
.resize(1, 1)
|
||||
.toBuffer({resolveWithObject: true})
|
||||
.toBuffer({ resolveWithObject: true })
|
||||
.then(function (object) {
|
||||
assert.strictEqual('object', typeof object);
|
||||
assert.strictEqual('object', typeof object.info);
|
||||
@@ -134,7 +132,7 @@ describe('Input/output', function () {
|
||||
it('Read from Stream and write to Stream', function (done) {
|
||||
const readable = fs.createReadStream(fixtures.inputJpg);
|
||||
const writable = fs.createWriteStream(fixtures.outputJpg);
|
||||
writable.on('finish', function () {
|
||||
writable.on('close', function () {
|
||||
sharp(fixtures.outputJpg).toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
@@ -142,8 +140,7 @@ describe('Input/output', function () {
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(240, info.height);
|
||||
fs.unlinkSync(fixtures.outputJpg);
|
||||
done();
|
||||
rimraf(fixtures.outputJpg, done);
|
||||
});
|
||||
});
|
||||
const pipeline = sharp().resize(320, 240);
|
||||
@@ -162,10 +159,9 @@ describe('Input/output', function () {
|
||||
assert.strictEqual(3, info.channels);
|
||||
infoEventEmitted = true;
|
||||
});
|
||||
writable.on('finish', function () {
|
||||
writable.on('close', function () {
|
||||
assert.strictEqual(true, infoEventEmitted);
|
||||
fs.unlinkSync(fixtures.outputJpg);
|
||||
done();
|
||||
rimraf(fixtures.outputJpg, done);
|
||||
});
|
||||
readable.pipe(pipeline).pipe(writable);
|
||||
});
|
||||
@@ -177,8 +173,7 @@ describe('Input/output', function () {
|
||||
anErrorWasEmitted = !!err;
|
||||
}).on('end', function () {
|
||||
assert(anErrorWasEmitted);
|
||||
fs.unlinkSync(fixtures.outputJpg);
|
||||
done();
|
||||
rimraf(fixtures.outputJpg, done);
|
||||
});
|
||||
const readableButNotAnImage = fs.createReadStream(__filename);
|
||||
const writable = fs.createWriteStream(fixtures.outputJpg);
|
||||
@@ -192,8 +187,7 @@ describe('Input/output', function () {
|
||||
anErrorWasEmitted = !!err;
|
||||
}).on('end', function () {
|
||||
assert(anErrorWasEmitted);
|
||||
fs.unlinkSync(fixtures.outputJpg);
|
||||
done();
|
||||
rimraf(fixtures.outputJpg, done);
|
||||
});
|
||||
const writable = fs.createWriteStream(fixtures.outputJpg);
|
||||
readableButNotAnImage.pipe(writable);
|
||||
@@ -202,7 +196,7 @@ describe('Input/output', function () {
|
||||
it('Readable side of Stream can start flowing after Writable side has finished', function (done) {
|
||||
const readable = fs.createReadStream(fixtures.inputJpg);
|
||||
const writable = fs.createWriteStream(fixtures.outputJpg);
|
||||
writable.on('finish', function () {
|
||||
writable.on('close', function () {
|
||||
sharp(fixtures.outputJpg).toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
@@ -210,8 +204,7 @@ describe('Input/output', function () {
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(240, info.height);
|
||||
fs.unlinkSync(fixtures.outputJpg);
|
||||
done();
|
||||
rimraf(fixtures.outputJpg, done);
|
||||
});
|
||||
});
|
||||
const pipeline = sharp().resize(320, 240);
|
||||
@@ -389,6 +382,16 @@ describe('Input/output', function () {
|
||||
});
|
||||
});
|
||||
|
||||
describe('Invalid JPEG quantisation table', function () {
|
||||
[-1, 88.2, 'test'].forEach(function (table) {
|
||||
it(table.toString(), function () {
|
||||
assert.throws(function () {
|
||||
sharp().jpeg({ quantisationTable: table });
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('Progressive JPEG image', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(320, 240)
|
||||
@@ -459,7 +462,7 @@ describe('Input/output', function () {
|
||||
|
||||
it('should work for webp alpha quality', function (done) {
|
||||
sharp(fixtures.inputPngAlphaPremultiplicationSmall)
|
||||
.webp({alphaQuality: 80})
|
||||
.webp({ alphaQuality: 80 })
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
@@ -470,7 +473,7 @@ describe('Input/output', function () {
|
||||
|
||||
it('should work for webp lossless', function (done) {
|
||||
sharp(fixtures.inputPngAlphaPremultiplicationSmall)
|
||||
.webp({lossless: true})
|
||||
.webp({ lossless: true })
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
@@ -481,7 +484,7 @@ describe('Input/output', function () {
|
||||
|
||||
it('should work for webp near-lossless', function (done) {
|
||||
sharp(fixtures.inputPngAlphaPremultiplicationSmall)
|
||||
.webp({nearLossless: true, quality: 50})
|
||||
.webp({ nearLossless: true, quality: 50 })
|
||||
.toBuffer(function (err50, data50, info50) {
|
||||
if (err50) throw err50;
|
||||
assert.strictEqual(true, data50.length > 0);
|
||||
@@ -492,7 +495,7 @@ describe('Input/output', function () {
|
||||
|
||||
it('should use near-lossless when both lossless and nearLossless are specified', function (done) {
|
||||
sharp(fixtures.inputPngAlphaPremultiplicationSmall)
|
||||
.webp({nearLossless: true, quality: 50, lossless: true})
|
||||
.webp({ nearLossless: true, quality: 50, lossless: true })
|
||||
.toBuffer(function (err50, data50, info50) {
|
||||
if (err50) throw err50;
|
||||
assert.strictEqual(true, data50.length > 0);
|
||||
@@ -554,8 +557,7 @@ describe('Input/output', function () {
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(80, info.height);
|
||||
fs.unlinkSync(fixtures.outputZoinks);
|
||||
done();
|
||||
rimraf(fixtures.outputZoinks, done);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -568,8 +570,7 @@ describe('Input/output', function () {
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(80, info.height);
|
||||
fs.unlinkSync(fixtures.outputZoinks);
|
||||
done();
|
||||
rimraf(fixtures.outputZoinks, done);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -582,8 +583,7 @@ describe('Input/output', function () {
|
||||
assert.strictEqual('webp', info.format);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(80, info.height);
|
||||
fs.unlinkSync(fixtures.outputZoinks);
|
||||
done();
|
||||
rimraf(fixtures.outputZoinks, done);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -596,8 +596,7 @@ describe('Input/output', function () {
|
||||
assert.strictEqual('tiff', info.format);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(80, info.height);
|
||||
fs.unlinkSync(fixtures.outputZoinks);
|
||||
done();
|
||||
rimraf(fixtures.outputZoinks, done);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -610,8 +609,7 @@ describe('Input/output', function () {
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(80, info.height);
|
||||
fs.unlinkSync(fixtures.outputZoinks);
|
||||
done();
|
||||
rimraf(fixtures.outputZoinks, done);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -625,8 +623,7 @@ describe('Input/output', function () {
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(80, info.height);
|
||||
fs.unlinkSync(fixtures.outputZoinks);
|
||||
done();
|
||||
rimraf(fixtures.outputZoinks, done);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -820,7 +817,68 @@ describe('Input/output', function () {
|
||||
assert.strictEqual(320, withInfo.width);
|
||||
assert.strictEqual(240, withInfo.height);
|
||||
// Verify image is of a different size (progressive output even without mozjpeg)
|
||||
assert.notEqual(withData.length, withoutData.length);
|
||||
assert.notStrictEqual(withData.length, withoutData.length);
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('Optimise coding generates smaller output length', function (done) {
|
||||
// First generate with optimize coding enabled (default)
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(320, 240)
|
||||
.jpeg()
|
||||
.toBuffer(function (err, withOptimiseCoding, withInfo) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, withOptimiseCoding.length > 0);
|
||||
assert.strictEqual(withOptimiseCoding.length, withInfo.size);
|
||||
assert.strictEqual('jpeg', withInfo.format);
|
||||
assert.strictEqual(320, withInfo.width);
|
||||
assert.strictEqual(240, withInfo.height);
|
||||
// Then generate with coding disabled
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(320, 240)
|
||||
.jpeg({ optimizeCoding: false })
|
||||
.toBuffer(function (err, withoutOptimiseCoding, withoutInfo) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, withoutOptimiseCoding.length > 0);
|
||||
assert.strictEqual(withoutOptimiseCoding.length, withoutInfo.size);
|
||||
assert.strictEqual('jpeg', withoutInfo.format);
|
||||
assert.strictEqual(320, withoutInfo.width);
|
||||
assert.strictEqual(240, withoutInfo.height);
|
||||
// Verify optimised image is of a smaller size
|
||||
assert.strictEqual(true, withOptimiseCoding.length < withoutOptimiseCoding.length);
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('Specifying quantisation table provides different JPEG', function (done) {
|
||||
// First generate with default quantisation table
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(320, 240)
|
||||
.jpeg({ optimiseCoding: false })
|
||||
.toBuffer(function (err, withDefaultQuantisationTable, withInfo) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, withDefaultQuantisationTable.length > 0);
|
||||
assert.strictEqual(withDefaultQuantisationTable.length, withInfo.size);
|
||||
assert.strictEqual('jpeg', withInfo.format);
|
||||
assert.strictEqual(320, withInfo.width);
|
||||
assert.strictEqual(240, withInfo.height);
|
||||
// Then generate with different quantisation table
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(320, 240)
|
||||
.jpeg({ optimiseCoding: false, quantisationTable: 3 })
|
||||
.toBuffer(function (err, withQuantTable3, withoutInfo) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, withQuantTable3.length > 0);
|
||||
assert.strictEqual(withQuantTable3.length, withoutInfo.size);
|
||||
assert.strictEqual('jpeg', withoutInfo.format);
|
||||
assert.strictEqual(320, withoutInfo.width);
|
||||
assert.strictEqual(240, withoutInfo.height);
|
||||
|
||||
// Verify the image is the same size or smaller (mozjpeg may not be present)
|
||||
assert.strictEqual(true, withQuantTable3.length <= withDefaultQuantisationTable.length);
|
||||
done();
|
||||
});
|
||||
});
|
||||
@@ -829,7 +887,7 @@ describe('Input/output', function () {
|
||||
it('Convert SVG to PNG at default 72DPI', function (done) {
|
||||
sharp(fixtures.inputSvg)
|
||||
.resize(1024)
|
||||
.extract({left: 290, top: 760, width: 40, height: 40})
|
||||
.extract({ left: 290, top: 760, width: 40, height: 40 })
|
||||
.toFormat('png')
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
@@ -850,7 +908,7 @@ describe('Input/output', function () {
|
||||
it('Convert SVG to PNG at 1200DPI', function (done) {
|
||||
sharp(fixtures.inputSvg, { density: 1200 })
|
||||
.resize(1024)
|
||||
.extract({left: 290, top: 760, width: 40, height: 40})
|
||||
.extract({ left: 290, top: 760, width: 40, height: 40 })
|
||||
.toFormat('png')
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
@@ -868,6 +926,21 @@ describe('Input/output', function () {
|
||||
});
|
||||
});
|
||||
|
||||
it('Convert SVG to PNG at 14.4DPI', function (done) {
|
||||
sharp(fixtures.inputSvg, { density: 14.4 })
|
||||
.toFormat('png')
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(20, info.width);
|
||||
assert.strictEqual(20, info.height);
|
||||
fixtures.assertSimilar(fixtures.expected('svg14.4.png'), data, function (err) {
|
||||
if (err) throw err;
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('Convert SVG with embedded images to PNG, respecting dimensions, autoconvert to PNG', function (done) {
|
||||
sharp(fixtures.inputSvgWithEmbeddedImages)
|
||||
.toBuffer(function (err, data, info) {
|
||||
@@ -994,7 +1067,7 @@ describe('Input/output', function () {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('tiff', info.format);
|
||||
assert(info.size === startSize);
|
||||
fs.unlink(fixtures.outputTiff, done);
|
||||
rimraf(fixtures.outputTiff, done);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1011,7 +1084,7 @@ describe('Input/output', function () {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('tiff', info.format);
|
||||
assert(info.size < (startSize / 2));
|
||||
fs.unlink(fixtures.outputTiff, done);
|
||||
rimraf(fixtures.outputTiff, done);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1034,7 +1107,7 @@ describe('Input/output', function () {
|
||||
sharp(fixtures.outputTiff).metadata(function (err, metadata) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(metadata.density, res * 2.54); // convert to dpi
|
||||
fs.unlink(fixtures.outputTiff, done);
|
||||
rimraf(fixtures.outputTiff, done);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1079,7 +1152,7 @@ describe('Input/output', function () {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('tiff', info.format);
|
||||
assert(info.size < startSize);
|
||||
fs.unlink(fixtures.outputTiff, done);
|
||||
rimraf(fixtures.outputTiff, done);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1095,7 +1168,7 @@ describe('Input/output', function () {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('tiff', info.format);
|
||||
assert(info.size < startSize);
|
||||
fs.unlink(fixtures.outputTiff, done);
|
||||
rimraf(fixtures.outputTiff, done);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1110,7 +1183,7 @@ describe('Input/output', function () {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('tiff', info.format);
|
||||
assert(info.size < startSize);
|
||||
fs.unlink(fixtures.outputTiff, done);
|
||||
rimraf(fixtures.outputTiff, done);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1125,7 +1198,7 @@ describe('Input/output', function () {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('tiff', info.format);
|
||||
assert(info.size < startSize);
|
||||
fs.unlink(fixtures.outputTiff, done);
|
||||
rimraf(fixtures.outputTiff, done);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1140,7 +1213,7 @@ describe('Input/output', function () {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('tiff', info.format);
|
||||
assert(info.size < startSize);
|
||||
fs.unlink(fixtures.outputTiff, done);
|
||||
rimraf(fixtures.outputTiff, done);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1154,7 +1227,7 @@ describe('Input/output', function () {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('tiff', info.format);
|
||||
assert(info.size < startSize);
|
||||
fs.unlink(fixtures.outputTiff, done);
|
||||
rimraf(fixtures.outputTiff, done);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1212,6 +1285,84 @@ describe('Input/output', function () {
|
||||
});
|
||||
});
|
||||
|
||||
it('TIFF tiled pyramid image without compression enlarges test file', function (done) {
|
||||
const startSize = fs.statSync(fixtures.inputTiffUncompressed).size;
|
||||
sharp(fixtures.inputTiffUncompressed)
|
||||
.tiff({
|
||||
compression: 'none',
|
||||
pyramid: true,
|
||||
tile: true,
|
||||
tileHeight: 256,
|
||||
tileWidth: 256
|
||||
})
|
||||
.toFile(fixtures.outputTiff, (err, info) => {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('tiff', info.format);
|
||||
assert(info.size > startSize);
|
||||
rimraf(fixtures.outputTiff, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('TIFF pyramid true value does not throw error', function () {
|
||||
assert.doesNotThrow(function () {
|
||||
sharp().tiff({ pyramid: true });
|
||||
});
|
||||
});
|
||||
|
||||
it('Invalid TIFF pyramid value throws error', function () {
|
||||
assert.throws(function () {
|
||||
sharp().tiff({ pyramid: 'true' });
|
||||
});
|
||||
});
|
||||
|
||||
it('Invalid TIFF tile value throws error', function () {
|
||||
assert.throws(function () {
|
||||
sharp().tiff({ tile: 'true' });
|
||||
});
|
||||
});
|
||||
|
||||
it('TIFF tile true value does not throw error', function () {
|
||||
assert.doesNotThrow(function () {
|
||||
sharp().tiff({ tile: true });
|
||||
});
|
||||
});
|
||||
|
||||
it('Valid TIFF tileHeight value does not throw error', function () {
|
||||
assert.doesNotThrow(function () {
|
||||
sharp().tiff({ tileHeight: 512 });
|
||||
});
|
||||
});
|
||||
|
||||
it('Valid TIFF tileWidth value does not throw error', function () {
|
||||
assert.doesNotThrow(function () {
|
||||
sharp().tiff({ tileWidth: 512 });
|
||||
});
|
||||
});
|
||||
|
||||
it('Invalid TIFF tileHeight value throws error', function () {
|
||||
assert.throws(function () {
|
||||
sharp().tiff({ tileHeight: '256' });
|
||||
});
|
||||
});
|
||||
|
||||
it('Invalid TIFF tileWidth value throws error', function () {
|
||||
assert.throws(function () {
|
||||
sharp().tiff({ tileWidth: '256' });
|
||||
});
|
||||
});
|
||||
|
||||
it('Invalid TIFF tileHeight value throws error', function () {
|
||||
assert.throws(function () {
|
||||
sharp().tiff({ tileHeight: 0 });
|
||||
});
|
||||
});
|
||||
|
||||
it('Invalid TIFF tileWidth value throws error', function () {
|
||||
assert.throws(function () {
|
||||
sharp().tiff({ tileWidth: 0 });
|
||||
});
|
||||
});
|
||||
|
||||
it('Input and output formats match when not forcing', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(320, 240)
|
||||
@@ -1291,15 +1442,14 @@ describe('Input/output', function () {
|
||||
|
||||
it('Save Vips V file', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.extract({left: 910, top: 1105, width: 70, height: 60})
|
||||
.extract({ left: 910, top: 1105, width: 70, height: 60 })
|
||||
.toFile(fixtures.outputV, function (err, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, info.size > 0);
|
||||
assert.strictEqual('v', info.format);
|
||||
assert.strictEqual(70, info.width);
|
||||
assert.strictEqual(60, info.height);
|
||||
fs.unlinkSync(fixtures.outputV);
|
||||
done();
|
||||
rimraf(fixtures.outputV, done);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1316,6 +1466,7 @@ describe('Input/output', function () {
|
||||
assert.strictEqual('raw', info.format);
|
||||
assert.strictEqual(32, info.width);
|
||||
assert.strictEqual(24, info.height);
|
||||
assert.strictEqual(1, info.channels);
|
||||
done();
|
||||
});
|
||||
});
|
||||
@@ -1435,11 +1586,6 @@ describe('Input/output', function () {
|
||||
sharp(null, { density: 'zoinks' });
|
||||
});
|
||||
});
|
||||
it('Invalid density: float', function () {
|
||||
assert.throws(function () {
|
||||
sharp(null, { density: 0.5 });
|
||||
});
|
||||
});
|
||||
it('Ignore unknown attribute', function () {
|
||||
sharp(null, { unknown: true });
|
||||
});
|
||||
@@ -1487,7 +1633,7 @@ describe('Input/output', function () {
|
||||
width: info.width,
|
||||
height: info.height,
|
||||
channels: info.channels
|
||||
}})
|
||||
} })
|
||||
.jpeg()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
@@ -1514,7 +1660,7 @@ describe('Input/output', function () {
|
||||
width: info.width,
|
||||
height: info.height,
|
||||
channels: info.channels
|
||||
}})
|
||||
} })
|
||||
.png()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
@@ -1616,7 +1762,7 @@ describe('Input/output', function () {
|
||||
assert.strictEqual(472, info.height);
|
||||
assert.strictEqual(3, info.channels);
|
||||
});
|
||||
const badPipeline = sharp(null, {raw: {width: 840, height: 500, channels: 3}})
|
||||
const badPipeline = sharp(null, { raw: { width: 840, height: 500, channels: 3 } })
|
||||
.toFormat('jpeg')
|
||||
.toBuffer(function (err, data, info) {
|
||||
assert.strictEqual(err.message.indexOf('memory area too small') > 0, true);
|
||||
@@ -1624,7 +1770,7 @@ describe('Input/output', function () {
|
||||
const inPipeline = sharp()
|
||||
.resize(840, 472)
|
||||
.raw();
|
||||
const goodPipeline = sharp(null, {raw: {width: 840, height: 472, channels: 3}})
|
||||
const goodPipeline = sharp(null, { raw: { width: 840, height: 472, channels: 3 } })
|
||||
.toFormat('jpeg')
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
|
||||
@@ -138,7 +138,7 @@ describe('Image channel insertion', function () {
|
||||
|
||||
it('Invalid raw buffer description', function () {
|
||||
assert.throws(function () {
|
||||
sharp().joinChannel(fs.readFileSync(fixtures.inputPng), {raw: {}});
|
||||
sharp().joinChannel(fs.readFileSync(fixtures.inputPng), { raw: {} });
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
@@ -1,8 +1,10 @@
'use strict';

const assert = require('assert');
const fs = require('fs');
const semver = require('semver');
const libvips = require('../../lib/libvips');
const mockFS = require('mock-fs');

const originalPlatform = process.platform;

@@ -66,5 +68,41 @@ describe('libvips binaries', function () {

    delete process.env.SHARP_IGNORE_GLOBAL_LIBVIPS;
  });
  it('cachePath returns a valid path ending with _libvips', function () {
    const cachePath = libvips.cachePath();
    assert.strictEqual('string', typeof cachePath);
    assert.strictEqual('_libvips', cachePath.substr(-8));
    assert.strictEqual(true, fs.existsSync(cachePath));
  });
});

describe('safe directory creation', function () {
  before(function () {
    mockFS({
      exampleDirA: {
        exampleDirB: {
          exampleFile: 'Example test file'
        }
      }
    });
  });
  after(function () { mockFS.restore(); });

  it('mkdirSync creates a directory', function () {
    const dirPath = 'createdDir';

    libvips.mkdirSync(dirPath);
    assert.strictEqual(true, fs.existsSync(dirPath));
  });
  it('mkdirSync does not throw error or overwrite an existing dir', function () {
    const dirPath = 'exampleDirA';
    const nestedDirPath = 'exampleDirA/exampleDirB';
    assert.strictEqual(true, fs.existsSync(dirPath));

    libvips.mkdirSync(dirPath);

    assert.strictEqual(true, fs.existsSync(dirPath));
    assert.strictEqual(true, fs.existsSync(nestedDirPath));
  });
});
|
||||
|
||||
@@ -13,12 +13,15 @@ describe('Image metadata', function () {
|
||||
sharp(fixtures.inputJpg).metadata(function (err, metadata) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('jpeg', metadata.format);
|
||||
assert.strictEqual('undefined', typeof metadata.size);
|
||||
assert.strictEqual(2725, metadata.width);
|
||||
assert.strictEqual(2225, metadata.height);
|
||||
assert.strictEqual('srgb', metadata.space);
|
||||
assert.strictEqual(3, metadata.channels);
|
||||
assert.strictEqual('uchar', metadata.depth);
|
||||
assert.strictEqual('undefined', typeof metadata.density);
|
||||
assert.strictEqual('4:2:0', metadata.chromaSubsampling);
|
||||
assert.strictEqual(false, metadata.isProgressive);
|
||||
assert.strictEqual(false, metadata.hasProfile);
|
||||
assert.strictEqual(false, metadata.hasAlpha);
|
||||
assert.strictEqual('undefined', typeof metadata.orientation);
|
||||
@@ -32,12 +35,15 @@ describe('Image metadata', function () {
|
||||
sharp(fixtures.inputJpgWithExif).metadata(function (err, metadata) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('jpeg', metadata.format);
|
||||
assert.strictEqual('undefined', typeof metadata.size);
|
||||
assert.strictEqual(450, metadata.width);
|
||||
assert.strictEqual(600, metadata.height);
|
||||
assert.strictEqual('srgb', metadata.space);
|
||||
assert.strictEqual(3, metadata.channels);
|
||||
assert.strictEqual('uchar', metadata.depth);
|
||||
assert.strictEqual(72, metadata.density);
|
||||
assert.strictEqual('4:2:0', metadata.chromaSubsampling);
|
||||
assert.strictEqual(false, metadata.isProgressive);
|
||||
assert.strictEqual(true, metadata.hasProfile);
|
||||
assert.strictEqual(false, metadata.hasAlpha);
|
||||
assert.strictEqual(8, metadata.orientation);
|
||||
@@ -79,12 +85,15 @@ describe('Image metadata', function () {
|
||||
sharp(fixtures.inputTiff).metadata(function (err, metadata) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('tiff', metadata.format);
|
||||
assert.strictEqual('undefined', typeof metadata.size);
|
||||
assert.strictEqual(2464, metadata.width);
|
||||
assert.strictEqual(3248, metadata.height);
|
||||
assert.strictEqual('b-w', metadata.space);
|
||||
assert.strictEqual(1, metadata.channels);
|
||||
assert.strictEqual('uchar', metadata.depth);
|
||||
assert.strictEqual(300, metadata.density);
|
||||
assert.strictEqual('undefined', typeof metadata.chromaSubsampling);
|
||||
assert.strictEqual(false, metadata.isProgressive);
|
||||
assert.strictEqual(false, metadata.hasProfile);
|
||||
assert.strictEqual(false, metadata.hasAlpha);
|
||||
assert.strictEqual(1, metadata.orientation);
|
||||
@@ -98,12 +107,15 @@ describe('Image metadata', function () {
|
||||
sharp(fixtures.inputPng).metadata(function (err, metadata) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('png', metadata.format);
|
||||
assert.strictEqual('undefined', typeof metadata.size);
|
||||
assert.strictEqual(2809, metadata.width);
|
||||
assert.strictEqual(2074, metadata.height);
|
||||
assert.strictEqual('b-w', metadata.space);
|
||||
assert.strictEqual(1, metadata.channels);
|
||||
assert.strictEqual('uchar', metadata.depth);
|
||||
assert.strictEqual(300, metadata.density);
|
||||
assert.strictEqual('undefined', typeof metadata.chromaSubsampling);
|
||||
assert.strictEqual(false, metadata.isProgressive);
|
||||
assert.strictEqual(false, metadata.hasProfile);
|
||||
assert.strictEqual(false, metadata.hasAlpha);
|
||||
assert.strictEqual('undefined', typeof metadata.orientation);
|
||||
@@ -117,12 +129,15 @@ describe('Image metadata', function () {
|
||||
sharp(fixtures.inputPngWithTransparency).metadata(function (err, metadata) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('png', metadata.format);
|
||||
assert.strictEqual('undefined', typeof metadata.size);
|
||||
assert.strictEqual(2048, metadata.width);
|
||||
assert.strictEqual(1536, metadata.height);
|
||||
assert.strictEqual('srgb', metadata.space);
|
||||
assert.strictEqual(4, metadata.channels);
|
||||
assert.strictEqual('uchar', metadata.depth);
|
||||
assert.strictEqual(72, metadata.density);
|
||||
assert.strictEqual('undefined', typeof metadata.chromaSubsampling);
|
||||
assert.strictEqual(false, metadata.isProgressive);
|
||||
assert.strictEqual(false, metadata.hasProfile);
|
||||
assert.strictEqual(true, metadata.hasAlpha);
|
||||
assert.strictEqual('undefined', typeof metadata.orientation);
|
||||
@@ -136,12 +151,15 @@ describe('Image metadata', function () {
|
||||
sharp(fixtures.inputWebP).metadata(function (err, metadata) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('webp', metadata.format);
|
||||
assert.strictEqual('undefined', typeof metadata.size);
|
||||
assert.strictEqual(1024, metadata.width);
|
||||
assert.strictEqual(772, metadata.height);
|
||||
assert.strictEqual('srgb', metadata.space);
|
||||
assert.strictEqual(3, metadata.channels);
|
||||
assert.strictEqual('uchar', metadata.depth);
|
||||
assert.strictEqual('undefined', typeof metadata.density);
|
||||
assert.strictEqual('undefined', typeof metadata.chromaSubsampling);
|
||||
assert.strictEqual(false, metadata.isProgressive);
|
||||
assert.strictEqual(false, metadata.hasProfile);
|
||||
assert.strictEqual(false, metadata.hasAlpha);
|
||||
assert.strictEqual('undefined', typeof metadata.orientation);
|
||||
@@ -155,11 +173,14 @@ describe('Image metadata', function () {
|
||||
sharp(fixtures.inputGif).metadata(function (err, metadata) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('gif', metadata.format);
|
||||
assert.strictEqual('undefined', typeof metadata.size);
|
||||
assert.strictEqual(800, metadata.width);
|
||||
assert.strictEqual(533, metadata.height);
|
||||
assert.strictEqual(3, metadata.channels);
|
||||
assert.strictEqual('uchar', metadata.depth);
|
||||
assert.strictEqual('undefined', typeof metadata.density);
|
||||
assert.strictEqual('undefined', typeof metadata.chromaSubsampling);
|
||||
assert.strictEqual(false, metadata.isProgressive);
|
||||
assert.strictEqual(false, metadata.hasProfile);
|
||||
assert.strictEqual(false, metadata.hasAlpha);
|
||||
assert.strictEqual('undefined', typeof metadata.orientation);
|
||||
@@ -172,11 +193,14 @@ describe('Image metadata', function () {
|
||||
sharp(fixtures.inputGifGreyPlusAlpha).metadata(function (err, metadata) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('gif', metadata.format);
|
||||
assert.strictEqual('undefined', typeof metadata.size);
|
||||
assert.strictEqual(2, metadata.width);
|
||||
assert.strictEqual(1, metadata.height);
|
||||
assert.strictEqual(2, metadata.channels);
|
||||
assert.strictEqual('uchar', metadata.depth);
|
||||
assert.strictEqual('undefined', typeof metadata.density);
|
||||
assert.strictEqual('undefined', typeof metadata.chromaSubsampling);
|
||||
assert.strictEqual(false, metadata.isProgressive);
|
||||
assert.strictEqual(false, metadata.hasProfile);
|
||||
assert.strictEqual(true, metadata.hasAlpha);
|
||||
assert.strictEqual('undefined', typeof metadata.orientation);
|
||||
@@ -189,12 +213,15 @@ describe('Image metadata', function () {
|
||||
it('File in, Promise out', function (done) {
|
||||
sharp(fixtures.inputJpg).metadata().then(function (metadata) {
|
||||
assert.strictEqual('jpeg', metadata.format);
|
||||
assert.strictEqual('undefined', typeof metadata.size);
|
||||
assert.strictEqual(2725, metadata.width);
|
||||
assert.strictEqual(2225, metadata.height);
|
||||
assert.strictEqual('srgb', metadata.space);
|
||||
assert.strictEqual(3, metadata.channels);
|
||||
assert.strictEqual('uchar', metadata.depth);
|
||||
assert.strictEqual('undefined', typeof metadata.density);
|
||||
assert.strictEqual('4:2:0', metadata.chromaSubsampling);
|
||||
assert.strictEqual(false, metadata.isProgressive);
|
||||
assert.strictEqual(false, metadata.hasProfile);
|
||||
assert.strictEqual(false, metadata.hasAlpha);
|
||||
assert.strictEqual('undefined', typeof metadata.orientation);
|
||||
@@ -218,21 +245,22 @@ describe('Image metadata', function () {
|
||||
const pipeline = sharp();
|
||||
pipeline.metadata().then(function (metadata) {
|
||||
assert.strictEqual('jpeg', metadata.format);
|
||||
assert.strictEqual(829183, metadata.size);
|
||||
assert.strictEqual(2725, metadata.width);
|
||||
assert.strictEqual(2225, metadata.height);
|
||||
assert.strictEqual('srgb', metadata.space);
|
||||
assert.strictEqual(3, metadata.channels);
|
||||
assert.strictEqual('uchar', metadata.depth);
|
||||
assert.strictEqual('undefined', typeof metadata.density);
|
||||
assert.strictEqual('4:2:0', metadata.chromaSubsampling);
|
||||
assert.strictEqual(false, metadata.isProgressive);
|
||||
assert.strictEqual(false, metadata.hasProfile);
|
||||
assert.strictEqual(false, metadata.hasAlpha);
|
||||
assert.strictEqual('undefined', typeof metadata.orientation);
|
||||
assert.strictEqual('undefined', typeof metadata.exif);
|
||||
assert.strictEqual('undefined', typeof metadata.icc);
|
||||
done();
|
||||
}).catch(function (err) {
|
||||
throw err;
|
||||
});
|
||||
}).catch(done);
|
||||
readable.pipe(pipeline);
|
||||
});
|
||||
|
||||
@@ -241,12 +269,15 @@ describe('Image metadata', function () {
|
||||
const pipeline = sharp().metadata(function (err, metadata) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('jpeg', metadata.format);
|
||||
assert.strictEqual(829183, metadata.size);
|
||||
assert.strictEqual(2725, metadata.width);
|
||||
assert.strictEqual(2225, metadata.height);
|
||||
assert.strictEqual('srgb', metadata.space);
|
||||
assert.strictEqual(3, metadata.channels);
|
||||
assert.strictEqual('uchar', metadata.depth);
|
||||
assert.strictEqual('undefined', typeof metadata.density);
|
||||
assert.strictEqual('4:2:0', metadata.chromaSubsampling);
|
||||
assert.strictEqual(false, metadata.isProgressive);
|
||||
assert.strictEqual(false, metadata.hasProfile);
|
||||
assert.strictEqual(false, metadata.hasAlpha);
|
||||
assert.strictEqual('undefined', typeof metadata.orientation);
|
||||
@@ -262,12 +293,15 @@ describe('Image metadata', function () {
|
||||
image.metadata(function (err, metadata) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('jpeg', metadata.format);
|
||||
assert.strictEqual('undefined', typeof metadata.size);
|
||||
assert.strictEqual(2725, metadata.width);
|
||||
assert.strictEqual(2225, metadata.height);
|
||||
assert.strictEqual('srgb', metadata.space);
|
||||
assert.strictEqual(3, metadata.channels);
|
||||
assert.strictEqual('uchar', metadata.depth);
|
||||
assert.strictEqual('undefined', typeof metadata.density);
|
||||
assert.strictEqual('4:2:0', metadata.chromaSubsampling);
|
||||
assert.strictEqual(false, metadata.isProgressive);
|
||||
assert.strictEqual(false, metadata.hasProfile);
|
||||
assert.strictEqual(false, metadata.hasAlpha);
|
||||
assert.strictEqual('undefined', typeof metadata.orientation);
|
||||
@@ -346,6 +380,56 @@ describe('Image metadata', function () {
|
||||
});
|
||||
});
|
||||
|
||||
it('chromaSubsampling 4:4:4:4 CMYK JPEG', function () {
|
||||
return sharp(fixtures.inputJpgWithCmykProfile)
|
||||
.metadata()
|
||||
.then(function (metadata) {
|
||||
assert.strictEqual('4:4:4:4', metadata.chromaSubsampling);
|
||||
});
|
||||
});
|
||||
|
||||
it('chromaSubsampling 4:4:4 RGB JPEG', function () {
|
||||
return sharp(fixtures.inputJpg)
|
||||
.resize(10, 10)
|
||||
.jpeg({ chromaSubsampling: '4:4:4' })
|
||||
.toBuffer()
|
||||
.then(function (data) {
|
||||
return sharp(data)
|
||||
.metadata()
|
||||
.then(function (metadata) {
|
||||
assert.strictEqual('4:4:4', metadata.chromaSubsampling);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('isProgressive JPEG', function () {
|
||||
return sharp(fixtures.inputJpg)
|
||||
.resize(10, 10)
|
||||
.jpeg({ progressive: true })
|
||||
.toBuffer()
|
||||
.then(function (data) {
|
||||
return sharp(data)
|
||||
.metadata()
|
||||
.then(function (metadata) {
|
||||
assert.strictEqual(true, metadata.isProgressive);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('isProgressive PNG', function () {
|
||||
return sharp(fixtures.inputJpg)
|
||||
.resize(10, 10)
|
||||
.png({ progressive: true })
|
||||
.toBuffer()
|
||||
.then(function (data) {
|
||||
return sharp(data)
|
||||
.metadata()
|
||||
.then(function (metadata) {
|
||||
assert.strictEqual(true, metadata.isProgressive);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('File input with corrupt header fails gracefully', function (done) {
|
||||
sharp(fixtures.inputJpgWithCorruptHeader)
|
||||
.metadata(function (err) {
|
||||
@@ -365,12 +449,12 @@ describe('Image metadata', function () {
|
||||
describe('Invalid withMetadata parameters', function () {
|
||||
it('String orientation', function () {
|
||||
assert.throws(function () {
|
||||
sharp().withMetadata({orientation: 'zoinks'});
|
||||
sharp().withMetadata({ orientation: 'zoinks' });
|
||||
});
|
||||
});
|
||||
it('Negative orientation', function () {
|
||||
assert.throws(function () {
|
||||
sharp().withMetadata({orientation: -1});
|
||||
sharp().withMetadata({ orientation: -1 });
|
||||
});
|
||||
});
|
||||
it('Zero orientation', function () {
|
||||
@@ -380,7 +464,7 @@ describe('Image metadata', function () {
|
||||
});
|
||||
it('Too large orientation', function () {
|
||||
assert.throws(function () {
|
||||
sharp().withMetadata({orientation: 9});
|
||||
sharp().withMetadata({ orientation: 9 });
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
135
test/unit/recomb.js
Normal file
@@ -0,0 +1,135 @@
|
||||
'use strict';
|
||||
|
||||
const assert = require('assert');
|
||||
|
||||
const sharp = require('../../');
|
||||
const fixtures = require('../fixtures');
|
||||
|
||||
describe('Recomb', function () {
|
||||
it('applies a sepia filter using recomb', function (done) {
|
||||
const output = fixtures.path('output.recomb-sepia.jpg');
|
||||
sharp(fixtures.inputJpgWithLandscapeExif1)
|
||||
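// Each row of the 3x3 recomb matrix defines one output channel as a weighted sum
// of the input R, G and B values; these particular weights approximate a sepia tone.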
.recomb([
|
||||
[0.3588, 0.7044, 0.1368],
|
||||
[0.299, 0.587, 0.114],
|
||||
[0.2392, 0.4696, 0.0912]
|
||||
])
|
||||
.toFile(output, function (err, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(600, info.width);
|
||||
assert.strictEqual(450, info.height);
|
||||
fixtures.assertMaxColourDistance(
|
||||
output,
|
||||
fixtures.expected('Landscape_1-recomb-sepia.jpg'),
|
||||
17
|
||||
);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('applies a sepia filter using recomb to a PNG with alpha', function (done) {
|
||||
const output = fixtures.path('output.recomb-sepia.png');
|
||||
sharp(fixtures.inputPngAlphaPremultiplicationSmall)
|
||||
.recomb([
|
||||
[0.3588, 0.7044, 0.1368],
|
||||
[0.299, 0.587, 0.114],
|
||||
[0.2392, 0.4696, 0.0912]
|
||||
])
|
||||
.toFile(output, function (err, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(1024, info.width);
|
||||
assert.strictEqual(768, info.height);
|
||||
fixtures.assertMaxColourDistance(
|
||||
output,
|
||||
fixtures.expected('alpha-recomb-sepia.png'),
|
||||
17
|
||||
);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('applies a different sepia filter using recomb', function (done) {
|
||||
const output = fixtures.path('output.recomb-sepia2.jpg');
|
||||
sharp(fixtures.inputJpgWithLandscapeExif1)
|
||||
.recomb([
|
||||
[0.393, 0.769, 0.189],
|
||||
[0.349, 0.686, 0.168],
|
||||
[0.272, 0.534, 0.131]
|
||||
])
|
||||
.toFile(output, function (err, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(600, info.width);
|
||||
assert.strictEqual(450, info.height);
|
||||
fixtures.assertMaxColourDistance(
|
||||
output,
|
||||
fixtures.expected('Landscape_1-recomb-sepia2.jpg'),
|
||||
17
|
||||
);
|
||||
done();
|
||||
});
|
||||
});
|
||||
it('increases the saturation of the image', function (done) {
|
||||
const saturationLevel = 1;
|
||||
const output = fixtures.path('output.recomb-saturation.jpg');
|
||||
sharp(fixtures.inputJpgWithLandscapeExif1)
|
||||
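// Saturation matrix built from the Rec. 601 luma weights (0.2989, 0.587, 0.114):
// with the saturationLevel of 1 used here each output channel works out to
// 2 * input - luma, pushing every channel away from grey and so increasing saturation.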
.recomb([
|
||||
[
|
||||
saturationLevel + 1 - 0.2989,
|
||||
-0.587 * saturationLevel,
|
||||
-0.114 * saturationLevel
|
||||
],
|
||||
[
|
||||
-0.2989 * saturationLevel,
|
||||
saturationLevel + 1 - 0.587,
|
||||
-0.114 * saturationLevel
|
||||
],
|
||||
[
|
||||
-0.2989 * saturationLevel,
|
||||
-0.587 * saturationLevel,
|
||||
saturationLevel + 1 - 0.114
|
||||
]
|
||||
])
|
||||
.toFile(output, function (err, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(600, info.width);
|
||||
assert.strictEqual(450, info.height);
|
||||
fixtures.assertMaxColourDistance(
|
||||
output,
|
||||
fixtures.expected('Landscape_1-recomb-saturation.jpg'),
|
||||
37
|
||||
);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
describe('invalid matrix specification', function () {
|
||||
it('missing', function () {
|
||||
assert.throws(function () {
|
||||
sharp(fixtures.inputJpg).recomb();
|
||||
});
|
||||
});
|
||||
it('incorrect flat data', function () {
|
||||
assert.throws(function () {
|
||||
sharp(fixtures.inputJpg).recomb([1, 2, 3, 4, 5, 6, 7, 8, 9]);
|
||||
});
|
||||
});
|
||||
it('incorrect sub size', function () {
|
||||
assert.throws(function () {
|
||||
sharp(fixtures.inputJpg).recomb([
|
||||
[1, 2, 3, 4],
|
||||
[5, 6, 7, 8],
|
||||
[1, 2, 9, 6]
|
||||
]);
|
||||
});
|
||||
});
|
||||
it('incorrect top size', function () {
|
||||
assert.throws(function () {
|
||||
sharp(fixtures.inputJpg).recomb([[1, 2, 3, 4], [5, 6, 7, 8]]);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
770
test/unit/resize-contain.js
Normal file
@@ -0,0 +1,770 @@
|
||||
'use strict';
|
||||
|
||||
const assert = require('assert');
|
||||
|
||||
const sharp = require('../../');
|
||||
const fixtures = require('../fixtures');
|
||||
|
||||
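// fit: 'contain' letterboxes the image within both target dimensions, preserving
// aspect ratio and padding with the given background colour (per sharp's resize docs).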
describe('Resize fit=contain', function () {
|
||||
it('Allows specifying the position as a string', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(320, 240, {
|
||||
fit: 'contain',
|
||||
position: 'center'
|
||||
})
|
||||
.png()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(240, info.height);
|
||||
fixtures.assertSimilar(fixtures.expected('embed-3-into-3.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('JPEG within PNG, no alpha channel', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(320, 240, { fit: 'contain' })
|
||||
.png()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(240, info.height);
|
||||
assert.strictEqual(3, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('embed-3-into-3.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('JPEG within WebP, adding an alpha channel', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(320, 240, {
|
||||
fit: 'contain',
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 }
|
||||
})
|
||||
.webp()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('webp', info.format);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(240, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('embed-3-into-4.webp'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('PNG with alpha channel', function (done) {
|
||||
sharp(fixtures.inputPngWithTransparency)
|
||||
.resize(50, 50, { fit: 'contain' })
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(50, info.width);
|
||||
assert.strictEqual(50, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('embed-4-into-4.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('16-bit PNG with alpha channel', function (done) {
|
||||
sharp(fixtures.inputPngWithTransparency16bit)
|
||||
.resize(32, 16, { fit: 'contain' })
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(32, info.width);
|
||||
assert.strictEqual(16, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('embed-16bit.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('16-bit PNG with alpha channel onto RGBA', function (done) {
|
||||
sharp(fixtures.inputPngWithTransparency16bit)
|
||||
.resize(32, 16, {
|
||||
fit: 'contain',
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 }
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(32, info.width);
|
||||
assert.strictEqual(16, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('embed-16bit-rgba.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('PNG with 2 channels', function (done) {
|
||||
sharp(fixtures.inputPngWithGreyAlpha)
|
||||
.resize(32, 16, {
|
||||
fit: 'contain',
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 }
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(32, info.width);
|
||||
assert.strictEqual(16, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('embed-2channel.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it.skip('TIFF in LAB colourspace onto RGBA background', function (done) {
|
||||
sharp(fixtures.inputTiffCielab)
|
||||
.resize(64, 128, {
|
||||
fit: 'contain',
|
||||
background: { r: 255, g: 102, b: 0, alpha: 0.5 }
|
||||
})
|
||||
.png()
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(64, info.width);
|
||||
assert.strictEqual(128, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('embed-lab-into-rgba.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Enlarge', function (done) {
|
||||
sharp(fixtures.inputPngWithOneColor)
|
||||
.resize(320, 240, { fit: 'contain' })
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(240, info.height);
|
||||
assert.strictEqual(3, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('embed-enlarge.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Invalid position values should fail', function () {
|
||||
[-1, 8.1, 9, 1000000, false, 'vallejo'].forEach(function (position) {
|
||||
assert.throws(function () {
|
||||
sharp().resize(null, null, { fit: 'contain', position });
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('Position horizontal top', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: 'top'
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(100, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/a2-n.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position horizontal right top', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: 'right top'
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(100, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/a3-ne.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position horizontal right', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: 'right'
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(100, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/a4-e.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position horizontal right bottom', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: 'right bottom'
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(100, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/a5-se.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position horizontal bottom', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: 'bottom'
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(100, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/a6-s.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position horizontal left bottom', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: 'left bottom'
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(100, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/a7-sw.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position horizontal left', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: 'left'
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(100, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/a8-w.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position horizontal left top', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: 'left top'
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(100, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/a1-nw.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position horizontal north', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: sharp.gravity.north
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(100, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/a2-n.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position horizontal northeast', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: sharp.gravity.northeast
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(100, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/a3-ne.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position horizontal east', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: sharp.gravity.east
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(100, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/a4-e.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position horizontal southeast', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: sharp.gravity.southeast
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(100, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/a5-se.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position horizontal south', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: sharp.gravity.south
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(100, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/a6-s.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position horizontal southwest', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: sharp.gravity.southwest
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(100, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/a7-sw.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position horizontal west', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: sharp.gravity.west
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(100, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/a8-w.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position horizontal northwest', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: sharp.gravity.northwest
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(100, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/a1-nw.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position horizontal center', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 100, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: sharp.gravity.center
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(100, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/a9-c.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position vertical top', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: 'top'
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(200, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/2-n.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position vertical right top', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: 'right top'
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(200, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/3-ne.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position vertical right', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: 'right'
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(200, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/4-e.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position vertical right bottom', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: 'right bottom'
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(200, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/5-se.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position vertical bottom', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: 'bottom'
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(200, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/6-s.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position vertical left bottom', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: 'left bottom'
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(200, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/7-sw.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position vertical left', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: 'left'
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(200, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/8-w.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position vertical left top', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: 'left top'
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(200, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/1-nw.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position vertical north', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: sharp.gravity.north
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(200, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/2-n.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position vertical northeast', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: sharp.gravity.northeast
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(200, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/3-ne.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position vertical east', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: sharp.gravity.east
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(200, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/4-e.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position vertical southeast', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: sharp.gravity.southeast
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(200, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/5-se.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position vertical south', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: sharp.gravity.south
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(200, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/6-s.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position vertical southwest', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: sharp.gravity.southwest
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(200, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/7-sw.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position vertical west', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: sharp.gravity.west
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(200, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/8-w.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position vertical northwest', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: sharp.gravity.northwest
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(200, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/1-nw.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Position vertical center', function (done) {
|
||||
sharp(fixtures.inputPngEmbed)
|
||||
.resize(200, 200, {
|
||||
fit: sharp.fit.contain,
|
||||
background: { r: 0, g: 0, b: 0, alpha: 0 },
|
||||
position: sharp.gravity.center
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(true, data.length > 0);
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(200, info.width);
|
||||
assert.strictEqual(200, info.height);
|
||||
assert.strictEqual(4, info.channels);
|
||||
fixtures.assertSimilar(fixtures.expected('./embedgravitybird/9-c.png'), data, done);
|
||||
});
|
||||
});
|
||||
});
|
||||
383
test/unit/resize-cover.js
Normal file
@@ -0,0 +1,383 @@
|
||||
'use strict';
|
||||
|
||||
const assert = require('assert');
|
||||
|
||||
const sharp = require('../../');
|
||||
const fixtures = require('../fixtures');
|
||||
|
||||
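// fit: 'cover' crops the image so it fully covers both target dimensions; the
// position / gravity / strategy option selects which region is kept (per sharp's resize docs).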
describe('Resize fit=cover', function () {
|
||||
[
|
||||
// Position
|
||||
{
|
||||
name: 'Position: top',
|
||||
width: 320,
|
||||
height: 80,
|
||||
gravity: sharp.position.top,
|
||||
fixture: 'gravity-north.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Position: right',
|
||||
width: 80,
|
||||
height: 320,
|
||||
gravity: sharp.position.right,
|
||||
fixture: 'gravity-east.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Position: bottom',
|
||||
width: 320,
|
||||
height: 80,
|
||||
gravity: sharp.position.bottom,
|
||||
fixture: 'gravity-south.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Position: left',
|
||||
width: 80,
|
||||
height: 320,
|
||||
gravity: sharp.position.left,
|
||||
fixture: 'gravity-west.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Position: right top (top)',
|
||||
width: 320,
|
||||
height: 80,
|
||||
gravity: sharp.position['right top'],
|
||||
fixture: 'gravity-north.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Position: right top (right)',
|
||||
width: 80,
|
||||
height: 320,
|
||||
gravity: sharp.position['right top'],
|
||||
fixture: 'gravity-east.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Position: right bottom (bottom)',
|
||||
width: 320,
|
||||
height: 80,
|
||||
gravity: sharp.position['right bottom'],
|
||||
fixture: 'gravity-south.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Position: right bottom (right)',
|
||||
width: 80,
|
||||
height: 320,
|
||||
gravity: sharp.position['right bottom'],
|
||||
fixture: 'gravity-east.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Position: left bottom (bottom)',
|
||||
width: 320,
|
||||
height: 80,
|
||||
gravity: sharp.position['left bottom'],
|
||||
fixture: 'gravity-south.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Position: left bottom (left)',
|
||||
width: 80,
|
||||
height: 320,
|
||||
gravity: sharp.position['left bottom'],
|
||||
fixture: 'gravity-west.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Position: left top (top)',
|
||||
width: 320,
|
||||
height: 80,
|
||||
gravity: sharp.position['left top'],
|
||||
fixture: 'gravity-north.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Position: left top (left)',
|
||||
width: 80,
|
||||
height: 320,
|
||||
gravity: sharp.position['left top'],
|
||||
fixture: 'gravity-west.jpg'
|
||||
},
|
||||
// Gravity
|
||||
{
|
||||
name: 'Gravity: north',
|
||||
width: 320,
|
||||
height: 80,
|
||||
gravity: sharp.gravity.north,
|
||||
fixture: 'gravity-north.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Gravity: east',
|
||||
width: 80,
|
||||
height: 320,
|
||||
gravity: sharp.gravity.east,
|
||||
fixture: 'gravity-east.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Gravity: south',
|
||||
width: 320,
|
||||
height: 80,
|
||||
gravity: sharp.gravity.south,
|
||||
fixture: 'gravity-south.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Gravity: west',
|
||||
width: 80,
|
||||
height: 320,
|
||||
gravity: sharp.gravity.west,
|
||||
fixture: 'gravity-west.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Gravity: center',
|
||||
width: 320,
|
||||
height: 80,
|
||||
gravity: sharp.gravity.center,
|
||||
fixture: 'gravity-center.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Gravity: centre',
|
||||
width: 80,
|
||||
height: 320,
|
||||
gravity: sharp.gravity.centre,
|
||||
fixture: 'gravity-centre.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Default (centre)',
|
||||
width: 80,
|
||||
height: 320,
|
||||
gravity: undefined,
|
||||
fixture: 'gravity-centre.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Gravity: northeast (north)',
|
||||
width: 320,
|
||||
height: 80,
|
||||
gravity: sharp.gravity.northeast,
|
||||
fixture: 'gravity-north.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Gravity: northeast (east)',
|
||||
width: 80,
|
||||
height: 320,
|
||||
gravity: sharp.gravity.northeast,
|
||||
fixture: 'gravity-east.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Gravity: southeast (south)',
|
||||
width: 320,
|
||||
height: 80,
|
||||
gravity: sharp.gravity.southeast,
|
||||
fixture: 'gravity-south.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Gravity: southeast (east)',
|
||||
width: 80,
|
||||
height: 320,
|
||||
gravity: sharp.gravity.southeast,
|
||||
fixture: 'gravity-east.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Gravity: southwest (south)',
|
||||
width: 320,
|
||||
height: 80,
|
||||
gravity: sharp.gravity.southwest,
|
||||
fixture: 'gravity-south.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Gravity: southwest (west)',
|
||||
width: 80,
|
||||
height: 320,
|
||||
gravity: sharp.gravity.southwest,
|
||||
fixture: 'gravity-west.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Gravity: northwest (north)',
|
||||
width: 320,
|
||||
height: 80,
|
||||
gravity: sharp.gravity.northwest,
|
||||
fixture: 'gravity-north.jpg'
|
||||
},
|
||||
{
|
||||
name: 'Gravity: northwest (west)',
|
||||
width: 80,
|
||||
height: 320,
|
||||
gravity: sharp.gravity.northwest,
|
||||
fixture: 'gravity-west.jpg'
|
||||
}
|
||||
].forEach(function (settings) {
|
||||
it(settings.name, function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(settings.width, settings.height, {
|
||||
fit: sharp.fit.cover,
|
||||
position: settings.gravity
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(settings.width, info.width);
|
||||
assert.strictEqual(settings.height, info.height);
|
||||
fixtures.assertSimilar(fixtures.expected(settings.fixture), data, done);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('Allows specifying the gravity as a string', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(80, 320, {
|
||||
fit: sharp.fit.cover,
|
||||
position: 'east'
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual(80, info.width);
|
||||
assert.strictEqual(320, info.height);
|
||||
fixtures.assertSimilar(fixtures.expected('gravity-east.jpg'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('Invalid position values fail', function () {
|
||||
assert.throws(function () {
|
||||
sharp().resize(null, null, { fit: 'cover', position: 9 });
|
||||
}, /Expected valid position\/gravity\/strategy for position but received 9 of type number/);
|
||||
assert.throws(function () {
|
||||
sharp().resize(null, null, { fit: 'cover', position: 1.1 });
|
||||
}, /Expected valid position\/gravity\/strategy for position but received 1.1 of type number/);
|
||||
assert.throws(function () {
|
||||
sharp().resize(null, null, { fit: 'cover', position: -1 });
|
||||
}, /Expected valid position\/gravity\/strategy for position but received -1 of type number/);
|
||||
assert.throws(function () {
|
||||
sharp().resize(null, null, { fit: 'cover', position: 'zoinks' }).crop();
|
||||
}, /Expected valid position\/gravity\/strategy for position but received zoinks of type string/);
|
||||
});
|
||||
|
||||
it('Uses default value when none specified', function () {
|
||||
assert.doesNotThrow(function () {
|
||||
sharp().resize(null, null, { fit: 'cover' });
|
||||
});
|
||||
});
|
||||
|
||||
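// cropOffsetLeft / cropOffsetTop are only reported when a crop actually occurs,
// so they should be undefined when the resize already matches the target size.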
it('Skip crop when post-resize dimensions are at target', function () {
|
||||
return sharp(fixtures.inputJpg)
|
||||
.resize(1600, 1200)
|
||||
.toBuffer()
|
||||
.then(function (input) {
|
||||
return sharp(input)
|
||||
.resize(1110, null, {
|
||||
fit: sharp.fit.cover,
|
||||
position: sharp.strategy.attention
|
||||
})
|
||||
.toBuffer({ resolveWithObject: true })
|
||||
.then(function (result) {
|
||||
assert.strictEqual(1110, result.info.width);
|
||||
assert.strictEqual(832, result.info.height);
|
||||
assert.strictEqual(undefined, result.info.cropOffsetLeft);
|
||||
assert.strictEqual(undefined, result.info.cropOffsetTop);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
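// The entropy strategy keeps the region of the image with the highest Shannon
// entropy (as described in sharp's cropping documentation).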
describe('Entropy-based strategy', function () {
|
||||
it('JPEG', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(80, 320, {
|
||||
fit: 'cover',
|
||||
position: sharp.strategy.entropy
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(3, info.channels);
|
||||
assert.strictEqual(80, info.width);
|
||||
assert.strictEqual(320, info.height);
|
||||
assert.strictEqual(-117, info.cropOffsetLeft);
|
||||
assert.strictEqual(0, info.cropOffsetTop);
|
||||
fixtures.assertSimilar(fixtures.expected('crop-strategy-entropy.jpg'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('PNG', function (done) {
|
||||
sharp(fixtures.inputPngWithTransparency)
|
||||
.resize(320, 80, {
|
||||
fit: 'cover',
|
||||
position: sharp.strategy.entropy
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(4, info.channels);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(80, info.height);
|
||||
assert.strictEqual(0, info.cropOffsetLeft);
|
||||
assert.strictEqual(-80, info.cropOffsetTop);
|
||||
fixtures.assertSimilar(fixtures.expected('crop-strategy.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('supports the strategy passed as a string', function (done) {
|
||||
sharp(fixtures.inputPngWithTransparency)
|
||||
.resize(320, 80, {
|
||||
fit: 'cover',
|
||||
position: 'entropy'
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(4, info.channels);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(80, info.height);
|
||||
assert.strictEqual(0, info.cropOffsetLeft);
|
||||
assert.strictEqual(-80, info.cropOffsetTop);
|
||||
fixtures.assertSimilar(fixtures.expected('crop-strategy.png'), data, done);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
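// The attention strategy keeps the most "salient" region, weighting luminance
// frequency, colour saturation and skin tones (as described in sharp's cropping documentation).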
describe('Attention strategy', function () {
|
||||
it('JPEG', function (done) {
|
||||
sharp(fixtures.inputJpg)
|
||||
.resize(80, 320, {
|
||||
fit: 'cover',
|
||||
position: sharp.strategy.attention
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('jpeg', info.format);
|
||||
assert.strictEqual(3, info.channels);
|
||||
assert.strictEqual(80, info.width);
|
||||
assert.strictEqual(320, info.height);
|
||||
assert.strictEqual(-143, info.cropOffsetLeft);
|
||||
assert.strictEqual(0, info.cropOffsetTop);
|
||||
fixtures.assertSimilar(fixtures.expected('crop-strategy-attention.jpg'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('PNG', function (done) {
|
||||
sharp(fixtures.inputPngWithTransparency)
|
||||
.resize(320, 80, {
|
||||
fit: 'cover',
|
||||
position: sharp.strategy.attention
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(4, info.channels);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(80, info.height);
|
||||
assert.strictEqual(0, info.cropOffsetLeft);
|
||||
assert.strictEqual(0, info.cropOffsetTop);
|
||||
fixtures.assertSimilar(fixtures.expected('crop-strategy.png'), data, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('supports the strategy passed as a string', function (done) {
|
||||
sharp(fixtures.inputPngWithTransparency)
|
||||
.resize(320, 80, {
|
||||
fit: 'cover',
|
||||
position: 'attention'
|
||||
})
|
||||
.toBuffer(function (err, data, info) {
|
||||
if (err) throw err;
|
||||
assert.strictEqual('png', info.format);
|
||||
assert.strictEqual(4, info.channels);
|
||||
assert.strictEqual(320, info.width);
|
||||
assert.strictEqual(80, info.height);
|
||||
assert.strictEqual(0, info.cropOffsetLeft);
|
||||
assert.strictEqual(0, info.cropOffsetTop);
|
||||
fixtures.assertSimilar(fixtures.expected('crop-strategy.png'), data, done);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -130,10 +130,28 @@ describe('Resize dimensions', function () {
});
});

it('JPEG shrink-on-load with 90 degree rotation, ensure recalculation is correct', function (done) {
sharp(fixtures.inputJpg)
.resize(1920, 1280)
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(1920, info.width);
assert.strictEqual(1280, info.height);
sharp(data)
.rotate(90)
.resize(533, 800)
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(533, info.width);
assert.strictEqual(800, info.height);
done();
});
});
});

it('TIFF embed known to cause rounding errors', function (done) {
sharp(fixtures.inputTiff)
.resize(240, 320)
.embed()
.resize(240, 320, { fit: sharp.fit.contain })
.jpeg()
.toBuffer(function (err, data, info) {
if (err) throw err;
@@ -159,10 +177,9 @@ describe('Resize dimensions', function () {
});
});

it('Max width or height considering ratio (portrait)', function (done) {
it('fit=inside, portrait', function (done) {
sharp(fixtures.inputTiff)
.resize(320, 320)
.max()
.resize(320, 320, { fit: sharp.fit.inside })
.jpeg()
.toBuffer(function (err, data, info) {
if (err) throw err;
@@ -174,10 +191,9 @@ describe('Resize dimensions', function () {
});
});

it('Min width or height considering ratio (portrait)', function (done) {
it('fit=outside, portrait', function (done) {
sharp(fixtures.inputTiff)
.resize(320, 320)
.min()
.resize(320, 320, { fit: sharp.fit.outside })
.jpeg()
.toBuffer(function (err, data, info) {
if (err) throw err;
@@ -189,10 +205,9 @@ describe('Resize dimensions', function () {
});
});

it('Max width or height considering ratio (landscape)', function (done) {
it('fit=inside, landscape', function (done) {
sharp(fixtures.inputJpg)
.resize(320, 320)
.max()
.resize(320, 320, { fit: sharp.fit.inside })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
@@ -203,24 +218,9 @@ describe('Resize dimensions', function () {
});
});

it('Provide only one dimension with max, should default to crop', function (done) {
it('fit=outside, landscape', function (done) {
sharp(fixtures.inputJpg)
.resize(320)
.max()
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
assert.strictEqual('jpeg', info.format);
assert.strictEqual(320, info.width);
assert.strictEqual(261, info.height);
done();
});
});

it('Min width or height considering ratio (landscape)', function (done) {
sharp(fixtures.inputJpg)
.resize(320, 320)
.min()
.resize(320, 320, { fit: sharp.fit.outside })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
@@ -231,10 +231,28 @@ describe('Resize dimensions', function () {
});
});

it('Provide only one dimension with min, should default to crop', function (done) {
it('fit=inside, provide only one dimension', function (done) {
sharp(fixtures.inputJpg)
.resize(320)
.min()
.resize({
width: 320,
fit: sharp.fit.inside
})
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
assert.strictEqual('jpeg', info.format);
assert.strictEqual(320, info.width);
assert.strictEqual(261, info.height);
done();
});
});

it('fit=outside, provide only one dimension', function (done) {
sharp(fixtures.inputJpg)
.resize({
width: 320,
fit: sharp.fit.outside
})
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
@@ -247,8 +265,10 @@ describe('Resize dimensions', function () {

it('Do not enlarge when input width is already less than output width', function (done) {
sharp(fixtures.inputJpg)
.resize(2800)
.withoutEnlargement()
.resize({
width: 2800,
withoutEnlargement: true
})
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
@@ -261,8 +281,10 @@ describe('Resize dimensions', function () {

it('Do not enlarge when input height is already less than output height', function (done) {
sharp(fixtures.inputJpg)
.resize(null, 2300)
.withoutEnlargement()
.resize({
height: 2300,
withoutEnlargement: true
})
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
@@ -275,8 +297,10 @@ describe('Resize dimensions', function () {

it('Do enlarge when input width is less than output width', function (done) {
sharp(fixtures.inputJpg)
.resize(2800)
.withoutEnlargement(false)
.resize({
width: 2800,
withoutEnlargement: false
})
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
@@ -287,103 +311,127 @@ describe('Resize dimensions', function () {
});
});
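The hunks above move `withoutEnlargement()` into the resize options object. A minimal sketch of the new call shape, assuming an input around 2725px wide like the test fixture (file name hypothetical):

```js
const sharp = require('sharp');

// Ask for a 2800px-wide output but refuse to enlarge: a smaller source
// is returned at its original width instead of being upscaled.
sharp('large-photo.jpg') // hypothetical input path
  .resize({ width: 2800, withoutEnlargement: true })
  .toBuffer(function (err, data, info) {
    if (err) throw err;
    console.log(info.width); // 2725 for the fixture used in these tests
  });
```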
it('Downscale width and height, ignoring aspect ratio', function (done) {
sharp(fixtures.inputJpg).resize(320, 320).ignoreAspectRatio().toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
assert.strictEqual('jpeg', info.format);
assert.strictEqual(320, info.width);
assert.strictEqual(320, info.height);
done();
});
it('fit=fill, downscale width and height', function (done) {
sharp(fixtures.inputJpg)
.resize(320, 320, { fit: 'fill' })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
assert.strictEqual('jpeg', info.format);
assert.strictEqual(320, info.width);
assert.strictEqual(320, info.height);
done();
});
});

it('Downscale width, ignoring aspect ratio', function (done) {
sharp(fixtures.inputJpg).resize(320).ignoreAspectRatio().toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
assert.strictEqual('jpeg', info.format);
assert.strictEqual(320, info.width);
assert.strictEqual(2225, info.height);
done();
});
it('fit=fill, downscale width', function (done) {
sharp(fixtures.inputJpg)
.resize({
width: 320,
fit: 'fill'
})
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
assert.strictEqual('jpeg', info.format);
assert.strictEqual(320, info.width);
assert.strictEqual(2225, info.height);
done();
});
});

it('Downscale height, ignoring aspect ratio', function (done) {
sharp(fixtures.inputJpg).resize(null, 320).ignoreAspectRatio().toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
assert.strictEqual('jpeg', info.format);
assert.strictEqual(2725, info.width);
assert.strictEqual(320, info.height);
done();
});
it('fit=fill, downscale height', function (done) {
sharp(fixtures.inputJpg)
.resize({
height: 320,
fit: 'fill'
})
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
assert.strictEqual('jpeg', info.format);
assert.strictEqual(2725, info.width);
assert.strictEqual(320, info.height);
done();
});
});

it('Upscale width and height, ignoring aspect ratio', function (done) {
sharp(fixtures.inputJpg).resize(3000, 3000).ignoreAspectRatio().toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
assert.strictEqual('jpeg', info.format);
assert.strictEqual(3000, info.width);
assert.strictEqual(3000, info.height);
done();
});
it('fit=fill, upscale width and height', function (done) {
sharp(fixtures.inputJpg)
.resize(3000, 3000, { fit: 'fill' })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
assert.strictEqual('jpeg', info.format);
assert.strictEqual(3000, info.width);
assert.strictEqual(3000, info.height);
done();
});
});

it('Upscale width, ignoring aspect ratio', function (done) {
sharp(fixtures.inputJpg).resize(3000).ignoreAspectRatio().toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
assert.strictEqual('jpeg', info.format);
assert.strictEqual(3000, info.width);
assert.strictEqual(2225, info.height);
done();
});
it('fit=fill, upscale width', function (done) {
sharp(fixtures.inputJpg)
.resize(3000, null, { fit: 'fill' })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
assert.strictEqual('jpeg', info.format);
assert.strictEqual(3000, info.width);
assert.strictEqual(2225, info.height);
done();
});
});

it('Upscale height, ignoring aspect ratio', function (done) {
sharp(fixtures.inputJpg).resize(null, 3000).ignoreAspectRatio().toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
assert.strictEqual('jpeg', info.format);
assert.strictEqual(2725, info.width);
assert.strictEqual(3000, info.height);
done();
});
it('fit=fill, upscale height', function (done) {
sharp(fixtures.inputJpg)
.resize(null, 3000, { fit: 'fill' })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
assert.strictEqual('jpeg', info.format);
assert.strictEqual(2725, info.width);
assert.strictEqual(3000, info.height);
done();
});
});

it('Downscale width, upscale height, ignoring aspect ratio', function (done) {
sharp(fixtures.inputJpg).resize(320, 3000).ignoreAspectRatio().toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
assert.strictEqual('jpeg', info.format);
assert.strictEqual(320, info.width);
assert.strictEqual(3000, info.height);
done();
});
it('fit=fill, downscale width, upscale height', function (done) {
sharp(fixtures.inputJpg)
.resize(320, 3000, { fit: 'fill' })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
assert.strictEqual('jpeg', info.format);
assert.strictEqual(320, info.width);
assert.strictEqual(3000, info.height);
done();
});
});

it('Upscale width, downscale height, ignoring aspect ratio', function (done) {
sharp(fixtures.inputJpg).resize(3000, 320).ignoreAspectRatio().toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
assert.strictEqual('jpeg', info.format);
assert.strictEqual(3000, info.width);
assert.strictEqual(320, info.height);
done();
});
it('fit=fill, upscale width, downscale height', function (done) {
sharp(fixtures.inputJpg)
.resize(3000, 320, { fit: 'fill' })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
assert.strictEqual('jpeg', info.format);
assert.strictEqual(3000, info.width);
assert.strictEqual(320, info.height);
done();
});
});

it('Identity transform, ignoring aspect ratio', function (done) {
sharp(fixtures.inputJpg).ignoreAspectRatio().toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
assert.strictEqual('jpeg', info.format);
assert.strictEqual(2725, info.width);
assert.strictEqual(2225, info.height);
done();
});
it('fit=fill, identity transform', function (done) {
sharp(fixtures.inputJpg)
.resize(null, null, { fit: 'fill' })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
assert.strictEqual('jpeg', info.format);
assert.strictEqual(2725, info.width);
assert.strictEqual(2225, info.height);
done();
});
});
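These hunks replace `ignoreAspectRatio()` with `fit: 'fill'`, which stretches the image to the exact target dimensions. A minimal sketch of that option, with a hypothetical input path:

```js
const sharp = require('sharp');

// Force an exact 320x320 output, distorting the aspect ratio if necessary
// (the options-based equivalent of the old ignoreAspectRatio()).
sharp('photo.jpg') // hypothetical input path
  .resize(320, 320, { fit: 'fill' })
  .toBuffer(function (err, data, info) {
    if (err) throw err;
    console.log(info.width + 'x' + info.height); // 320x320 regardless of input shape
  });
```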
it('Dimensions that result in differing even shrinks on each axis', function (done) {
@@ -449,6 +497,7 @@ describe('Resize dimensions', function () {
[
sharp.kernel.nearest,
sharp.kernel.cubic,
sharp.kernel.mitchell,
sharp.kernel.lanczos2,
sharp.kernel.lanczos3
].forEach(function (kernel) {
@@ -481,4 +530,16 @@ describe('Resize dimensions', function () {
sharp().resize(null, null, { kernel: 'unknown' });
});
});

it('unknown fit throws', function () {
assert.throws(function () {
sharp().resize(null, null, { fit: 'unknown' });
});
});

it('unknown position throws', function () {
assert.throws(function () {
sharp().resize(null, null, { position: 'unknown' });
});
});
});
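The final resize hunks iterate over the available reduction kernels and assert that unrecognised `fit` or `position` values throw synchronously. A small sketch of both behaviours, with a hypothetical input path:

```js
const assert = require('assert');
const sharp = require('sharp');

// Pick a specific reduction kernel for the resize operation.
sharp('photo.jpg') // hypothetical input path
  .resize(320, 240, { kernel: sharp.kernel.lanczos3 })
  .toBuffer(function (err, data, info) {
    if (err) throw err;
    console.log('resized with lanczos3:', info.width, info.height);
  });

// Unrecognised option values are rejected before any processing starts.
assert.throws(function () {
  sharp().resize(null, null, { fit: 'unknown' });
});
assert.throws(function () {
  sharp().resize(null, null, { position: 'unknown' });
});
```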
@@ -23,6 +23,33 @@ describe('Rotation', function () {
});
});

it('Rotate by 30 degrees with semi-transparent background', function (done) {
sharp(fixtures.inputJpg)
.rotate(30, { background: { r: 255, g: 0, b: 0, alpha: 0.5 } })
.resize(320)
.png()
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('png', info.format);
assert.strictEqual(408, info.width);
assert.strictEqual(386, info.height);
fixtures.assertSimilar(fixtures.expected('rotate-transparent-bg.png'), data, done);
});
});

it('Rotate by 30 degrees with solid background', function (done) {
sharp(fixtures.inputJpg)
.rotate(30, { background: { r: 255, g: 0, b: 0, alpha: 0.5 } })
.resize(320)
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('jpeg', info.format);
assert.strictEqual(408, info.width);
assert.strictEqual(386, info.height);
fixtures.assertSimilar(fixtures.expected('rotate-solid-bg.jpg'), data, done);
});
});

it('Rotate by 90 degrees, respecting output input size', function (done) {
sharp(fixtures.inputJpg).rotate(90).resize(320, 240).toBuffer(function (err, data, info) {
if (err) throw err;
@@ -34,6 +61,17 @@ describe('Rotation', function () {
});
});

it('Rotate by 30 degrees, respecting output input size', function (done) {
sharp(fixtures.inputJpg).rotate(30).resize(320, 240).toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(true, data.length > 0);
assert.strictEqual('jpeg', info.format);
assert.strictEqual(397, info.width);
assert.strictEqual(368, info.height);
done();
});
});

[-3690, -450, -90, 90, 450, 3690].forEach(function (angle) {
it('Rotate by any 90-multiple angle (' + angle + 'deg)', function (done) {
sharp(fixtures.inputJpg320x240).rotate(angle).toBuffer(function (err, data, info) {
@@ -45,6 +83,17 @@ describe('Rotation', function () {
});
});

[-3750, -510, -150, 30, 390, 3630].forEach(function (angle) {
it('Rotate by any 30-multiple angle (' + angle + 'deg)', function (done) {
sharp(fixtures.inputJpg320x240).rotate(angle).toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(397, info.width);
assert.strictEqual(368, info.height);
done();
});
});
});

[-3780, -540, 0, 180, 540, 3780].forEach(function (angle) {
it('Rotate by any 180-multiple angle (' + angle + 'deg)', function (done) {
sharp(fixtures.inputJpg320x240).rotate(angle).toBuffer(function (err, data, info) {
@@ -58,8 +107,7 @@ describe('Rotation', function () {

it('Rotate by 270 degrees, square output ignoring aspect ratio', function (done) {
sharp(fixtures.inputJpg)
.resize(240, 240)
.ignoreAspectRatio()
.resize(240, 240, { fit: sharp.fit.fill })
.rotate(270)
.toBuffer(function (err, data, info) {
if (err) throw err;
@@ -74,10 +122,26 @@ describe('Rotation', function () {
});
});

it('Rotate by 315 degrees, square output ignoring aspect ratio', function (done) {
sharp(fixtures.inputJpg)
.resize(240, 240, { fit: sharp.fit.fill })
.rotate(315)
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(339, info.width);
assert.strictEqual(339, info.height);
sharp(data).metadata(function (err, metadata) {
if (err) throw err;
assert.strictEqual(339, metadata.width);
assert.strictEqual(339, metadata.height);
done();
});
});
});

it('Rotate by 270 degrees, rectangular output ignoring aspect ratio', function (done) {
sharp(fixtures.inputJpg)
.resize(320, 240)
.ignoreAspectRatio()
.resize(320, 240, { fit: sharp.fit.fill })
.rotate(270)
.toBuffer(function (err, data, info) {
if (err) throw err;
@@ -92,6 +156,23 @@ describe('Rotation', function () {
});
});

it('Rotate by 30 degrees, rectangular output ignoring aspect ratio', function (done) {
sharp(fixtures.inputJpg)
.resize(320, 240, { fit: sharp.fit.fill })
.rotate(30)
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual(397, info.width);
assert.strictEqual(368, info.height);
sharp(data).metadata(function (err, metadata) {
if (err) throw err;
assert.strictEqual(397, metadata.width);
assert.strictEqual(368, metadata.height);
done();
});
});
});

it('Input image has Orientation EXIF tag but do not rotate output', function (done) {
sharp(fixtures.inputJpgWithExif)
.resize(320)
@@ -127,7 +208,7 @@ describe('Rotation', function () {
sharp(fixtures.inputJpgWithExif)
.rotate()
.resize(320)
.withMetadata({orientation: 3})
.withMetadata({ orientation: 3 })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('jpeg', info.format);
@@ -185,9 +266,9 @@ describe('Rotation', function () {
});
});

it('Rotate to an invalid angle, should fail', function () {
it('Rotate with a string argument, should fail', function () {
assert.throws(function () {
sharp(fixtures.inputJpg).rotate(1);
sharp(fixtures.inputJpg).rotate('not-a-number');
});
});
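The new rotation tests accept arbitrary angles and fill the exposed corners via the `background` option; the output grows to the bounding box of the rotated image (320x240 becomes 397x368 at 30 degrees, per the assertions above). A minimal sketch, with a hypothetical input path:

```js
const sharp = require('sharp');

// Rotate 30 degrees; corners outside the original frame are filled with a
// semi-transparent red. The alpha is only kept if the output format supports it (e.g. PNG).
sharp('photo.jpg') // hypothetical input path
  .rotate(30, { background: { r: 255, g: 0, b: 0, alpha: 0.5 } })
  .png()
  .toBuffer(function (err, data, info) {
    if (err) throw err;
    console.log(info.width, info.height); // larger than the input: the rotated bounding box
  });
```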
@@ -24,6 +24,7 @@ describe('Image Stats', function () {
if (err) throw err;

assert.strictEqual(true, stats.isOpaque);
assert.strictEqual(true, isInAcceptableRange(stats.entropy, 7.319914765248541));

// red channel
assert.strictEqual(0, stats.channels[0]['min']);
@@ -82,6 +83,7 @@ describe('Image Stats', function () {
if (err) throw err;

assert.strictEqual(true, stats.isOpaque);
assert.strictEqual(true, isInAcceptableRange(stats.entropy, 0.3409031108021736));

// red channel
assert.strictEqual(0, stats.channels[0]['min']);
@@ -105,7 +107,9 @@ describe('Image Stats', function () {
it('PNG with transparency', function (done) {
sharp(fixtures.inputPngWithTransparency).stats(function (err, stats) {
if (err) throw err;

assert.strictEqual(false, stats.isOpaque);
assert.strictEqual(true, isInAcceptableRange(stats.entropy, 0.06778064835816622));

// red channel
assert.strictEqual(0, stats.channels[0]['min']);
@@ -180,6 +184,7 @@ describe('Image Stats', function () {
if (err) throw err;

assert.strictEqual(false, stats.isOpaque);
assert.strictEqual(true, isInAcceptableRange(stats.entropy, 0));

// alpha channel
assert.strictEqual(0, stats.channels[3]['min']);
@@ -204,7 +209,9 @@ describe('Image Stats', function () {
it('Tiff', function (done) {
sharp(fixtures.inputTiff).stats(function (err, stats) {
if (err) throw err;

assert.strictEqual(true, stats.isOpaque);
assert.strictEqual(true, isInAcceptableRange(stats.entropy, 0.3851250782608986));

// red channel
assert.strictEqual(0, stats.channels[0]['min']);
@@ -231,6 +238,7 @@ describe('Image Stats', function () {
if (err) throw err;

assert.strictEqual(true, stats.isOpaque);
assert.strictEqual(true, isInAcceptableRange(stats.entropy, 7.51758075132966));

// red channel
assert.strictEqual(0, stats.channels[0]['min']);
@@ -289,6 +297,7 @@ describe('Image Stats', function () {
if (err) throw err;

assert.strictEqual(true, stats.isOpaque);
assert.strictEqual(true, isInAcceptableRange(stats.entropy, 6.087309412541799));

// red channel
assert.strictEqual(35, stats.channels[0]['min']);
@@ -345,7 +354,9 @@ describe('Image Stats', function () {
it('Grayscale GIF with alpha', function (done) {
sharp(fixtures.inputGifGreyPlusAlpha).stats(function (err, stats) {
if (err) throw err;

assert.strictEqual(false, stats.isOpaque);
assert.strictEqual(true, isInAcceptableRange(stats.entropy, 1));

// gray channel
assert.strictEqual(0, stats.channels[0]['min']);
@@ -387,7 +398,9 @@ describe('Image Stats', function () {
const readable = fs.createReadStream(fixtures.inputJpg);
const pipeline = sharp().stats(function (err, stats) {
if (err) throw err;

assert.strictEqual(true, stats.isOpaque);
assert.strictEqual(true, isInAcceptableRange(stats.entropy, 7.319914765248541));

// red channel
assert.strictEqual(0, stats.channels[0]['min']);
@@ -449,6 +462,7 @@ describe('Image Stats', function () {

return pipeline.stats().then(function (stats) {
assert.strictEqual(true, stats.isOpaque);
assert.strictEqual(true, isInAcceptableRange(stats.entropy, 7.319914765248541));

// red channel
assert.strictEqual(0, stats.channels[0]['min']);
@@ -505,6 +519,7 @@ describe('Image Stats', function () {
it('File in, Promise out', function () {
return sharp(fixtures.inputJpg).stats().then(function (stats) {
assert.strictEqual(true, stats.isOpaque);
assert.strictEqual(true, isInAcceptableRange(stats.entropy, 7.319914765248541));

// red channel
assert.strictEqual(0, stats.channels[0]['min']);
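These hunks add an `entropy` value to the existing `stats()` output alongside `isOpaque` and the per-channel statistics. A minimal sketch of reading those fields; the input path is hypothetical and the description of entropy as a greyscale histogram estimate is an assumption based on these assertions:

```js
const sharp = require('sharp');

sharp('photo.jpg') // hypothetical input path
  .stats()
  .then(function (stats) {
    console.log(stats.isOpaque);                        // true when there is no meaningful alpha
    console.log(stats.entropy);                         // assumed: greyscale entropy estimate asserted above
    console.log(stats.channels[0].min, stats.channels[0].max); // per-channel statistics
  });
```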
@@ -132,7 +132,7 @@ describe('Threshold', function () {
it('color threshold', function (done) {
sharp(fixtures.inputJpg)
.resize(320, 240)
.threshold(128, {'grayscale': false})
.threshold(128, { grayscale: false })
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('jpeg', info.format);
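The threshold hunk is a formatting-only change to the options object. For reference, a minimal sketch of the call it exercises, with a hypothetical input path; `grayscale: false` thresholds each channel separately rather than a greyscale copy:

```js
const sharp = require('sharp');

// Threshold each of R, G and B at 128 independently.
sharp('photo.jpg') // hypothetical input path
  .threshold(128, { grayscale: false })
  .toBuffer(function (err, data, info) {
    if (err) throw err;
    console.log(info.format);
  });
```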
@@ -46,6 +46,51 @@ const assertDeepZoomTiles = function (directory, expectedSize, expectedLevels, d
}, done);
};

const assertZoomifyTiles = function (directory, expectedTileSize, expectedLevels, done) {
fs.stat(path.join(directory, 'ImageProperties.xml'), function (err, stat) {
if (err) throw err;
assert.ok(stat.isFile());
assert.ok(stat.size > 0);

let maxTileLevel = -1;
fs.readdirSync(path.join(directory, 'TileGroup0')).forEach(function (tile) {
// Verify tile file name
assert.ok(/^[0-9]+-[0-9]+-[0-9]+\.jpg$/.test(tile));
let level = parseInt(tile.split('-')[0]);
maxTileLevel = Math.max(maxTileLevel, level);
});

assert.strictEqual(maxTileLevel + 1, expectedLevels); // add one to account for zero level tile

done();
});
};

const assertGoogleTiles = function (directory, expectedTileSize, expectedLevels, done) {
const levels = fs.readdirSync(directory);
assert.strictEqual(expectedLevels, levels.length - 1); // subtract one to account for default blank tile

fs.stat(path.join(directory, 'blank.png'), function (err, stat) {
if (err) throw err;
assert.ok(stat.isFile());
assert.ok(stat.size > 0);

// Basic check to confirm lowest and highest level tiles exist
fs.stat(path.join(directory, '0', '0', '0.jpg'), function (err, stat) {
if (err) throw err;
assert.strictEqual(true, stat.isFile());
assert.strictEqual(true, stat.size > 0);

fs.stat(path.join(directory, (expectedLevels - 1).toString(), '0', '0.jpg'), function (err, stat) {
if (err) throw err;
assert.strictEqual(true, stat.isFile());
assert.strictEqual(true, stat.size > 0);
done();
});
});
});
};

describe('Tile', function () {
it('Valid size values pass', function () {
[1, 8192].forEach(function (size) {
@@ -144,6 +189,26 @@ describe('Tile', function () {
});
});

it('Valid depths pass', function () {
['onepixel', 'onetile', 'one'].forEach(function (depth) {
assert.doesNotThrow(function (depth) {
sharp().tile({
depth: depth
});
});
});
});

it('Invalid depths fail', function () {
['depth', 1].forEach(function (depth) {
assert.throws(function () {
sharp().tile({
depth: depth
});
});
});
});

it('Prevent larger overlap than default size', function () {
assert.throws(function () {
sharp().tile({
@@ -251,6 +316,54 @@ describe('Tile', function () {
});
});

it('Deep Zoom layout with depth of one', function (done) {
const directory = fixtures.path('output.512_depth_one.dzi_files');
rimraf(directory, function () {
sharp(fixtures.inputJpg)
.tile({
size: 512,
depth: 'one'
})
.toFile(fixtures.path('output.512_depth_one.dzi'), function (err, info) {
if (err) throw err;
// Verify only one depth generated
assertDeepZoomTiles(directory, 512, 1, done);
});
});
});

it('Deep Zoom layout with depth of onepixel', function (done) {
const directory = fixtures.path('output.512_depth_onepixel.dzi_files');
rimraf(directory, function () {
sharp(fixtures.inputJpg)
.tile({
size: 512,
depth: 'onepixel'
})
.toFile(fixtures.path('output.512_depth_onepixel.dzi'), function (err, info) {
if (err) throw err;
// Verify only one depth generated
assertDeepZoomTiles(directory, 512, 13, done);
});
});
});

it('Deep Zoom layout with depth of onetile', function (done) {
const directory = fixtures.path('output.256_depth_onetile.dzi_files');
rimraf(directory, function () {
sharp(fixtures.inputJpg)
.tile({
size: 256,
depth: 'onetile'
})
.toFile(fixtures.path('output.256_depth_onetile.dzi'), function (err, info) {
if (err) throw err;
// Verify only one depth generated
assertDeepZoomTiles(directory, 256, 5, done);
});
});
});

it('Zoomify layout', function (done) {
const directory = fixtures.path('output.zoomify.dzi');
rimraf(directory, function () {
@@ -275,6 +388,69 @@ describe('Tile', function () {
});
});

it('Zoomify layout with depth one', function (done) {
const directory = fixtures.path('output.zoomify.depth_one.dzi');
rimraf(directory, function () {
sharp(fixtures.inputJpg)
.tile({
size: 256,
layout: 'zoomify',
depth: 'one'
})
.toFile(directory, function (err, info) {
if (err) throw err;
assert.strictEqual('dz', info.format);
assert.strictEqual(2725, info.width);
assert.strictEqual(2225, info.height);
assert.strictEqual(3, info.channels);
assert.strictEqual('number', typeof info.size);
assertZoomifyTiles(directory, 256, 1, done);
});
});
});

it('Zoomify layout with depth onetile', function (done) {
const directory = fixtures.path('output.zoomify.depth_onetile.dzi');
rimraf(directory, function () {
sharp(fixtures.inputJpg)
.tile({
size: 256,
layout: 'zoomify',
depth: 'onetile'
})
.toFile(directory, function (err, info) {
if (err) throw err;
assert.strictEqual('dz', info.format);
assert.strictEqual(2725, info.width);
assert.strictEqual(2225, info.height);
assert.strictEqual(3, info.channels);
assert.strictEqual('number', typeof info.size);
assertZoomifyTiles(directory, 256, 5, done);
});
});
});

it('Zoomify layout with depth onepixel', function (done) {
const directory = fixtures.path('output.zoomify.depth_onepixel.dzi');
rimraf(directory, function () {
sharp(fixtures.inputJpg)
.tile({
size: 256,
layout: 'zoomify',
depth: 'onepixel'
})
.toFile(directory, function (err, info) {
if (err) throw err;
assert.strictEqual('dz', info.format);
assert.strictEqual(2725, info.width);
assert.strictEqual(2225, info.height);
assert.strictEqual(3, info.channels);
assert.strictEqual('number', typeof info.size);
assertZoomifyTiles(directory, 256, 13, done);
});
});
});

it('Google layout', function (done) {
const directory = fixtures.path('output.google.dzi');
rimraf(directory, function () {
@@ -410,6 +586,72 @@ describe('Tile', function () {
});
});

it('Google layout with depth one', function (done) {
const directory = fixtures.path('output.google_depth_one.dzi');
rimraf(directory, function () {
sharp(fixtures.inputJpg)
.tile({
layout: 'google',
depth: 'one',
size: 256
})
.toFile(directory, function (err, info) {
if (err) throw err;
assert.strictEqual('dz', info.format);
assert.strictEqual(2725, info.width);
assert.strictEqual(2225, info.height);
assert.strictEqual(3, info.channels);
assert.strictEqual('number', typeof info.size);

assertGoogleTiles(directory, 256, 1, done);
});
});
});

it('Google layout with depth onepixel', function (done) {
const directory = fixtures.path('output.google_depth_onepixel.dzi');
rimraf(directory, function () {
sharp(fixtures.inputJpg)
.tile({
layout: 'google',
depth: 'onepixel',
size: 256
})
.toFile(directory, function (err, info) {
if (err) throw err;
assert.strictEqual('dz', info.format);
assert.strictEqual(2725, info.width);
assert.strictEqual(2225, info.height);
assert.strictEqual(3, info.channels);
assert.strictEqual('number', typeof info.size);

assertGoogleTiles(directory, 256, 13, done);
});
});
});

it('Google layout with depth onetile', function (done) {
const directory = fixtures.path('output.google_depth_onetile.dzi');
rimraf(directory, function () {
sharp(fixtures.inputJpg)
.tile({
layout: 'google',
depth: 'onetile',
size: 256
})
.toFile(directory, function (err, info) {
if (err) throw err;
assert.strictEqual('dz', info.format);
assert.strictEqual(2725, info.width);
assert.strictEqual(2225, info.height);
assert.strictEqual(3, info.channels);
assert.strictEqual('number', typeof info.size);

assertGoogleTiles(directory, 256, 5, done);
});
});
});

it('Write to ZIP container using file extension', function (done) {
const container = fixtures.path('output.dz.container.zip');
const extractTo = fixtures.path('output.dz.container');
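The tile tests above add a `depth` option ('one', 'onetile', 'onepixel') that controls how many pyramid levels are generated for the Deep Zoom, Zoomify and Google layouts. A minimal sketch, with hypothetical input and output paths:

```js
const sharp = require('sharp');

// Generate a Zoomify pyramid that stops once the whole image fits in a single tile.
// 'one' would produce a single level; 'onepixel' keeps halving down to 1x1.
sharp('huge-scan.jpg') // hypothetical input path
  .tile({
    size: 256,
    layout: 'zoomify',
    depth: 'onetile'
  })
  .toFile('output.zoomify.dzi', function (err, info) { // hypothetical output path
    if (err) throw err;
    console.log(info.format); // 'dz'
  });
```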
@@ -9,12 +9,38 @@ describe('Tint', function () {
it('tints rgb image red', function (done) {
const output = fixtures.path('output.tint-red.jpg');
sharp(fixtures.inputJpg)
.resize(320, 240)
.resize(320, 240, { fastShrinkOnLoad: false })
.tint('#FF0000')
.toFile(output, function (err, info) {
if (err) throw err;
assert.strictEqual(true, info.size > 0);
fixtures.assertMaxColourDistance(output, fixtures.expected('tint-red.jpg'), 10);
fixtures.assertMaxColourDistance(output, fixtures.expected('tint-red.jpg'), 18);
done();
});
});

it('tints rgb image green', function (done) {
const output = fixtures.path('output.tint-green.jpg');
sharp(fixtures.inputJpg)
.resize(320, 240, { fastShrinkOnLoad: false })
.tint('#00FF00')
.toFile(output, function (err, info) {
if (err) throw err;
assert.strictEqual(true, info.size > 0);
fixtures.assertMaxColourDistance(output, fixtures.expected('tint-green.jpg'), 27);
done();
});
});

it('tints rgb image blue', function (done) {
const output = fixtures.path('output.tint-blue.jpg');
sharp(fixtures.inputJpg)
.resize(320, 240, { fastShrinkOnLoad: false })
.tint('#0000FF')
.toFile(output, function (err, info) {
if (err) throw err;
assert.strictEqual(true, info.size > 0);
fixtures.assertMaxColourDistance(output, fixtures.expected('tint-blue.jpg'), 14);
done();
});
});
@@ -22,7 +48,7 @@ describe('Tint', function () {
it('tints rgb image with sepia tone', function (done) {
const output = fixtures.path('output.tint-sepia.jpg');
sharp(fixtures.inputJpg)
.resize(320, 240)
.resize(320, 240, { fastShrinkOnLoad: false })
.tint('#704214')
.toFile(output, function (err, info) {
if (err) throw err;
@@ -36,7 +62,7 @@ describe('Tint', function () {
it('tints rgb image with sepia tone with rgb colour', function (done) {
const output = fixtures.path('output.tint-sepia.jpg');
sharp(fixtures.inputJpg)
.resize(320, 240)
.resize(320, 240, { fastShrinkOnLoad: false })
.tint([112, 66, 20])
.toFile(output, function (err, info) {
if (err) throw err;
@@ -50,7 +76,7 @@ describe('Tint', function () {
it('tints rgb image with alpha channel', function (done) {
const output = fixtures.path('output.tint-alpha.png');
sharp(fixtures.inputPngRGBWithAlpha)
.resize(320, 240)
.resize(320, 240, { fastShrinkOnLoad: false })
.tint('#704214')
.toFile(output, function (err, info) {
if (err) throw err;
@@ -60,4 +86,17 @@ describe('Tint', function () {
done();
});
});

it('tints cmyk image red', function (done) {
const output = fixtures.path('output.tint-cmyk.jpg');
sharp(fixtures.inputJpgWithCmykProfile)
.resize(320, 240, { fastShrinkOnLoad: false })
.tint('#FF0000')
.toFile(output, function (err, info) {
if (err) throw err;
assert.strictEqual(true, info.size > 0);
fixtures.assertMaxColourDistance(output, fixtures.expected('tint-cmyk.jpg'), 15);
done();
});
});
});
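The tint tests now pass `fastShrinkOnLoad: false` to the resize so the JPEG shrink-on-load fast path does not influence the colour-distance comparisons, and `tint()` is shown accepting both a hex string and an `[r, g, b]` array. A minimal sketch combining the two, with hypothetical file paths:

```js
const sharp = require('sharp');

// Resize without the JPEG shrink-on-load fast path, then tint towards sepia.
sharp('photo.jpg') // hypothetical input path
  .resize(320, 240, { fastShrinkOnLoad: false })
  .tint('#704214') // equivalently: .tint([112, 66, 20])
  .toFile('tinted.jpg', function (err, info) { // hypothetical output path
    if (err) throw err;
    console.log(info.size > 0);
  });
```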
@@ -3,6 +3,7 @@
const assert = require('assert');

const sharp = require('../../');
const inRange = require('../../lib/is').inRange;
const fixtures = require('../fixtures');

describe('Trim borders', function () {
@@ -16,6 +17,8 @@ describe('Trim borders', function () {
assert.strictEqual('png', info.format);
assert.strictEqual(450, info.width);
assert.strictEqual(322, info.height);
assert.strictEqual(-204, info.trimOffsetLeft);
assert.strictEqual(0, info.trimOffsetTop);
fixtures.assertSimilar(expected, data, done);
});
});
@@ -24,12 +27,16 @@ describe('Trim borders', function () {
const expected = fixtures.expected('alpha-layer-2-trim-resize.jpg');
sharp(fixtures.inputJpgOverlayLayer2)
.trim()
.resize(300)
.resize({
width: 300,
fastShrinkOnLoad: false
})
.toBuffer(function (err, data, info) {
if (err) throw err;
assert.strictEqual('jpeg', info.format);
assert.strictEqual(300, info.width);
assert.strictEqual(300, info.height);
assert.strictEqual(true, inRange(info.trimOffsetLeft, -873, -870));
assert.strictEqual(-554, info.trimOffsetTop);
fixtures.assertSimilar(expected, data, done);
});
});
@@ -45,12 +52,14 @@ describe('Trim borders', function () {
assert.strictEqual(32, info.width);
assert.strictEqual(32, info.height);
assert.strictEqual(4, info.channels);
assert.strictEqual(-2, info.trimOffsetLeft);
assert.strictEqual(-2, info.trimOffsetTop);
fixtures.assertSimilar(fixtures.expected('trim-16bit-rgba.png'), data, done);
});
});

describe('Invalid thresholds', function () {
[-1, 100, 'fail', {}].forEach(function (threshold) {
[-1, 'fail', {}].forEach(function (threshold) {
it(JSON.stringify(threshold), function () {
assert.throws(function () {
sharp().trim(threshold);
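The trim tests above add `trimOffsetLeft`/`trimOffsetTop` to the output info and drop `100` from the list of rejected thresholds, so values of 100 and above are no longer treated as invalid. A minimal sketch of reading those offsets; the input path is hypothetical and the interpretation of the negative offsets (position of the retained region relative to the original image) is an assumption drawn from the assertions above:

```js
const sharp = require('sharp');

// Trim near-uniform borders using the given threshold.
sharp('scan-with-border.png') // hypothetical input path
  .trim(10)
  .toBuffer(function (err, data, info) {
    if (err) throw err;
    console.log(info.width, info.height);
    // Assumed meaning: how far the trimmed region sits from the original origin.
    console.log(info.trimOffsetLeft, info.trimOffsetTop);
  });
```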