Source Code: lib/zlib.js
The node:zlib module provides compression functionality implemented using\nGzip, Deflate/Inflate, and Brotli.
node:zlib
To access it:
const zlib = require('node:zlib');\n
Compression and decompression are built around the Node.js Streams API.
Compressing or decompressing a stream (such as a file) can be accomplished by\npiping the source stream through a zlib Transform stream into a destination\nstream:
zlib
Transform
const { createGzip } = require('node:zlib');\nconst { pipeline } = require('node:stream');\nconst {\n createReadStream,\n createWriteStream\n} = require('node:fs');\n\nconst gzip = createGzip();\nconst source = createReadStream('input.txt');\nconst destination = createWriteStream('input.txt.gz');\n\npipeline(source, gzip, destination, (err) => {\n if (err) {\n console.error('An error occurred:', err);\n process.exitCode = 1;\n }\n});\n\n// Or, Promisified\n\nconst { promisify } = require('node:util');\nconst pipe = promisify(pipeline);\n\nasync function do_gzip(input, output) {\n const gzip = createGzip();\n const source = createReadStream(input);\n const destination = createWriteStream(output);\n await pipe(source, gzip, destination);\n}\n\ndo_gzip('input.txt', 'input.txt.gz')\n .catch((err) => {\n console.error('An error occurred:', err);\n process.exitCode = 1;\n });\n
It is also possible to compress or decompress data in a single step:
const { deflate, unzip } = require('node:zlib');\n\nconst input = '.................................';\ndeflate(input, (err, buffer) => {\n if (err) {\n console.error('An error occurred:', err);\n process.exitCode = 1;\n }\n console.log(buffer.toString('base64'));\n});\n\nconst buffer = Buffer.from('eJzT0yMAAGTvBe8=', 'base64');\nunzip(buffer, (err, buffer) => {\n if (err) {\n console.error('An error occurred:', err);\n process.exitCode = 1;\n }\n console.log(buffer.toString());\n});\n\n// Or, Promisified\n\nconst { promisify } = require('node:util');\nconst do_unzip = promisify(unzip);\n\ndo_unzip(buffer)\n .then((buf) => console.log(buf.toString()))\n .catch((err) => {\n console.error('An error occurred:', err);\n process.exitCode = 1;\n });\n
All zlib APIs, except those that are explicitly synchronous, use the Node.js\ninternal threadpool. This can lead to surprising effects and performance\nlimitations in some applications.
Creating and using a large number of zlib objects simultaneously can cause\nsignificant memory fragmentation.
const zlib = require('node:zlib');\n\nconst payload = Buffer.from('This is some data');\n\n// WARNING: DO NOT DO THIS!\nfor (let i = 0; i < 30000; ++i) {\n zlib.deflate(payload, (err, buffer) => {});\n}\n
In the preceding example, 30,000 deflate instances are created concurrently.\nBecause of how some operating systems handle memory allocation and\ndeallocation, this may lead to significant memory fragmentation.
It is strongly recommended that the results of compression\noperations be cached to avoid duplication of effort.
The node:zlib module can be used to implement support for the gzip, deflate\nand br content-encoding mechanisms defined by\nHTTP.
gzip
deflate
br
The HTTP Accept-Encoding header is used within an HTTP request to identify\nthe compression encodings accepted by the client. The Content-Encoding\nheader is used to identify the compression encodings actually applied to a\nmessage.
Accept-Encoding
Content-Encoding
The examples given below are drastically simplified to show the basic concept.\nUsing zlib encoding can be expensive, and the results ought to be cached.\nSee Memory usage tuning for more information on the speed/memory/compression\ntradeoffs involved in zlib usage.
// Client request example\nconst zlib = require('node:zlib');\nconst http = require('node:http');\nconst fs = require('node:fs');\nconst { pipeline } = require('node:stream');\n\nconst request = http.get({ host: 'example.com',\n path: '/',\n port: 80,\n headers: { 'Accept-Encoding': 'br,gzip,deflate' } });\nrequest.on('response', (response) => {\n const output = fs.createWriteStream('example.com_index.html');\n\n const onError = (err) => {\n if (err) {\n console.error('An error occurred:', err);\n process.exitCode = 1;\n }\n };\n\n switch (response.headers['content-encoding']) {\n case 'br':\n pipeline(response, zlib.createBrotliDecompress(), output, onError);\n break;\n // Or, just use zlib.createUnzip() to handle both of the following cases:\n case 'gzip':\n pipeline(response, zlib.createGunzip(), output, onError);\n break;\n case 'deflate':\n pipeline(response, zlib.createInflate(), output, onError);\n break;\n default:\n pipeline(response, output, onError);\n break;\n }\n});\n
// server example\n// Running a gzip operation on every request is quite expensive.\n// It would be much more efficient to cache the compressed buffer.\nconst zlib = require('node:zlib');\nconst http = require('node:http');\nconst fs = require('node:fs');\nconst { pipeline } = require('node:stream');\n\nhttp.createServer((request, response) => {\n const raw = fs.createReadStream('index.html');\n // Store both a compressed and an uncompressed version of the resource.\n response.setHeader('Vary', 'Accept-Encoding');\n let acceptEncoding = request.headers['accept-encoding'];\n if (!acceptEncoding) {\n acceptEncoding = '';\n }\n\n const onError = (err) => {\n if (err) {\n // If an error occurs, there's not much we can do because\n // the server has already sent the 200 response code and\n // some amount of data has already been sent to the client.\n // The best we can do is terminate the response immediately\n // and log the error.\n response.end();\n console.error('An error occurred:', err);\n }\n };\n\n // Note: This is not a conformant accept-encoding parser.\n // See https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.3\n if (/\\bdeflate\\b/.test(acceptEncoding)) {\n response.writeHead(200, { 'Content-Encoding': 'deflate' });\n pipeline(raw, zlib.createDeflate(), response, onError);\n } else if (/\\bgzip\\b/.test(acceptEncoding)) {\n response.writeHead(200, { 'Content-Encoding': 'gzip' });\n pipeline(raw, zlib.createGzip(), response, onError);\n } else if (/\\bbr\\b/.test(acceptEncoding)) {\n response.writeHead(200, { 'Content-Encoding': 'br' });\n pipeline(raw, zlib.createBrotliCompress(), response, onError);\n } else {\n response.writeHead(200, {});\n pipeline(raw, response, onError);\n }\n}).listen(1337);\n
By default, the zlib methods will throw an error when decompressing\ntruncated data. However, if it is known that the data is incomplete, or\nthe desire is to inspect only the beginning of a compressed file, it is\npossible to suppress the default error handling by changing the flushing\nmethod that is used to decompress the last chunk of input data:
// This is a truncated version of the buffer from the above examples\nconst buffer = Buffer.from('eJzT0yMA', 'base64');\n\nzlib.unzip(\n buffer,\n // For Brotli, the equivalent is zlib.constants.BROTLI_OPERATION_FLUSH.\n { finishFlush: zlib.constants.Z_SYNC_FLUSH },\n (err, buffer) => {\n if (err) {\n console.error('An error occurred:', err);\n process.exitCode = 1;\n }\n console.log(buffer.toString());\n });\n
This will not change the behavior in other error-throwing situations, e.g.\nwhen the input data has an invalid format. Using this method, it will not be\npossible to determine whether the input ended prematurely or lacks the\nintegrity checks, making it necessary to manually check that the\ndecompressed result is valid.
Calling .flush() on a compression stream will make zlib return as much\noutput as currently possible. This may come at the cost of degraded compression\nquality, but can be useful when data needs to be available as soon as possible.
.flush()
In the following example, flush() is used to write a compressed partial\nHTTP response to the client:
flush()
const zlib = require('node:zlib');\nconst http = require('node:http');\nconst { pipeline } = require('node:stream');\n\nhttp.createServer((request, response) => {\n // For the sake of simplicity, the Accept-Encoding checks are omitted.\n response.writeHead(200, { 'content-encoding': 'gzip' });\n const output = zlib.createGzip();\n let i;\n\n pipeline(output, response, (err) => {\n if (err) {\n // If an error occurs, there's not much we can do because\n // the server has already sent the 200 response code and\n // some amount of data has already been sent to the client.\n // The best we can do is terminate the response immediately\n // and log the error.\n clearInterval(i);\n response.end();\n console.error('An error occurred:', err);\n }\n });\n\n i = setInterval(() => {\n output.write(`The current time is ${Date()}\\n`, () => {\n // The data has been passed to zlib, but the compression algorithm may\n // have decided to buffer the data for more efficient compression.\n // Calling .flush() will make the data available as soon as the client\n // is ready to receive it.\n output.flush();\n });\n }, 1000);\n}).listen(1337);\n
From zlib/zconf.h, modified for Node.js usage:
zlib/zconf.h
The memory requirements for deflate are (in bytes):
(1 << (windowBits + 2)) + (1 << (memLevel + 9))\n
That is: 128K for windowBits = 15 + 128K for memLevel = 8\n(default values) plus a few kilobytes for small objects.
windowBits
memLevel
For example, to reduce the default memory requirements from 256K to 128K, the\noptions should be set to:
const options = { windowBits: 14, memLevel: 7 };\n
This will, however, generally degrade compression.
The memory requirements for inflate are (in bytes) 1 << windowBits.\nThat is, 32K for windowBits = 15 (default value) plus a few kilobytes\nfor small objects.
1 << windowBits
This is in addition to a single internal output slab buffer of size\nchunkSize, which defaults to 16K.
chunkSize
The speed of zlib compression is affected most dramatically by the\nlevel setting. A higher level will result in better compression, but\nwill take longer to complete. A lower level will result in less\ncompression, but will be much faster.
level
In general, greater memory usage options will mean that Node.js has to make\nfewer calls to zlib because it will be able to process more data on\neach write operation. So, this is another factor that affects the\nspeed, at the cost of memory usage.
write
There are equivalents to the zlib options for Brotli-based streams, although\nthese options have different ranges than the zlib ones:
BROTLI_PARAM_QUALITY
BROTLI_PARAM_LGWIN
See below for more details on Brotli-specific options.
All of the constants defined in zlib.h are also defined on\nrequire('node:zlib').constants. In the normal course of operations, it will\nnot be necessary to use these constants. They are documented so that their\npresence is not surprising. This section is taken almost directly from the\nzlib documentation.
zlib.h
require('node:zlib').constants
Previously, the constants were available directly from require('node:zlib'),\nfor instance zlib.Z_NO_FLUSH. Accessing the constants directly from the module\nis currently still possible but is deprecated.
require('node:zlib')
zlib.Z_NO_FLUSH
Allowed flush values.
zlib.constants.Z_NO_FLUSH
zlib.constants.Z_PARTIAL_FLUSH
zlib.constants.Z_SYNC_FLUSH
zlib.constants.Z_FULL_FLUSH
zlib.constants.Z_FINISH
zlib.constants.Z_BLOCK
zlib.constants.Z_TREES
Return codes for the compression/decompression functions. Negative\nvalues are errors, positive values are used for special but normal\nevents.
zlib.constants.Z_OK
zlib.constants.Z_STREAM_END
zlib.constants.Z_NEED_DICT
zlib.constants.Z_ERRNO
zlib.constants.Z_STREAM_ERROR
zlib.constants.Z_DATA_ERROR
zlib.constants.Z_MEM_ERROR
zlib.constants.Z_BUF_ERROR
zlib.constants.Z_VERSION_ERROR
Compression levels.
zlib.constants.Z_NO_COMPRESSION
zlib.constants.Z_BEST_SPEED
zlib.constants.Z_BEST_COMPRESSION
zlib.constants.Z_DEFAULT_COMPRESSION
Compression strategy.
zlib.constants.Z_FILTERED
zlib.constants.Z_HUFFMAN_ONLY
zlib.constants.Z_RLE
zlib.constants.Z_FIXED
zlib.constants.Z_DEFAULT_STRATEGY
There are several options and other constants available for Brotli-based\nstreams:
The following values are valid flush operations for Brotli-based streams:
zlib.constants.BROTLI_OPERATION_PROCESS
zlib.constants.BROTLI_OPERATION_FLUSH
zlib.constants.BROTLI_OPERATION_FINISH
zlib.constants.BROTLI_OPERATION_EMIT_METADATA
There are several options that can be set on Brotli encoders, affecting\ncompression efficiency and speed. Both the keys and the values can be accessed\nas properties of the zlib.constants object.
zlib.constants
The most important options are:
BROTLI_PARAM_MODE
BROTLI_MODE_GENERIC
BROTLI_MODE_TEXT
BROTLI_MODE_FONT
BROTLI_MIN_QUALITY
BROTLI_MAX_QUALITY
BROTLI_DEFAULT_QUALITY
BROTLI_PARAM_SIZE_HINT
0
The following flags can be set for advanced control over the compression\nalgorithm and memory usage tuning:
BROTLI_MIN_WINDOW_BITS
BROTLI_MAX_WINDOW_BITS
BROTLI_DEFAULT_WINDOW
BROTLI_LARGE_MAX_WINDOW_BITS
BROTLI_PARAM_LARGE_WINDOW
BROTLI_PARAM_LGBLOCK
BROTLI_MIN_INPUT_BLOCK_BITS
BROTLI_MAX_INPUT_BLOCK_BITS
BROTLI_PARAM_DISABLE_LITERAL_CONTEXT_MODELING
BROTLI_PARAM_NPOSTFIX
BROTLI_MAX_NPOSTFIX
BROTLI_PARAM_NDIRECT
15 << NPOSTFIX
1 << NPOSTFIX
These advanced options are available for controlling decompression:
BROTLI_DECODER_PARAM_DISABLE_RING_BUFFER_REALLOCATION
BROTLI_DECODER_PARAM_LARGE_WINDOW
Each zlib-based class takes an options object. No options are required.
options
Some options are only relevant when compressing and are\nignored by the decompression classes.
flush
finishFlush
16 * 1024
strategy
dictionary
info
true
buffer
engine
maxOutputLength
buffer.kMaxLength
See the deflateInit2 and inflateInit2 documentation for more\ninformation.
deflateInit2
inflateInit2
Each Brotli-based class takes an options object. All options are optional.
params
For example:
const stream = zlib.createBrotliCompress({\n chunkSize: 32 * 1024,\n params: {\n [zlib.constants.BROTLI_PARAM_MODE]: zlib.constants.BROTLI_MODE_TEXT,\n [zlib.constants.BROTLI_PARAM_QUALITY]: 4,\n [zlib.constants.BROTLI_PARAM_SIZE_HINT]: fs.statSync(inputFile).size\n }\n});\n
All of these take a Buffer, TypedArray, DataView,\nArrayBuffer or string as the first argument, an optional second argument\nto supply options to the zlib classes and will call the supplied callback\nwith callback(error, result).
Buffer
TypedArray
DataView
ArrayBuffer
callback(error, result)
Every method has a *Sync counterpart, which accepts the same arguments, but\nwithout a callback.
*Sync
Compress a chunk of data with BrotliCompress.
BrotliCompress
Decompress a chunk of data with BrotliDecompress.
BrotliDecompress
Compress a chunk of data with Deflate.
Deflate
Compress a chunk of data with DeflateRaw.
DeflateRaw
Decompress a chunk of data with Gunzip.
Gunzip
Compress a chunk of data with Gzip.
Gzip
Decompress a chunk of data with Inflate.
Inflate
Decompress a chunk of data with InflateRaw.
InflateRaw
Decompress a chunk of data with Unzip.
Unzip
Compress data using the Brotli algorithm.
Decompress data using the Brotli algorithm.
Compress data using deflate.
Compress data using deflate, and do not append a zlib header.
Decompress a gzip stream.
Compress data using gzip.
Decompress a deflate stream.
Decompress a raw deflate stream.
Decompress either a Gzip- or Deflate-compressed stream by auto-detecting\nthe header.
Not exported by the node:zlib module. It is documented here because it is the\nbase class of the compressor/decompressor classes.
This class inherits from stream.Transform, allowing node:zlib objects to\nbe used in pipes and similar stream operations.
stream.Transform
Deprecated alias for zlib.bytesWritten. This original name was chosen\nbecause it also made sense to interpret the value as the number of bytes\nread by the engine, but is inconsistent with other streams in Node.js that\nexpose values under these names.
zlib.bytesWritten
The zlib.bytesWritten property specifies the number of bytes written to\nthe engine, before the bytes are processed (compressed or decompressed,\nas appropriate for the derived class).
Close the underlying handle.
Flush pending data. Don't call this frivolously, premature flushes negatively\nimpact the effectiveness of the compression algorithm.
Calling this only flushes data from the internal zlib state, and does not\nperform flushing of any kind on the streams level. Rather, it behaves like a\nnormal call to .write(), i.e. it will be queued up behind other pending\nwrites and will only produce output when data is being read from the stream.
.write()
This function is only available for zlib-based streams, i.e. not Brotli.
Dynamically update the compression level and compression strategy.\nOnly applicable to the deflate algorithm.
Reset the compressor/decompressor to factory defaults. Only applicable to\nthe inflate and deflate algorithms.
Provides an object enumerating Zlib-related constants.
Creates and returns a new BrotliCompress object.
Creates and returns a new BrotliDecompress object.
Creates and returns a new Deflate object.
Creates and returns a new DeflateRaw object.
An upgrade of zlib from 1.2.8 to 1.2.11 changed behavior when windowBits\nis set to 8 for raw deflate streams. zlib would automatically set windowBits\nto 9 if it was initially set to 8. Newer versions of zlib will throw an exception,\nso Node.js restored the original behavior of upgrading a value of 8 to 9,\nsince passing windowBits = 9 to zlib actually results in a compressed stream\nthat effectively uses an 8-bit window only.
windowBits = 9
Creates and returns a new Gunzip object.
Creates and returns a new Gzip object.\nSee example.
Creates and returns a new Inflate object.
Creates and returns a new InflateRaw object.
Creates and returns a new Unzip object.