Diffstat (limited to 'node_modules/tar')
-rw-r--r--  node_modules/tar/CHANGELOG.md  68
-rw-r--r--  node_modules/tar/LICENSE  15
-rw-r--r--  node_modules/tar/README.md  1031
-rw-r--r--  node_modules/tar/index.js  18
-rw-r--r--  node_modules/tar/lib/create.js  105
-rw-r--r--  node_modules/tar/lib/extract.js  112
-rw-r--r--  node_modules/tar/lib/get-write-flag.js  20
-rw-r--r--  node_modules/tar/lib/header.js  288
-rw-r--r--  node_modules/tar/lib/high-level-opt.js  29
-rw-r--r--  node_modules/tar/lib/large-numbers.js  97
-rw-r--r--  node_modules/tar/lib/list.js  128
-rw-r--r--  node_modules/tar/lib/mkdir.js  206
-rw-r--r--  node_modules/tar/lib/mode-fix.js  24
-rw-r--r--  node_modules/tar/lib/pack.js  403
-rw-r--r--  node_modules/tar/lib/parse.js  483
-rw-r--r--  node_modules/tar/lib/path-reservations.js  125
-rw-r--r--  node_modules/tar/lib/pax.js  145
-rw-r--r--  node_modules/tar/lib/read-entry.js  98
-rw-r--r--  node_modules/tar/lib/replace.js  219
-rw-r--r--  node_modules/tar/lib/types.js  44
-rw-r--r--  node_modules/tar/lib/unpack.js  680
-rw-r--r--  node_modules/tar/lib/update.js  36
-rw-r--r--  node_modules/tar/lib/warn-mixin.js  21
-rw-r--r--  node_modules/tar/lib/winchars.js  23
-rw-r--r--  node_modules/tar/lib/write-entry.js  436
l---------  node_modules/tar/node_modules/.bin/mkdirp  1
-rw-r--r--  node_modules/tar/node_modules/mkdirp/CHANGELOG.md  15
-rw-r--r--  node_modules/tar/node_modules/mkdirp/LICENSE  21
-rwxr-xr-x  node_modules/tar/node_modules/mkdirp/bin/cmd.js  68
-rw-r--r--  node_modules/tar/node_modules/mkdirp/index.js  31
-rw-r--r--  node_modules/tar/node_modules/mkdirp/lib/find-made.js  29
-rw-r--r--  node_modules/tar/node_modules/mkdirp/lib/mkdirp-manual.js  64
-rw-r--r--  node_modules/tar/node_modules/mkdirp/lib/mkdirp-native.js  39
-rw-r--r--  node_modules/tar/node_modules/mkdirp/lib/opts-arg.js  23
-rw-r--r--  node_modules/tar/node_modules/mkdirp/lib/path-arg.js  29
-rw-r--r--  node_modules/tar/node_modules/mkdirp/lib/use-native.js  10
-rw-r--r--  node_modules/tar/node_modules/mkdirp/package.json  75
-rw-r--r--  node_modules/tar/node_modules/mkdirp/readme.markdown  266
-rw-r--r--  node_modules/tar/package.json  81
39 files changed, 0 insertions, 5606 deletions
diff --git a/node_modules/tar/CHANGELOG.md b/node_modules/tar/CHANGELOG.md
deleted file mode 100644
index 9373401..0000000
--- a/node_modules/tar/CHANGELOG.md
+++ /dev/null
@@ -1,68 +0,0 @@
-# Changelog
-
-## 6.0
-
-- Drop support for node 6 and 8
-- fix symlinks and hardlinks on windows being packed with `\`-style path
- targets
-
-## 5.0
-
-- Address unpack race conditions using path reservations
-- Change large-numbers errors from TypeError to Error
-- Add `TAR_*` error codes
-- Raise `TAR_BAD_ARCHIVE` warning/error when there are no valid entries
- found in an archive
-- do not treat ignored entries as an invalid archive
-- drop support for node v4
-- unpack: conditionally use a file mapping to write files on Windows
-- Set more portable 'mode' value in portable mode
-- Set `portable` gzip option in portable mode
-
-## 4.4
-
-- Add 'mtime' option to tar creation to force mtime
-- unpack: only reuse file fs entries if nlink = 1
-- unpack: rename before unlinking files on Windows
-- Fix encoding/decoding of base-256 numbers
-- Use `stat` instead of `lstat` when checking CWD
-- Always provide a callback to fs.close()
-
-## 4.3
-
-- Add 'transform' unpack option
-
-## 4.2
-
-- Fail when zlib fails
-
-## 4.1
-
-- Add noMtime flag for tar creation
-
-## 4.0
-
-- unpack: raise error if cwd is missing or not a dir
-- pack: don't drop dots from dotfiles when prefixing
-
-## 3.1
-
-- Support `@file.tar` as an entry argument to copy entries from one tar
- file to another.
-- Add `noPax` option
-- `noResume` option for tar.t
-- win32: convert `>|<?:` chars to windows-friendly form
-- Exclude mtime for dirs in portable mode
-
-## 3.0
-
-- Minipass-based implementation
-- Entirely new API surface, `tar.c()`, `tar.x()` etc., much closer to
- system tar semantics
-- Massive performance improvement
-- Require node 4.x and higher
-
-## 0.x, 1.x, 2.x - 2011-2014
-
-- fstream-based implementation
-- slow and kinda bad, but better than npm shelling out to the system `tar`
diff --git a/node_modules/tar/LICENSE b/node_modules/tar/LICENSE
deleted file mode 100644
index 19129e3..0000000
--- a/node_modules/tar/LICENSE
+++ /dev/null
@@ -1,15 +0,0 @@
-The ISC License
-
-Copyright (c) Isaac Z. Schlueter and Contributors
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
-IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/node_modules/tar/README.md b/node_modules/tar/README.md
deleted file mode 100644
index 1d69694..0000000
--- a/node_modules/tar/README.md
+++ /dev/null
@@ -1,1031 +0,0 @@
-# node-tar
-
-[![Build Status](https://travis-ci.org/npm/node-tar.svg?branch=master)](https://travis-ci.org/npm/node-tar)
-
-[Fast](./benchmarks) and full-featured Tar for Node.js
-
-The API is designed to mimic the behavior of `tar(1)` on unix systems.
-If you are familiar with how tar works, most of this will hopefully be
-straightforward for you. If not, then hopefully this module can teach
-you useful unix skills that may come in handy someday :)
-
-## Background
-
-A "tar file" or "tarball" is an archive of file system entries
-(directories, files, links, etc.) The name comes from "tape archive".
-If you run `man tar` on almost any Unix command line, you'll learn
-quite a bit about what it can do, and its history.
-
-Tar has 5 main top-level commands:
-
-* `c` Create an archive
-* `r` Replace entries within an archive
-* `u` Update entries within an archive (ie, replace if they're newer)
-* `t` List out the contents of an archive
-* `x` Extract an archive to disk
-
-The other flags and options modify how this top level function works.
-
-## High-Level API
-
-These 5 functions are the high-level API. All of them have a
-single-character name (for unix nerds familiar with `tar(1)`) as well
-as a long name (for everyone else).
-
-All the high-level functions take the following arguments, all three
-of which are optional and may be omitted.
-
-1. `options` - An optional object specifying various options
-2. `paths` - An array of paths to add or extract
-3. `callback` - Called when the command is completed, if async. (If
-   the command is sync, or no `file` is specified, providing a
-   callback throws a `TypeError`.)
-
-If the command is sync (ie, if `options.sync=true`), then the
-callback is not allowed, since the action will be completed immediately.
-
-If a `file` argument is specified, and the command is async, then a
-`Promise` is returned. In this case, a callback may also be provided,
-which is called when the command is completed.
-
-If a `file` option is not specified, then a stream is returned. For
-`create`, this is a readable stream of the generated archive. For
-`list` and `extract` this is a writable stream that an archive should
-be written into. If a file is not specified, then a callback is not
-allowed, because you're already getting a stream to work with.
-
-`replace` and `update` only work on existing archives, and so require
-a `file` argument.
-
-Sync commands without a file argument return a stream that acts on its
-input immediately in the same tick. For readable streams, this means
-that all of the data is immediately available by calling
-`stream.read()`. For writable streams, it will be acted upon as soon
-as it is provided, but this can be at any time.
-
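-For example, here is a minimal sketch of that sync-stream behavior
-(the paths are hypothetical):
-
-```js
-const fs = require('fs')
-const tar = require('tar')
-
-// sync and no `file` option: the returned stream already has all of
-// the archive data buffered by the time the call returns
-const stream = tar.c({ sync: true, cwd: 'some-dir' }, ['some-file.txt'])
-
-// read() returns the entire tarball in the same tick
-fs.writeFileSync('sync-tarball.tar', stream.read())
-```
-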
-### Warnings and Errors
-
-Tar emits warnings and errors for recoverable and unrecoverable situations,
-respectively. In many cases, a warning only affects a single entry in an
-archive, or is simply informing you that it's modifying an entry to comply
-with the settings provided.
-
-Unrecoverable warnings will always raise an error (ie, emit `'error'` on
-streaming actions, throw for non-streaming sync actions, reject the
-returned Promise for non-streaming async operations, or call a provided
-callback with an `Error` as the first argument). Recoverable errors will
-raise an error only if `strict: true` is set in the options.
-
-Respond to (recoverable) warnings by listening to the `warn` event.
-Handlers receive 3 arguments:
-
-- `code` String. One of the error codes below. This may not match
- `data.code`, which preserves the original error code from fs and zlib.
-- `message` String. More details about the error.
-- `data` Metadata about the error. An `Error` object for errors raised by
-  fs and zlib. All fields are attached to errors raised by tar. Typically
- contains the following fields, as relevant:
- - `tarCode` The tar error code.
- - `code` Either the tar error code, or the error code set by the
- underlying system.
- - `file` The archive file being read or written.
- - `cwd` Working directory for creation and extraction operations.
- - `entry` The entry object (if it could be created) for `TAR_ENTRY_INFO`,
- `TAR_ENTRY_INVALID`, and `TAR_ENTRY_ERROR` warnings.
- - `header` The header object (if it could be created, and the entry could
- not be created) for `TAR_ENTRY_INFO` and `TAR_ENTRY_INVALID` warnings.
- - `recoverable` Boolean. If `false`, then the warning will emit an
- `error`, even in non-strict mode.
-
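-For example, a sketch of an `onwarn` handler that logs warning details
-(the file name is hypothetical):
-
-```js
-const tar = require('tar')
-
-tar.x({
-  file: 'my-tarball.tgz',
-  onwarn: (code, message, data) => {
-    // data.tarCode is always the tar error code; data.code may
-    // preserve an underlying fs or zlib code instead
-    console.error(`tar warning ${code}: ${message}`)
-  },
-})
-```
-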
-#### Error Codes
-
-* `TAR_ENTRY_INFO` An informative error indicating that an entry is being
- modified, but otherwise processed normally. For example, removing `/` or
- `C:\` from absolute paths if `preservePaths` is not set.
-
-* `TAR_ENTRY_INVALID` An indication that a given entry is not a valid tar
- archive entry, and will be skipped. This occurs when:
- - a checksum fails,
- - a `linkpath` is missing for a link type, or
- - a `linkpath` is provided for a non-link type.
-
-  If every entry in a parsed archive raises a `TAR_ENTRY_INVALID` error,
- then the archive is presumed to be unrecoverably broken, and
- `TAR_BAD_ARCHIVE` will be raised.
-
-* `TAR_ENTRY_ERROR` The entry appears to be a valid tar archive entry, but
- encountered an error which prevented it from being unpacked. This occurs
- when:
- - an unrecoverable fs error happens during unpacking,
- - an entry has `..` in the path and `preservePaths` is not set, or
- - an entry is extracting through a symbolic link, when `preservePaths` is
- not set.
-
-* `TAR_ENTRY_UNSUPPORTED` An indication that a given entry is
- a valid archive entry, but of a type that is unsupported, and so will be
-  skipped in archive creation or extraction.
-
-* `TAR_ABORT` When parsing gzip-encoded archives, the parser will
-  abort the parse process and raise a warning for any zlib errors
-  encountered. Aborts are considered unrecoverable for both parsing
-  and unpacking.
-
-* `TAR_BAD_ARCHIVE` The archive file is totally hosed. This can happen for
- a number of reasons, and always occurs at the end of a parse or extract:
-
- - An entry body was truncated before seeing the full number of bytes.
- - The archive contained only invalid entries, indicating that it is
- likely not an archive, or at least, not an archive this library can
- parse.
-
- `TAR_BAD_ARCHIVE` is considered informative for parse operations, but
- unrecoverable for extraction. Note that, if encountered at the end of an
-  extraction, tar WILL still have extracted as much as it could from the
- archive, so there may be some garbage files to clean up.
-
-Errors that occur deeper in the system (ie, either the filesystem or zlib)
-will have their error codes left intact, and a `tarCode` matching one of
-the above will be added to the warning metadata or the raised error object.
-
-Errors generated by tar will have one of the above codes set as the
-`error.code` field as well, but since errors originating in zlib or fs will
-have their original codes, it's better to read `error.tarCode` if you wish
-to see how tar is handling the issue.
-
-### Examples
-
-The API mimics the `tar(1)` command line functionality, with aliases
-for more human-readable option and function names. The goal is that
-if you know how to use `tar(1)` in Unix, then you know how to use
-`require('tar')` in JavaScript.
-
-To replicate `tar czf my-tarball.tgz files and folders`, you'd do:
-
-```js
-tar.c(
- {
- gzip: <true|gzip options>,
- file: 'my-tarball.tgz'
- },
- ['some', 'files', 'and', 'folders']
-).then(_ => { .. tarball has been created .. })
-```
-
-To replicate `tar cz files and folders > my-tarball.tgz`, you'd do:
-
-```js
-tar.c( // or tar.create
- {
- gzip: <true|gzip options>
- },
- ['some', 'files', 'and', 'folders']
-).pipe(fs.createWriteStream('my-tarball.tgz'))
-```
-
-To replicate `tar xf my-tarball.tgz` you'd do:
-
-```js
-tar.x( // or tar.extract(
- {
- file: 'my-tarball.tgz'
- }
-).then(_=> { .. tarball has been dumped in cwd .. })
-```
-
-To replicate `cat my-tarball.tgz | tar x -C some-dir --strip=1`:
-
-```js
-fs.createReadStream('my-tarball.tgz').pipe(
- tar.x({
- strip: 1,
- C: 'some-dir' // alias for cwd:'some-dir', also ok
- })
-)
-```
-
-To replicate `tar tf my-tarball.tgz`, do this:
-
-```js
-tar.t({
- file: 'my-tarball.tgz',
- onentry: entry => { .. do whatever with it .. }
-})
-```
-
-To replicate `cat my-tarball.tgz | tar t` do:
-
-```js
-fs.createReadStream('my-tarball.tgz')
- .pipe(tar.t())
- .on('entry', entry => { .. do whatever with it .. })
-```
-
-To do anything synchronous, add `sync: true` to the options. Note
-that sync functions don't take a callback and don't return a promise.
-When the function returns, it's already done. Sync methods without a
-file argument return a sync stream, which flushes immediately. But,
-of course, it still won't be done until you `.end()` it.
-
-To filter entries, add `filter: <function>` to the options.
-Tar-creating methods call the filter with `filter(path, stat)`.
-Tar-reading methods (including extraction) call the filter with
-`filter(path, entry)`. The filter is called in the `this`-context of
-the `Pack` or `Unpack` stream object.
-
-The arguments list to `tar t` and `tar x` specify a list of filenames
-to extract or list, so they're equivalent to a filter that tests if
-the file is in the list.
-
-For those who _aren't_ fans of tar's single-character command names:
-
-```
-tar.c === tar.create
-tar.r === tar.replace (appends to archive, file is required)
-tar.u === tar.update (appends if newer, file is required)
-tar.x === tar.extract
-tar.t === tar.list
-```
-
-Keep reading for all the command descriptions and options, as well as
-the low-level API that they are built on.
-
-### tar.c(options, fileList, callback) [alias: tar.create]
-
-Create a tarball archive.
-
-The `fileList` is an array of paths to add to the tarball. Adding a
-directory also adds its children recursively.
-
-An entry in `fileList` that starts with an `@` symbol is a tar archive
-whose entries will be added. To add a file that starts with `@`,
-prepend it with `./`.
-
-The following options are supported:
-
-- `file` Write the tarball archive to the specified filename. If this
- is specified, then the callback will be fired when the file has been
- written, and a promise will be returned that resolves when the file
- is written. If a filename is not specified, then a Readable Stream
- will be returned which will emit the file data. [Alias: `f`]
-- `sync` Act synchronously. If this is set, then any provided file
- will be fully written after the call to `tar.c`. If this is set,
- and a file is not provided, then the resulting stream will already
- have the data ready to `read` or `emit('data')` as soon as you
- request it.
-- `onwarn` A function that will get called with `(code, message, data)` for
- any warnings encountered. (See "Warnings and Errors")
-- `strict` Treat warnings as crash-worthy errors. Default false.
-- `cwd` The current working directory for creating the archive.
- Defaults to `process.cwd()`. [Alias: `C`]
-- `prefix` A path portion to prefix onto the entries in the archive.
-- `gzip` Set to any truthy value to create a gzipped archive, or an
- object with settings for `zlib.Gzip()` [Alias: `z`]
-- `filter` A function that gets called with `(path, stat)` for each
- entry being added. Return `true` to add the entry to the archive,
- or `false` to omit it.
-- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
- `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
- that `mtime` is still included, because this is necessary for other
- time-based operations. Additionally, `mode` is set to a "reasonable
- default" for most unix systems, based on a `umask` value of `0o22`.
-- `preservePaths` Allow absolute paths. By default, `/` is stripped
- from absolute paths. [Alias: `P`]
-- `mode` The mode to set on the created file archive
-- `noDirRecurse` Do not recursively archive the contents of
- directories. [Alias: `n`]
-- `follow` Set to true to pack the targets of symbolic links. Without
- this option, symbolic links are archived as such. [Alias: `L`, `h`]
-- `noPax` Suppress pax extended headers. Note that this means that
- long paths and linkpaths will be truncated, and large or negative
- numeric values may be interpreted incorrectly.
-- `noMtime` Set to true to omit writing `mtime` values for entries.
- Note that this prevents using other mtime-based features like
- `tar.update` or the `keepNewer` option with the resulting tar archive.
- [Alias: `m`, `no-mtime`]
-- `mtime` Set to a `Date` object to force a specific `mtime` for
- everything added to the archive. Overridden by `noMtime`.
-
-
-The following options are mostly internal, but can be modified in some
-advanced use cases, such as re-using caches between runs.
-
-- `linkCache` A Map object containing the device and inode value for
- any file whose nlink is > 1, to identify hard links.
-- `statCache` A Map object that caches calls to `lstat`.
-- `readdirCache` A Map object that caches calls to `readdir`.
-- `jobs` A number specifying how many concurrent jobs to run.
- Defaults to 4.
-- `maxReadSize` The maximum buffer size for `fs.read()` operations.
- Defaults to 16 MB.
-
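-As a sketch, several of these options combined (paths hypothetical):
-
-```js
-const tar = require('tar')
-
-tar.c({
-  gzip: true,
-  file: 'dist.tgz',
-  portable: true,        // omit system-specific metadata
-  prefix: 'package',     // entries are archived as package/...
-  filter: (path, stat) => !path.endsWith('.log'), // skip log files
-}, ['lib', 'README.md']).then(() => {
-  // dist.tgz has been written
-})
-```
-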
-### tar.x(options, fileList, callback) [alias: tar.extract]
-
-Extract a tarball archive.
-
-The `fileList` is an array of paths to extract from the tarball. If
-no paths are provided, then all the entries are extracted.
-
-If the archive is gzipped, then tar will detect this and unzip it.
-
-Note that all directories that are created will be forced to be
-writable, readable, and listable by their owner, to avoid cases where
-a directory prevents extraction of child entries by virtue of its
-mode.
-
-Most extraction errors will cause a `warn` event to be emitted. If
-the `cwd` is missing, or not a directory, then the extraction will
-fail completely.
-
-The following options are supported:
-
-- `cwd` Extract files relative to the specified directory. Defaults
- to `process.cwd()`. If provided, this must exist and must be a
- directory. [Alias: `C`]
-- `file` The archive file to extract. If not specified, then a
- Writable stream is returned where the archive data should be
- written. [Alias: `f`]
-- `sync` Create files and directories synchronously.
-- `strict` Treat warnings as crash-worthy errors. Default false.
-- `filter` A function that gets called with `(path, entry)` for each
- entry being unpacked. Return `true` to unpack the entry from the
- archive, or `false` to skip it.
-- `newer` Set to true to keep the existing file on disk if it's newer
- than the file in the archive. [Alias: `keep-newer`,
- `keep-newer-files`]
-- `keep` Do not overwrite existing files. In particular, if a file
- appears more than once in an archive, later copies will not
- overwrite earlier copies. [Alias: `k`, `keep-existing`]
-- `preservePaths` Allow absolute paths, paths containing `..`, and
- extracting through symbolic links. By default, `/` is stripped from
- absolute paths, `..` paths are not extracted, and any file whose
- location would be modified by a symbolic link is not extracted.
- [Alias: `P`]
-- `unlink` Unlink files before creating them. Without this option,
- tar overwrites existing files, which preserves existing hardlinks.
- With this option, existing hardlinks will be broken, as will any
- symlink that would affect the location of an extracted file. [Alias:
- `U`]
-- `strip` Remove the specified number of leading path elements.
- Pathnames with fewer elements will be silently skipped. Note that
- the pathname is edited after applying the filter, but before
- security checks. [Alias: `strip-components`, `stripComponents`]
-- `onwarn` A function that will get called with `(code, message, data)` for
- any warnings encountered. (See "Warnings and Errors")
-- `preserveOwner` If true, tar will set the `uid` and `gid` of
- extracted entries to the `uid` and `gid` fields in the archive.
- This defaults to true when run as root, and false otherwise. If
- false, then files and directories will be set with the owner and
- group of the user running the process. This is similar to `-p` in
- `tar(1)`, but ACLs and other system-specific data is never unpacked
- in this implementation, and modes are set by default already.
- [Alias: `p`]
-- `uid` Set to a number to force ownership of all extracted files and
- folders, and all implicitly created directories, to be owned by the
- specified user id, regardless of the `uid` field in the archive.
- Cannot be used along with `preserveOwner`. Requires also setting a
- `gid` option.
-- `gid` Set to a number to force ownership of all extracted files and
- folders, and all implicitly created directories, to be owned by the
- specified group id, regardless of the `gid` field in the archive.
- Cannot be used along with `preserveOwner`. Requires also setting a
- `uid` option.
-- `noMtime` Set to true to omit writing `mtime` value for extracted
- entries. [Alias: `m`, `no-mtime`]
-- `transform` Provide a function that takes an `entry` object, and
- returns a stream, or any falsey value. If a stream is provided,
- then that stream's data will be written instead of the contents of
- the archive entry. If a falsey value is provided, then the entry is
- written to disk as normal. (To exclude items from extraction, use
- the `filter` option described above.)
-- `onentry` A function that gets called with `(entry)` for each entry
- that passes the filter.
-
-The following options are mostly internal, but can be modified in some
-advanced use cases, such as re-using caches between runs.
-
-- `maxReadSize` The maximum buffer size for `fs.read()` operations.
- Defaults to 16 MB.
-- `umask` Filter the modes of entries like `process.umask()`.
-- `dmode` Default mode for directories
-- `fmode` Default mode for files
-- `dirCache` A Map object of which directories exist.
-- `maxMetaEntrySize` The maximum size of meta entries that is
- supported. Defaults to 1 MB.
-
-Note that using an asynchronous stream type with the `transform`
-option will cause undefined behavior in sync extractions.
-[MiniPass](http://npm.im/minipass)-based streams are designed for this
-use case.
-
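-A sketch of extraction using a few of these options (paths
-hypothetical):
-
-```js
-const tar = require('tar')
-
-tar.x({
-  file: 'release.tgz',
-  cwd: 'vendor',    // must already exist and be a directory
-  strip: 1,         // drop the leading path element from each entry
-  // the filter sees the path before `strip` is applied
-  filter: path => !path.includes('test/'),
-}).then(() => {
-  // extraction is complete
-})
-```
-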
-### tar.t(options, fileList, callback) [alias: tar.list]
-
-List the contents of a tarball archive.
-
-The `fileList` is an array of paths to list from the tarball. If
-no paths are provided, then all the entries are listed.
-
-If the archive is gzipped, then tar will detect this and unzip it.
-
-Returns an event emitter that emits `entry` events with
-`tar.ReadEntry` objects. However, they don't emit `'data'` or `'end'`
-events. (If you want to get actual readable entries, use the
-`tar.Parse` class instead.)
-
-The following options are supported:
-
-- `cwd` Extract files relative to the specified directory. Defaults
- to `process.cwd()`. [Alias: `C`]
-- `file` The archive file to list. If not specified, then a
- Writable stream is returned where the archive data should be
- written. [Alias: `f`]
-- `sync` Read the specified file synchronously. (This has no effect
- when a file option isn't specified, because entries are emitted as
- fast as they are parsed from the stream anyway.)
-- `strict` Treat warnings as crash-worthy errors. Default false.
-- `filter` A function that gets called with `(path, entry)` for each
- entry being listed. Return `true` to emit the entry from the
- archive, or `false` to skip it.
-- `onentry` A function that gets called with `(entry)` for each entry
- that passes the filter. This is important for when both `file` and
- `sync` are set, because it will be called synchronously.
-- `maxReadSize` The maximum buffer size for `fs.read()` operations.
- Defaults to 16 MB.
-- `noResume` By default, `entry` streams are resumed immediately after
- the call to `onentry`. Set `noResume: true` to suppress this
- behavior. Note that by opting into this, the stream will never
- complete until the entry data is consumed.
-
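-For example, a sketch that collects entry paths synchronously (file
-name hypothetical):
-
-```js
-const tar = require('tar')
-
-const paths = []
-tar.t({
-  file: 'my-tarball.tgz',
-  sync: true,
-  // with both file and sync set, onentry fires synchronously
-  onentry: entry => paths.push(entry.path),
-})
-console.log(paths)
-```
-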
-### tar.u(options, fileList, callback) [alias: tar.update]
-
-Add files to an archive if they are newer than the entry already in
-the tarball archive.
-
-The `fileList` is an array of paths to add to the tarball. Adding a
-directory also adds its children recursively.
-
-An entry in `fileList` that starts with an `@` symbol is a tar archive
-whose entries will be added. To add a file that starts with `@`,
-prepend it with `./`.
-
-The following options are supported:
-
-- `file` Required. Write the tarball archive to the specified
- filename. [Alias: `f`]
-- `sync` Act synchronously. If this is set, then any provided file
- will be fully written after the call to `tar.c`.
-- `onwarn` A function that will get called with `(code, message, data)` for
- any warnings encountered. (See "Warnings and Errors")
-- `strict` Treat warnings as crash-worthy errors. Default false.
-- `cwd` The current working directory for adding entries to the
- archive. Defaults to `process.cwd()`. [Alias: `C`]
-- `prefix` A path portion to prefix onto the entries in the archive.
-- `gzip` Set to any truthy value to create a gzipped archive, or an
- object with settings for `zlib.Gzip()` [Alias: `z`]
-- `filter` A function that gets called with `(path, stat)` for each
- entry being added. Return `true` to add the entry to the archive,
- or `false` to omit it.
-- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
- `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
- that `mtime` is still included, because this is necessary for other
- time-based operations. Additionally, `mode` is set to a "reasonable
- default" for most unix systems, based on a `umask` value of `0o22`.
-- `preservePaths` Allow absolute paths. By default, `/` is stripped
- from absolute paths. [Alias: `P`]
-- `maxReadSize` The maximum buffer size for `fs.read()` operations.
- Defaults to 16 MB.
-- `noDirRecurse` Do not recursively archive the contents of
- directories. [Alias: `n`]
-- `follow` Set to true to pack the targets of symbolic links. Without
- this option, symbolic links are archived as such. [Alias: `L`, `h`]
-- `noPax` Suppress pax extended headers. Note that this means that
- long paths and linkpaths will be truncated, and large or negative
- numeric values may be interpreted incorrectly.
-- `noMtime` Set to true to omit writing `mtime` values for entries.
- Note that this prevents using other mtime-based features like
- `tar.update` or the `keepNewer` option with the resulting tar archive.
- [Alias: `m`, `no-mtime`]
-- `mtime` Set to a `Date` object to force a specific `mtime` for
- everything added to the archive. Overridden by `noMtime`.
-
-### tar.r(options, fileList, callback) [alias: tar.replace]
-
-Add files to an existing archive. Because later entries override
-earlier entries, this effectively replaces any existing entries.
-
-The `fileList` is an array of paths to add to the tarball. Adding a
-directory also adds its children recursively.
-
-An entry in `fileList` that starts with an `@` symbol is a tar archive
-whose entries will be added. To add a file that starts with `@`,
-prepend it with `./`.
-
-The following options are supported:
-
-- `file` Required. Write the tarball archive to the specified
- filename. [Alias: `f`]
-- `sync` Act synchronously. If this is set, then any provided file
- will be fully written after the call to `tar.c`.
-- `onwarn` A function that will get called with `(code, message, data)` for
- any warnings encountered. (See "Warnings and Errors")
-- `strict` Treat warnings as crash-worthy errors. Default false.
-- `cwd` The current working directory for adding entries to the
- archive. Defaults to `process.cwd()`. [Alias: `C`]
-- `prefix` A path portion to prefix onto the entries in the archive.
-- `gzip` Set to any truthy value to create a gzipped archive, or an
- object with settings for `zlib.Gzip()` [Alias: `z`]
-- `filter` A function that gets called with `(path, stat)` for each
- entry being added. Return `true` to add the entry to the archive,
- or `false` to omit it.
-- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
- `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
- that `mtime` is still included, because this is necessary for other
- time-based operations. Additionally, `mode` is set to a "reasonable
- default" for most unix systems, based on a `umask` value of `0o22`.
-- `preservePaths` Allow absolute paths. By default, `/` is stripped
- from absolute paths. [Alias: `P`]
-- `maxReadSize` The maximum buffer size for `fs.read()` operations.
- Defaults to 16 MB.
-- `noDirRecurse` Do not recursively archive the contents of
- directories. [Alias: `n`]
-- `follow` Set to true to pack the targets of symbolic links. Without
- this option, symbolic links are archived as such. [Alias: `L`, `h`]
-- `noPax` Suppress pax extended headers. Note that this means that
- long paths and linkpaths will be truncated, and large or negative
- numeric values may be interpreted incorrectly.
-- `noMtime` Set to true to omit writing `mtime` values for entries.
- Note that this prevents using other mtime-based features like
- `tar.update` or the `keepNewer` option with the resulting tar archive.
- [Alias: `m`, `no-mtime`]
-- `mtime` Set to a `Date` object to force a specific `mtime` for
- everything added to the archive. Overridden by `noMtime`.
-
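-A sketch contrasting the two append commands (paths hypothetical;
-note that appending to a gzipped archive is not supported):
-
-```js
-const tar = require('tar')
-
-// replace: always append, overriding any same-named entries
-tar.r({ file: 'archive.tar', sync: true }, ['build/output.js'])
-
-// update: append only if build/output.js is newer than the
-// entry already in archive.tar
-tar.u({ file: 'archive.tar', sync: true }, ['build/output.js'])
-```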
-
-## Low-Level API
-
-### class tar.Pack
-
-A readable tar stream.
-
-Has all the standard readable stream interface stuff. `'data'` and
-`'end'` events, `read()` method, `pause()` and `resume()`, etc.
-
-#### constructor(options)
-
-The following options are supported:
-
-- `onwarn` A function that will get called with `(code, message, data)` for
- any warnings encountered. (See "Warnings and Errors")
-- `strict` Treat warnings as crash-worthy errors. Default false.
-- `cwd` The current working directory for creating the archive.
- Defaults to `process.cwd()`.
-- `prefix` A path portion to prefix onto the entries in the archive.
-- `gzip` Set to any truthy value to create a gzipped archive, or an
- object with settings for `zlib.Gzip()`
-- `filter` A function that gets called with `(path, stat)` for each
- entry being added. Return `true` to add the entry to the archive,
- or `false` to omit it.
-- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
- `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
- that `mtime` is still included, because this is necessary for other
- time-based operations. Additionally, `mode` is set to a "reasonable
- default" for most unix systems, based on a `umask` value of `0o22`.
-- `preservePaths` Allow absolute paths. By default, `/` is stripped
- from absolute paths.
-- `linkCache` A Map object containing the device and inode value for
- any file whose nlink is > 1, to identify hard links.
-- `statCache` A Map object that caches calls to `lstat`.
-- `readdirCache` A Map object that caches calls to `readdir`.
-- `jobs` A number specifying how many concurrent jobs to run.
- Defaults to 4.
-- `maxReadSize` The maximum buffer size for `fs.read()` operations.
- Defaults to 16 MB.
-- `noDirRecurse` Do not recursively archive the contents of
- directories.
-- `follow` Set to true to pack the targets of symbolic links. Without
- this option, symbolic links are archived as such.
-- `noPax` Suppress pax extended headers. Note that this means that
- long paths and linkpaths will be truncated, and large or negative
- numeric values may be interpreted incorrectly.
-- `noMtime` Set to true to omit writing `mtime` values for entries.
- Note that this prevents using other mtime-based features like
- `tar.update` or the `keepNewer` option with the resulting tar archive.
-- `mtime` Set to a `Date` object to force a specific `mtime` for
- everything added to the archive. Overridden by `noMtime`.
-
-#### add(path)
-
-Adds an entry to the archive. Returns the Pack stream.
-
-#### write(path)
-
-Adds an entry to the archive. Returns true if flushed.
-
-#### end()
-
-Finishes the archive.
-
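-A sketch of driving a `Pack` stream directly (paths hypothetical):
-
-```js
-const fs = require('fs')
-const tar = require('tar')
-
-const pack = new tar.Pack({ cwd: 'src' })
-pack.pipe(fs.createWriteStream('src.tar'))
-pack.add('index.js') // returns the Pack stream, so adds can chain
-pack.add('lib')
-pack.end()
-```
-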
-### class tar.Pack.Sync
-
-Synchronous version of `tar.Pack`.
-
-### class tar.Unpack
-
-A writable stream that unpacks a tar archive onto the file system.
-
-All the normal writable stream stuff is supported. `write()` and
-`end()` methods, `'drain'` events, etc.
-
-Note that all directories that are created will be forced to be
-writable, readable, and listable by their owner, to avoid cases where
-a directory prevents extraction of child entries by virtue of its
-mode.
-
-`'close'` is emitted when it's done writing stuff to the file system.
-
-Most unpack errors will cause a `warn` event to be emitted. If the
-`cwd` is missing, or not a directory, then an error will be emitted.
-
-#### constructor(options)
-
-- `cwd` Extract files relative to the specified directory. Defaults
- to `process.cwd()`. If provided, this must exist and must be a
- directory.
-- `filter` A function that gets called with `(path, entry)` for each
- entry being unpacked. Return `true` to unpack the entry from the
- archive, or `false` to skip it.
-- `newer` Set to true to keep the existing file on disk if it's newer
- than the file in the archive.
-- `keep` Do not overwrite existing files. In particular, if a file
- appears more than once in an archive, later copies will not
- overwrite earlier copies.
-- `preservePaths` Allow absolute paths, paths containing `..`, and
- extracting through symbolic links. By default, `/` is stripped from
- absolute paths, `..` paths are not extracted, and any file whose
- location would be modified by a symbolic link is not extracted.
-- `unlink` Unlink files before creating them. Without this option,
- tar overwrites existing files, which preserves existing hardlinks.
- With this option, existing hardlinks will be broken, as will any
- symlink that would affect the location of an extracted file.
-- `strip` Remove the specified number of leading path elements.
- Pathnames with fewer elements will be silently skipped. Note that
- the pathname is edited after applying the filter, but before
- security checks.
-- `onwarn` A function that will get called with `(code, message, data)` for
- any warnings encountered. (See "Warnings and Errors")
-- `umask` Filter the modes of entries like `process.umask()`.
-- `dmode` Default mode for directories
-- `fmode` Default mode for files
-- `dirCache` A Map object of which directories exist.
-- `maxMetaEntrySize` The maximum size of meta entries that is
- supported. Defaults to 1 MB.
-- `preserveOwner` If true, tar will set the `uid` and `gid` of
- extracted entries to the `uid` and `gid` fields in the archive.
- This defaults to true when run as root, and false otherwise. If
- false, then files and directories will be set with the owner and
- group of the user running the process. This is similar to `-p` in
- `tar(1)`, but ACLs and other system-specific data is never unpacked
- in this implementation, and modes are set by default already.
-- `win32` True if on a windows platform. Causes behavior where
- filenames containing `<|>?` chars are converted to
- windows-compatible values while being unpacked.
-- `uid` Set to a number to force ownership of all extracted files and
- folders, and all implicitly created directories, to be owned by the
- specified user id, regardless of the `uid` field in the archive.
- Cannot be used along with `preserveOwner`. Requires also setting a
- `gid` option.
-- `gid` Set to a number to force ownership of all extracted files and
- folders, and all implicitly created directories, to be owned by the
- specified group id, regardless of the `gid` field in the archive.
- Cannot be used along with `preserveOwner`. Requires also setting a
- `uid` option.
-- `noMtime` Set to true to omit writing `mtime` value for extracted
- entries.
-- `transform` Provide a function that takes an `entry` object, and
- returns a stream, or any falsey value. If a stream is provided,
- then that stream's data will be written instead of the contents of
- the archive entry. If a falsey value is provided, then the entry is
- written to disk as normal. (To exclude items from extraction, use
- the `filter` option described above.)
-- `strict` Treat warnings as crash-worthy errors. Default false.
-- `onentry` A function that gets called with `(entry)` for each entry
- that passes the filter.
-
-### class tar.Unpack.Sync
-
-Synchronous version of `tar.Unpack`.
-
-Note that using an asynchronous stream type with the `transform`
-option will cause undefined behavior in sync unpack streams.
-[MiniPass](http://npm.im/minipass)-based streams are designed for this
-use case.
-
-### class tar.Parse
-
-A writable stream that parses a tar archive stream. All the standard
-writable stream stuff is supported.
-
-If the archive is gzipped, then tar will detect this and unzip it.
-
-Emits `'entry'` events with `tar.ReadEntry` objects, which are
-themselves readable streams that you can pipe wherever.
-
-Each `entry` will not emit until the one before it is flushed through,
-so make sure to either consume the data (with `on('data', ...)` or
-`.pipe(...)`) or throw it away with `.resume()` to keep the stream
-flowing.
-
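-For example, a sketch that lists paths while discarding entry bodies
-(file name hypothetical):
-
-```js
-const fs = require('fs')
-const tar = require('tar')
-
-const parser = new tar.Parse()
-parser.on('entry', entry => {
-  console.log(entry.path, entry.size)
-  entry.resume() // throw the body away so the next entry can emit
-})
-fs.createReadStream('my-tarball.tgz').pipe(parser)
-```
-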
-#### constructor(options)
-
-Returns an event emitter that emits `entry` events with
-`tar.ReadEntry` objects.
-
-The following options are supported:
-
-- `strict` Treat warnings as crash-worthy errors. Default false.
-- `filter` A function that gets called with `(path, entry)` for each
- entry being listed. Return `true` to emit the entry from the
- archive, or `false` to skip it.
-- `onentry` A function that gets called with `(entry)` for each entry
- that passes the filter.
-- `onwarn` A function that will get called with `(code, message, data)` for
- any warnings encountered. (See "Warnings and Errors")
-
-#### abort(error)
-
-Stop all parsing activities. This is called when there are zlib
-errors. It also emits an unrecoverable warning with the error provided.
-
-### class tar.ReadEntry extends [MiniPass](http://npm.im/minipass)
-
-A representation of an entry that is being read out of a tar archive.
-
-It has the following fields:
-
-- `extended` The extended metadata object provided to the constructor.
-- `globalExtended` The global extended metadata object provided to the
- constructor.
-- `remain` The number of bytes remaining to be written into the
- stream.
-- `blockRemain` The number of 512-byte blocks remaining to be written
- into the stream.
-- `ignore` Whether this entry should be ignored.
-- `meta` True if this represents metadata about the next entry, false
- if it represents a filesystem object.
-- All the fields from the header, extended header, and global extended
- header are added to the ReadEntry object. So it has `path`, `type`,
- `size`, `mode`, and so on.
-
-#### constructor(header, extended, globalExtended)
-
-Create a new ReadEntry object with the specified header, extended
-header, and global extended header values.
-
-### class tar.WriteEntry extends [MiniPass](http://npm.im/minipass)
-
-A representation of an entry that is being written from the file
-system into a tar archive.
-
-Emits data for the Header, and for the Pax Extended Header if one is
-required, as well as any body data.
-
-Creating a WriteEntry for a directory does not also create
-WriteEntry objects for all of the directory contents.
-
-It has the following fields:
-
-- `path` The path field that will be written to the archive. By
- default, this is also the path from the cwd to the file system
- object.
-- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
- `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
- that `mtime` is still included, because this is necessary for other
- time-based operations. Additionally, `mode` is set to a "reasonable
- default" for most unix systems, based on a `umask` value of `0o22`.
-- `myuid` If supported, the uid of the user running the current
- process.
-- `myuser` The `env.USER` string if set, or `''`. Set as the entry
- `uname` field if the file's `uid` matches `this.myuid`.
-- `maxReadSize` The maximum buffer size for `fs.read()` operations.
- Defaults to 1 MB.
-- `linkCache` A Map object containing the device and inode value for
- any file whose nlink is > 1, to identify hard links.
-- `statCache` A Map object that caches calls to `lstat`.
-- `preservePaths` Allow absolute paths. By default, `/` is stripped
- from absolute paths.
-- `cwd` The current working directory for creating the archive.
- Defaults to `process.cwd()`.
-- `absolute` The absolute path to the entry on the filesystem. By
- default, this is `path.resolve(this.cwd, this.path)`, but it can be
- overridden explicitly.
-- `strict` Treat warnings as crash-worthy errors. Default false.
-- `win32` True if on a windows platform. Causes behavior where paths
- replace `\` with `/` and filenames containing the windows-compatible
- forms of `<|>?:` characters are converted to actual `<|>?:` characters
- in the archive.
-- `noPax` Suppress pax extended headers. Note that this means that
- long paths and linkpaths will be truncated, and large or negative
- numeric values may be interpreted incorrectly.
-- `noMtime` Set to true to omit writing `mtime` values for entries.
- Note that this prevents using other mtime-based features like
- `tar.update` or the `keepNewer` option with the resulting tar archive.
-
-
-#### constructor(path, options)
-
-`path` is the path of the entry as it is written in the archive.
-
-The following options are supported:
-
-- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
- `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
- that `mtime` is still included, because this is necessary for other
- time-based operations. Additionally, `mode` is set to a "reasonable
- default" for most unix systems, based on a `umask` value of `0o22`.
-- `maxReadSize` The maximum buffer size for `fs.read()` operations.
- Defaults to 1 MB.
-- `linkCache` A Map object containing the device and inode value for
- any file whose nlink is > 1, to identify hard links.
-- `statCache` A Map object that caches calls to `lstat`.
-- `preservePaths` Allow absolute paths. By default, `/` is stripped
- from absolute paths.
-- `cwd` The current working directory for creating the archive.
- Defaults to `process.cwd()`.
-- `absolute` The absolute path to the entry on the filesystem. By
- default, this is `path.resolve(this.cwd, this.path)`, but it can be
- overridden explicitly.
-- `strict` Treat warnings as crash-worthy errors. Default false.
-- `win32` True if on a windows platform. Causes behavior where paths
- replace `\` with `/`.
-- `onwarn` A function that will get called with `(code, message, data)` for
- any warnings encountered. (See "Warnings and Errors")
-- `noMtime` Set to true to omit writing `mtime` values for entries.
- Note that this prevents using other mtime-based features like
- `tar.update` or the `keepNewer` option with the resulting tar archive.
-- `umask` Set to restrict the modes on the entries in the archive,
- somewhat like how umask works on file creation. Defaults to
- `process.umask()` on unix systems, or `0o22` on Windows.
-
-#### warn(code, message, data)
-
-If strict, emit an error with the provided message.
-
-Otherwise, emit a `'warn'` event with the provided code, message, and
-data.
-
-### class tar.WriteEntry.Sync
-
-Synchronous version of `tar.WriteEntry`.
-
-### class tar.WriteEntry.Tar
-
-A version of `tar.WriteEntry` that gets its data from a `tar.ReadEntry`
-instead of from the filesystem.
-
-#### constructor(readEntry, options)
-
-`readEntry` is the entry being read out of another archive.
-
-The following options are supported:
-
-- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
- `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
- that `mtime` is still included, because this is necessary for other
- time-based operations. Additionally, `mode` is set to a "reasonable
- default" for most unix systems, based on a `umask` value of `0o22`.
-- `preservePaths` Allow absolute paths. By default, `/` is stripped
- from absolute paths.
-- `strict` Treat warnings as crash-worthy errors. Default false.
-- `onwarn` A function that will get called with `(code, message, data)` for
- any warnings encountered. (See "Warnings and Errors")
-- `noMtime` Set to true to omit writing `mtime` values for entries.
- Note that this prevents using other mtime-based features like
- `tar.update` or the `keepNewer` option with the resulting tar archive.
-
-### class tar.Header
-
-A class for reading and writing header blocks.
-
-It has the following fields:
-
-- `nullBlock` True if decoding a block which is entirely composed of
- `0x00` null bytes. (Useful because tar files are terminated by
- at least 2 null blocks.)
-- `cksumValid` True if the checksum in the header is valid, false
- otherwise.
-- `needPax` True if the values, as encoded, will require a Pax
- extended header.
-- `path` The path of the entry.
-- `mode` The 4 lowest-order octal digits of the file mode. That is,
- read/write/execute permissions for world, group, and owner, and the
- setuid, setgid, and sticky bits.
-- `uid` Numeric user id of the file owner
-- `gid` Numeric group id of the file owner
-- `size` Size of the file in bytes
-- `mtime` Modified time of the file
-- `cksum` The checksum of the header. This is generated by adding all
- the bytes of the header block, treating the checksum field itself as
- all ascii space characters (that is, `0x20`).
-- `type` The human-readable name of the type of entry this represents,
- or the alphanumeric key if unknown.
-- `typeKey` The alphanumeric key for the type of entry this header
- represents.
-- `linkpath` The target of Link and SymbolicLink entries.
-- `uname` Human-readable user name of the file owner
-- `gname` Human-readable group name of the file owner
-- `devmaj` The major portion of the device number. Always `0` for
- files, directories, and links.
-- `devmin` The minor portion of the device number. Always `0` for
- files, directories, and links.
-- `atime` File access time.
-- `ctime` File change time.
-
-#### constructor(data, [offset=0])
-
-`data` is optional. It is either a Buffer that should be interpreted
-as a tar Header starting at the specified offset and continuing for
-512 bytes, or a data object of keys and values to set on the header
-object, and eventually encode as a tar Header.
-
-#### decode(block, offset)
-
-Decode the provided buffer starting at the specified offset.
-
-The buffer must contain at least 512 bytes after the given offset.
-
-#### set(data)
-
-Set the fields in the data object.
-
-#### encode(buffer, offset)
-
-Encode the header fields into the buffer at the specified offset.
-
-Returns `this.needPax` to indicate whether a Pax Extended Header is
-required to properly encode the specified data.
-
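-A sketch of encoding a header into a block (values hypothetical):
-
-```js
-const tar = require('tar')
-
-const header = new tar.Header({
-  path: 'hello.txt',
-  mode: 0o644,
-  size: 6,
-  type: 'File',
-  mtime: new Date(),
-})
-
-const block = Buffer.alloc(512)
-// returns header.needPax: whether a Pax extended header is required
-const needPax = header.encode(block, 0)
-```
-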
-### class tar.Pax
-
-An object representing a set of key-value pairs in a Pax extended
-header entry.
-
-It has the following fields. Where the same name is used, they have
-the same semantics as the tar.Header field of the same name.
-
-- `global` True if this represents a global extended header, or false
- if it is for a single entry.
-- `atime`
-- `charset`
-- `comment`
-- `ctime`
-- `gid`
-- `gname`
-- `linkpath`
-- `mtime`
-- `path`
-- `size`
-- `uid`
-- `uname`
-- `dev`
-- `ino`
-- `nlink`
-
-#### constructor(object, global)
-
-Set the fields set in the object. `global` is a boolean that defaults
-to false.
-
-#### encode()
-
-Return a Buffer containing the header and body for the Pax extended
-header entry, or `null` if there is nothing to encode.
-
-#### encodeBody()
-
-Return a string representing the body of the pax extended header
-entry.
-
-#### encodeField(fieldName)
-
-Return a string representing the key/value encoding for the specified
-fieldName, or `''` if the field is unset.
-
-### tar.Pax.parse(string, extended, global)
-
-Return a new Pax object created by parsing the contents of the string
-provided.
-
-If the `extended` object is set, then also add the fields from that
-object. (This is necessary because multiple metadata entries can
-occur in sequence.)
-
-### tar.types
-
-A translation table for the `type` field in tar headers.
-
-#### tar.types.name.get(code)
-
-Get the human-readable name for a given alphanumeric code.
-
-#### tar.types.code.get(name)
-
-Get the alphanumeric code for a given human-readable name.
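-
-For example (codes per the ustar spec):
-
-```js
-const tar = require('tar')
-
-tar.types.name.get('0')         // 'File'
-tar.types.code.get('Directory') // '5'
-```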
diff --git a/node_modules/tar/index.js b/node_modules/tar/index.js
deleted file mode 100644
index c9ae06e..0000000
--- a/node_modules/tar/index.js
+++ /dev/null
@@ -1,18 +0,0 @@
-'use strict'
-
-// high-level commands
-exports.c = exports.create = require('./lib/create.js')
-exports.r = exports.replace = require('./lib/replace.js')
-exports.t = exports.list = require('./lib/list.js')
-exports.u = exports.update = require('./lib/update.js')
-exports.x = exports.extract = require('./lib/extract.js')
-
-// classes
-exports.Pack = require('./lib/pack.js')
-exports.Unpack = require('./lib/unpack.js')
-exports.Parse = require('./lib/parse.js')
-exports.ReadEntry = require('./lib/read-entry.js')
-exports.WriteEntry = require('./lib/write-entry.js')
-exports.Header = require('./lib/header.js')
-exports.Pax = require('./lib/pax.js')
-exports.types = require('./lib/types.js')
diff --git a/node_modules/tar/lib/create.js b/node_modules/tar/lib/create.js
deleted file mode 100644
index a37aa52..0000000
--- a/node_modules/tar/lib/create.js
+++ /dev/null
@@ -1,105 +0,0 @@
-'use strict'
-
-// tar -c
-const hlo = require('./high-level-opt.js')
-
-const Pack = require('./pack.js')
-const fs = require('fs')
-const fsm = require('fs-minipass')
-const t = require('./list.js')
-const path = require('path')
-
-const c = module.exports = (opt_, files, cb) => {
- if (typeof files === 'function')
- cb = files
-
- if (Array.isArray(opt_))
- files = opt_, opt_ = {}
-
- if (!files || !Array.isArray(files) || !files.length)
- throw new TypeError('no files or directories specified')
-
- files = Array.from(files)
-
- const opt = hlo(opt_)
-
- if (opt.sync && typeof cb === 'function')
- throw new TypeError('callback not supported for sync tar functions')
-
- if (!opt.file && typeof cb === 'function')
- throw new TypeError('callback only supported with file option')
-
- return opt.file && opt.sync ? createFileSync(opt, files)
- : opt.file ? createFile(opt, files, cb)
- : opt.sync ? createSync(opt, files)
- : create(opt, files)
-}
-
-const createFileSync = (opt, files) => {
- const p = new Pack.Sync(opt)
- const stream = new fsm.WriteStreamSync(opt.file, {
- mode: opt.mode || 0o666
- })
- p.pipe(stream)
- addFilesSync(p, files)
-}
-
-const createFile = (opt, files, cb) => {
- const p = new Pack(opt)
- const stream = new fsm.WriteStream(opt.file, {
- mode: opt.mode || 0o666
- })
- p.pipe(stream)
-
- const promise = new Promise((res, rej) => {
- stream.on('error', rej)
- stream.on('close', res)
- p.on('error', rej)
- })
-
- addFilesAsync(p, files)
-
- return cb ? promise.then(cb, cb) : promise
-}
-
-const addFilesSync = (p, files) => {
- files.forEach(file => {
- if (file.charAt(0) === '@')
- t({
- file: path.resolve(p.cwd, file.substr(1)),
- sync: true,
- noResume: true,
- onentry: entry => p.add(entry)
- })
- else
- p.add(file)
- })
- p.end()
-}
-
-const addFilesAsync = (p, files) => {
- while (files.length) {
- const file = files.shift()
- if (file.charAt(0) === '@')
- return t({
- file: path.resolve(p.cwd, file.substr(1)),
- noResume: true,
- onentry: entry => p.add(entry)
- }).then(_ => addFilesAsync(p, files))
- else
- p.add(file)
- }
- p.end()
-}
-
-const createSync = (opt, files) => {
- const p = new Pack.Sync(opt)
- addFilesSync(p, files)
- return p
-}
-
-const create = (opt, files) => {
- const p = new Pack(opt)
- addFilesAsync(p, files)
- return p
-}
diff --git a/node_modules/tar/lib/extract.js b/node_modules/tar/lib/extract.js
deleted file mode 100644
index cbb458a..0000000
--- a/node_modules/tar/lib/extract.js
+++ /dev/null
@@ -1,112 +0,0 @@
-'use strict'
-
-// tar -x
-const hlo = require('./high-level-opt.js')
-const Unpack = require('./unpack.js')
-const fs = require('fs')
-const fsm = require('fs-minipass')
-const path = require('path')
-
-const x = module.exports = (opt_, files, cb) => {
- if (typeof opt_ === 'function')
- cb = opt_, files = null, opt_ = {}
- else if (Array.isArray(opt_))
- files = opt_, opt_ = {}
-
- if (typeof files === 'function')
- cb = files, files = null
-
- if (!files)
- files = []
- else
- files = Array.from(files)
-
- const opt = hlo(opt_)
-
- if (opt.sync && typeof cb === 'function')
- throw new TypeError('callback not supported for sync tar functions')
-
- if (!opt.file && typeof cb === 'function')
- throw new TypeError('callback only supported with file option')
-
- if (files.length)
- filesFilter(opt, files)
-
- return opt.file && opt.sync ? extractFileSync(opt)
- : opt.file ? extractFile(opt, cb)
- : opt.sync ? extractSync(opt)
- : extract(opt)
-}
-
-// construct a filter that limits the file entries listed
-// include child entries if a dir is included
-const filesFilter = (opt, files) => {
- const map = new Map(files.map(f => [f.replace(/\/+$/, ''), true]))
- const filter = opt.filter
-
- const mapHas = (file, r) => {
- const root = r || path.parse(file).root || '.'
- const ret = file === root ? false
- : map.has(file) ? map.get(file)
- : mapHas(path.dirname(file), root)
-
- map.set(file, ret)
- return ret
- }
-
- opt.filter = filter
- ? (file, entry) => filter(file, entry) && mapHas(file.replace(/\/+$/, ''))
- : file => mapHas(file.replace(/\/+$/, ''))
-}
-
-const extractFileSync = opt => {
- const u = new Unpack.Sync(opt)
-
- const file = opt.file
- let threw = true
- let fd
- const stat = fs.statSync(file)
- // This trades a zero-byte read() syscall for a stat
- // However, it will usually result in less memory allocation
- const readSize = opt.maxReadSize || 16*1024*1024
- const stream = new fsm.ReadStreamSync(file, {
- readSize: readSize,
- size: stat.size
- })
- stream.pipe(u)
-}
-
-const extractFile = (opt, cb) => {
- const u = new Unpack(opt)
- const readSize = opt.maxReadSize || 16*1024*1024
-
- const file = opt.file
- const p = new Promise((resolve, reject) => {
- u.on('error', reject)
- u.on('close', resolve)
-
- // This trades a zero-byte read() syscall for a stat
- // However, it will usually result in less memory allocation
- fs.stat(file, (er, stat) => {
- if (er)
- reject(er)
- else {
- const stream = new fsm.ReadStream(file, {
- readSize: readSize,
- size: stat.size
- })
- stream.on('error', reject)
- stream.pipe(u)
- }
- })
- })
- return cb ? p.then(cb, cb) : p
-}
-
-const extractSync = opt => {
- return new Unpack.Sync(opt)
-}
-
-const extract = opt => {
- return new Unpack(opt)
-}
diff --git a/node_modules/tar/lib/get-write-flag.js b/node_modules/tar/lib/get-write-flag.js
deleted file mode 100644
index e869599..0000000
--- a/node_modules/tar/lib/get-write-flag.js
+++ /dev/null
@@ -1,20 +0,0 @@
-// Get the appropriate flag to use for creating files
-// We use fmap on Windows platforms for files less than
-// 512kb. This is a fairly low limit, but avoids making
-// things slower in some cases. Since most of what this
-// library is used for is extracting tarballs of many
-// relatively small files in npm packages and the like,
-// it can be a big boost on Windows platforms.
-// Only supported in Node v12.9.0 and above.
-const platform = process.env.__FAKE_PLATFORM__ || process.platform
-const isWindows = platform === 'win32'
-const fs = global.__FAKE_TESTING_FS__ || require('fs')
-
-/* istanbul ignore next */
-const { O_CREAT, O_TRUNC, O_WRONLY, UV_FS_O_FILEMAP = 0 } = fs.constants
-
-const fMapEnabled = isWindows && !!UV_FS_O_FILEMAP
-const fMapLimit = 512 * 1024
-const fMapFlag = UV_FS_O_FILEMAP | O_TRUNC | O_CREAT | O_WRONLY
-module.exports = !fMapEnabled ? () => 'w'
- : size => size < fMapLimit ? fMapFlag : 'w'
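
A sketch of how a caller consumes the exported function (output file
name hypothetical): pass the entry size, get back either the plain 'w'
flag or the numeric fmap-enabled flag, and hand it to fs.open().

const fs = require('fs')
const getWriteFlag = require('./get-write-flag.js')

// small files on fmap-capable Windows get the numeric flag;
// everywhere else this is just the string 'w'
const flag = getWriteFlag(1024)
fs.open('out.bin', flag, 0o644, (er, fd) => {
  if (er) throw er
  fs.close(fd, () => {})
})
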
diff --git a/node_modules/tar/lib/header.js b/node_modules/tar/lib/header.js
deleted file mode 100644
index 5d88f6c..0000000
--- a/node_modules/tar/lib/header.js
+++ /dev/null
@@ -1,288 +0,0 @@
-'use strict'
-// parse a 512-byte header block to a data object, or vice-versa
-// encode returns `true` if a pax extended header is needed, because
-// the data could not be faithfully encoded in a simple header.
-// (Also, check header.needPax to see if it needs a pax header.)
-
-const types = require('./types.js')
-const pathModule = require('path').posix
-const large = require('./large-numbers.js')
-
-const SLURP = Symbol('slurp')
-const TYPE = Symbol('type')
-
-class Header {
- constructor (data, off, ex, gex) {
- this.cksumValid = false
- this.needPax = false
- this.nullBlock = false
-
- this.block = null
- this.path = null
- this.mode = null
- this.uid = null
- this.gid = null
- this.size = null
- this.mtime = null
- this.cksum = null
- this[TYPE] = '0'
- this.linkpath = null
- this.uname = null
- this.gname = null
- this.devmaj = 0
- this.devmin = 0
- this.atime = null
- this.ctime = null
-
- if (Buffer.isBuffer(data))
- this.decode(data, off || 0, ex, gex)
- else if (data)
- this.set(data)
- }
-
- decode (buf, off, ex, gex) {
- if (!off)
- off = 0
-
- if (!buf || !(buf.length >= off + 512))
- throw new Error('need 512 bytes for header')
-
- this.path = decString(buf, off, 100)
- this.mode = decNumber(buf, off + 100, 8)
- this.uid = decNumber(buf, off + 108, 8)
- this.gid = decNumber(buf, off + 116, 8)
- this.size = decNumber(buf, off + 124, 12)
- this.mtime = decDate(buf, off + 136, 12)
- this.cksum = decNumber(buf, off + 148, 12)
-
- // if we have extended or global extended headers, apply them now
- // See https://github.com/npm/node-tar/pull/187
- this[SLURP](ex)
- this[SLURP](gex, true)
-
- // old tar versions marked dirs as a file with a trailing /
- this[TYPE] = decString(buf, off + 156, 1)
- if (this[TYPE] === '')
- this[TYPE] = '0'
- if (this[TYPE] === '0' && this.path.substr(-1) === '/')
- this[TYPE] = '5'
-
- // tar implementations sometimes incorrectly put the stat(dir).size
- // as the size in the tarball, even though Directory entries are
- // not able to have any body at all. In the very rare chance that
- // it actually DOES have a body, we weren't going to do anything with
- // it anyway, and it'll just be a warning about an invalid header.
- if (this[TYPE] === '5')
- this.size = 0
-
- this.linkpath = decString(buf, off + 157, 100)
- if (buf.slice(off + 257, off + 265).toString() === 'ustar\u000000') {
- this.uname = decString(buf, off + 265, 32)
- this.gname = decString(buf, off + 297, 32)
- this.devmaj = decNumber(buf, off + 329, 8)
- this.devmin = decNumber(buf, off + 337, 8)
- if (buf[off + 475] !== 0) {
- // definitely a prefix, definitely >130 chars.
- const prefix = decString(buf, off + 345, 155)
- this.path = prefix + '/' + this.path
- } else {
- const prefix = decString(buf, off + 345, 130)
- if (prefix)
- this.path = prefix + '/' + this.path
- this.atime = decDate(buf, off + 476, 12)
- this.ctime = decDate(buf, off + 488, 12)
- }
- }
-
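- // the checksum is computed as if the 8-byte cksum field itself
- // were all ASCII spaces (0x20), hence the 8 * 0x20 seed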
- let sum = 8 * 0x20
- for (let i = off; i < off + 148; i++) {
- sum += buf[i]
- }
- for (let i = off + 156; i < off + 512; i++) {
- sum += buf[i]
- }
- this.cksumValid = sum === this.cksum
- if (this.cksum === null && sum === 8 * 0x20)
- this.nullBlock = true
- }
-
- [SLURP] (ex, global) {
- for (let k in ex) {
- // we slurp in everything except for the path attribute in
- // a global extended header, because that's weird.
- if (ex[k] !== null && ex[k] !== undefined &&
- !(global && k === 'path'))
- this[k] = ex[k]
- }
- }
-
- encode (buf, off) {
- if (!buf) {
- buf = this.block = Buffer.alloc(512)
- off = 0
- }
-
- if (!off)
- off = 0
-
- if (!(buf.length >= off + 512))
- throw new Error('need 512 bytes for header')
-
- const prefixSize = this.ctime || this.atime ? 130 : 155
- const split = splitPrefix(this.path || '', prefixSize)
- const path = split[0]
- const prefix = split[1]
- this.needPax = split[2]
-
- this.needPax = encString(buf, off, 100, path) || this.needPax
- this.needPax = encNumber(buf, off + 100, 8, this.mode) || this.needPax
- this.needPax = encNumber(buf, off + 108, 8, this.uid) || this.needPax
- this.needPax = encNumber(buf, off + 116, 8, this.gid) || this.needPax
- this.needPax = encNumber(buf, off + 124, 12, this.size) || this.needPax
- this.needPax = encDate(buf, off + 136, 12, this.mtime) || this.needPax
- buf[off + 156] = this[TYPE].charCodeAt(0)
- this.needPax = encString(buf, off + 157, 100, this.linkpath) || this.needPax
- buf.write('ustar\u000000', off + 257, 8)
- this.needPax = encString(buf, off + 265, 32, this.uname) || this.needPax
- this.needPax = encString(buf, off + 297, 32, this.gname) || this.needPax
- this.needPax = encNumber(buf, off + 329, 8, this.devmaj) || this.needPax
- this.needPax = encNumber(buf, off + 337, 8, this.devmin) || this.needPax
- this.needPax = encString(buf, off + 345, prefixSize, prefix) || this.needPax
- if (buf[off + 475] !== 0)
- this.needPax = encString(buf, off + 345, 155, prefix) || this.needPax
- else {
- this.needPax = encString(buf, off + 345, 130, prefix) || this.needPax
- this.needPax = encDate(buf, off + 476, 12, this.atime) || this.needPax
- this.needPax = encDate(buf, off + 488, 12, this.ctime) || this.needPax
- }
-
- let sum = 8 * 0x20
- for (let i = off; i < off + 148; i++) {
- sum += buf[i]
- }
- for (let i = off + 156; i < off + 512; i++) {
- sum += buf[i]
- }
- this.cksum = sum
- encNumber(buf, off + 148, 8, this.cksum)
- this.cksumValid = true
-
- return this.needPax
- }
-
- set (data) {
- for (let i in data) {
- if (data[i] !== null && data[i] !== undefined)
- this[i] = data[i]
- }
- }
-
- get type () {
- return types.name.get(this[TYPE]) || this[TYPE]
- }
-
- get typeKey () {
- return this[TYPE]
- }
-
- set type (type) {
- if (types.code.has(type))
- this[TYPE] = types.code.get(type)
- else
- this[TYPE] = type
- }
-}
-
-const splitPrefix = (p, prefixSize) => {
- const pathSize = 100
- let pp = p
- let prefix = ''
- let ret
- const root = pathModule.parse(p).root || '.'
-
- if (Buffer.byteLength(pp) < pathSize)
- ret = [pp, prefix, false]
- else {
- // first set prefix to the dir, and path to the base
- prefix = pathModule.dirname(pp)
- pp = pathModule.basename(pp)
-
- do {
- // both fit!
- if (Buffer.byteLength(pp) <= pathSize &&
- Buffer.byteLength(prefix) <= prefixSize)
- ret = [pp, prefix, false]
-
- // prefix fits in prefix, but path doesn't fit in path
- else if (Buffer.byteLength(pp) > pathSize &&
- Buffer.byteLength(prefix) <= prefixSize)
- ret = [pp.substr(0, pathSize - 1), prefix, true]
-
- else {
- // make path take a bit from prefix
- pp = pathModule.join(pathModule.basename(prefix), pp)
- prefix = pathModule.dirname(prefix)
- }
- } while (prefix !== root && !ret)
-
- // at this point, found no resolution, just truncate
- if (!ret)
- ret = [p.substr(0, pathSize - 1), '', true]
- }
- return ret
-}
-
-const decString = (buf, off, size) =>
- buf.slice(off, off + size).toString('utf8').replace(/\0.*/, '')
-
-const decDate = (buf, off, size) =>
- numToDate(decNumber(buf, off, size))
-
-const numToDate = num => num === null ? null : new Date(num * 1000)
-
-const decNumber = (buf, off, size) =>
- buf[off] & 0x80 ? large.parse(buf.slice(off, off + size))
- : decSmallNumber(buf, off, size)
-
-const nanNull = value => isNaN(value) ? null : value
-
-const decSmallNumber = (buf, off, size) =>
- nanNull(parseInt(
- buf.slice(off, off + size)
- .toString('utf8').replace(/\0.*$/, '').trim(), 8))
-
-// the maximum encodable as a null-terminated octal, by field size
-const MAXNUM = {
- 12: 0o77777777777,
- 8 : 0o7777777
-}
-
-const encNumber = (buf, off, size, number) =>
- number === null ? false :
- number > MAXNUM[size] || number < 0
- ? (large.encode(number, buf.slice(off, off + size)), true)
- : (encSmallNumber(buf, off, size, number), false)
-
-const encSmallNumber = (buf, off, size, number) =>
- buf.write(octalString(number, size), off, size, 'ascii')
-
-const octalString = (number, size) =>
- padOctal(Math.floor(number).toString(8), size)
-
-const padOctal = (string, size) =>
- (string.length === size - 1 ? string
- : new Array(size - string.length - 1).join('0') + string + ' ') + '\0'
-
-const encDate = (buf, off, size, date) =>
- date === null ? false :
- encNumber(buf, off, size, date.getTime() / 1000)
-
-// enough to fill the longest string we've got
-const NULLS = new Array(156).join('\0')
-// pad with nulls, return true if it's longer or non-ascii
-const encString = (buf, off, size, string) =>
- string === null ? false :
- (buf.write(string + NULLS, off, size, 'utf8'),
- string.length !== Buffer.byteLength(string) || string.length > size)
-
-module.exports = Header
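
A round-trip sketch of the Header class (field values hypothetical):
encode() fills a 512-byte block and reports whether a pax extended
header would also be required.

const Header = require('./header.js')

const h = new Header({
  path: 'hello.txt',
  type: 'File',
  size: 5,
  mode: 0o644,
  mtime: new Date()
})
const needPax = h.encode()  // allocates and fills h.block (512 bytes)
console.log(needPax)        // false: everything fit in the ustar fields

const h2 = new Header(h.block)
console.log(h2.path, h2.cksumValid)  // 'hello.txt' true
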
diff --git a/node_modules/tar/lib/high-level-opt.js b/node_modules/tar/lib/high-level-opt.js
deleted file mode 100644
index 7333db9..0000000
--- a/node_modules/tar/lib/high-level-opt.js
+++ /dev/null
@@ -1,29 +0,0 @@
-'use strict'
-
-// turn tar(1) style args like `C` into the more verbose things like `cwd`
-
-const argmap = new Map([
- ['C', 'cwd'],
- ['f', 'file'],
- ['z', 'gzip'],
- ['P', 'preservePaths'],
- ['U', 'unlink'],
- ['strip-components', 'strip'],
- ['stripComponents', 'strip'],
- ['keep-newer', 'newer'],
- ['keepNewer', 'newer'],
- ['keep-newer-files', 'newer'],
- ['keepNewerFiles', 'newer'],
- ['k', 'keep'],
- ['keep-existing', 'keep'],
- ['keepExisting', 'keep'],
- ['m', 'noMtime'],
- ['no-mtime', 'noMtime'],
- ['p', 'preserveOwner'],
- ['L', 'follow'],
- ['h', 'follow']
-])
-
-const parse = module.exports = opt => opt ? Object.keys(opt).map(k => [
- argmap.has(k) ? argmap.get(k) : k, opt[k]
-]).reduce((set, kv) => (set[kv[0]] = kv[1], set), Object.create(null)) : {}
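
The mapping in action, as a sketch: terse tar(1)-style keys come out
as their verbose option names, and unrecognized keys pass through
untouched.

const hlo = require('./high-level-opt.js')

console.log(hlo({ C: 'build', f: 'out.tar', z: true, sync: true }))
// => { cwd: 'build', file: 'out.tar', gzip: true, sync: true }
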
diff --git a/node_modules/tar/lib/large-numbers.js b/node_modules/tar/lib/large-numbers.js
deleted file mode 100644
index ad30bc3..0000000
--- a/node_modules/tar/lib/large-numbers.js
+++ /dev/null
@@ -1,97 +0,0 @@
-'use strict'
-// Tar can encode large and negative numbers using a leading byte of
-// 0xff for negative, and 0x80 for positive.
-
-const encode = exports.encode = (num, buf) => {
- if (!Number.isSafeInteger(num))
- // The number is so large that javascript cannot represent it with integer
- // precision.
- throw Error('cannot encode number outside of javascript safe integer range')
- else if (num < 0)
- encodeNegative(num, buf)
- else
- encodePositive(num, buf)
- return buf
-}
-
-const encodePositive = (num, buf) => {
- buf[0] = 0x80
-
- for (var i = buf.length; i > 1; i--) {
- buf[i-1] = num & 0xff
- num = Math.floor(num / 0x100)
- }
-}
-
-const encodeNegative = (num, buf) => {
- buf[0] = 0xff
- var flipped = false
- num = num * -1
- for (var i = buf.length; i > 1; i--) {
- var byte = num & 0xff
- num = Math.floor(num / 0x100)
- if (flipped)
- buf[i-1] = onesComp(byte)
- else if (byte === 0)
- buf[i-1] = 0
- else {
- flipped = true
- buf[i-1] = twosComp(byte)
- }
- }
-}
-
-const parse = exports.parse = (buf) => {
- var pre = buf[0]
- var value
- if (pre === 0x80)
- value = pos(buf.slice(1, buf.length))
- else if (pre === 0xff)
- value = twos(buf)
- else
- throw Error('invalid base256 encoding')
-
- if (!Number.isSafeInteger(value))
- // The number is so large that javascript cannot represent it with integer
- // precision.
- throw Error('parsed number outside of javascript safe integer range')
-
- return value
-}
-
-const twos = (buf) => {
- var len = buf.length
- var sum = 0
- var flipped = false
- for (var i = len - 1; i > -1; i--) {
- var byte = buf[i]
- var f
- if (flipped)
- f = onesComp(byte)
- else if (byte === 0)
- f = byte
- else {
- flipped = true
- f = twosComp(byte)
- }
- if (f !== 0)
- sum -= f * Math.pow(256, len - i - 1)
- }
- return sum
-}
-
-const pos = (buf) => {
- var len = buf.length
- var sum = 0
- for (var i = len - 1; i > -1; i--) {
- var byte = buf[i]
- if (byte !== 0)
- sum += byte * Math.pow(256, len - i - 1)
- }
- return sum
-}
-
-const onesComp = byte => (0xff ^ byte) & 0xff
-
-const twosComp = byte => ((0xff ^ byte) + 1) & 0xff
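
A quick round trip, as a sketch: 8 GiB does not fit in a 12-byte
null-terminated octal field (max 0o77777777777 = 8589934591), so it
gets the 0x80-prefixed base-256 form.

const large = require('./large-numbers.js')

const buf = Buffer.alloc(12)
large.encode(8 * 1024 ** 3, buf)  // 8589934592, one past the octal max
console.log(buf[0].toString(16))  // '80': positive base-256 marker
console.log(large.parse(buf))     // 8589934592
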
diff --git a/node_modules/tar/lib/list.js b/node_modules/tar/lib/list.js
deleted file mode 100644
index 9da3f81..0000000
--- a/node_modules/tar/lib/list.js
+++ /dev/null
@@ -1,128 +0,0 @@
-'use strict'
-
-// XXX: This has a lot in common with extract.js
-// maybe some DRY opportunity here?
-
-// tar -t
-const hlo = require('./high-level-opt.js')
-const Parser = require('./parse.js')
-const fs = require('fs')
-const fsm = require('fs-minipass')
-const path = require('path')
-
-const t = module.exports = (opt_, files, cb) => {
- if (typeof opt_ === 'function')
- cb = opt_, files = null, opt_ = {}
- else if (Array.isArray(opt_))
- files = opt_, opt_ = {}
-
- if (typeof files === 'function')
- cb = files, files = null
-
- if (!files)
- files = []
- else
- files = Array.from(files)
-
- const opt = hlo(opt_)
-
- if (opt.sync && typeof cb === 'function')
- throw new TypeError('callback not supported for sync tar functions')
-
- if (!opt.file && typeof cb === 'function')
- throw new TypeError('callback only supported with file option')
-
- if (files.length)
- filesFilter(opt, files)
-
- if (!opt.noResume)
- onentryFunction(opt)
-
- return opt.file && opt.sync ? listFileSync(opt)
- : opt.file ? listFile(opt, cb)
- : list(opt)
-}
-
-const onentryFunction = opt => {
- const onentry = opt.onentry
- opt.onentry = onentry ? e => {
- onentry(e)
- e.resume()
- } : e => e.resume()
-}
-
-// construct a filter that limits the file entries listed
-// include child entries if a dir is included
-const filesFilter = (opt, files) => {
- const map = new Map(files.map(f => [f.replace(/\/+$/, ''), true]))
- const filter = opt.filter
-
- const mapHas = (file, r) => {
- const root = r || path.parse(file).root || '.'
- const ret = file === root ? false
- : map.has(file) ? map.get(file)
- : mapHas(path.dirname(file), root)
-
- map.set(file, ret)
- return ret
- }
-
- opt.filter = filter
- ? (file, entry) => filter(file, entry) && mapHas(file.replace(/\/+$/, ''))
- : file => mapHas(file.replace(/\/+$/, ''))
-}
-
-const listFileSync = opt => {
- const p = list(opt)
- const file = opt.file
- let threw = true
- let fd
- try {
- const stat = fs.statSync(file)
- const readSize = opt.maxReadSize || 16*1024*1024
- if (stat.size < readSize) {
- p.end(fs.readFileSync(file))
- } else {
- let pos = 0
- const buf = Buffer.allocUnsafe(readSize)
- fd = fs.openSync(file, 'r')
- while (pos < stat.size) {
- let bytesRead = fs.readSync(fd, buf, 0, readSize, pos)
- pos += bytesRead
- p.write(buf.slice(0, bytesRead))
- }
- p.end()
- }
- threw = false
- } finally {
- if (threw && fd)
- try { fs.closeSync(fd) } catch (er) {}
- }
-}
-
-const listFile = (opt, cb) => {
- const parse = new Parser(opt)
- const readSize = opt.maxReadSize || 16*1024*1024
-
- const file = opt.file
- const p = new Promise((resolve, reject) => {
- parse.on('error', reject)
- parse.on('end', resolve)
-
- fs.stat(file, (er, stat) => {
- if (er)
- reject(er)
- else {
- const stream = new fsm.ReadStream(file, {
- readSize: readSize,
- size: stat.size
- })
- stream.on('error', reject)
- stream.pipe(parse)
- }
- })
- })
- return cb ? p.then(cb, cb) : p
-}
-
-const list = opt => new Parser(opt)
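
The listing call this module backs (exposed as tar.t via index.js), as
a sketch with the archive name hypothetical. Note the onentry wrapper
above: unless noResume is set, each entry is resumed automatically so
listing never stalls the parse.

const tar = require('tar')

tar.t({
  file: 'archive.tgz',
  onentry: entry => console.log(entry.path, entry.size)
}).then(() => console.log('done'))
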
diff --git a/node_modules/tar/lib/mkdir.js b/node_modules/tar/lib/mkdir.js
deleted file mode 100644
index 381d0e1..0000000
--- a/node_modules/tar/lib/mkdir.js
+++ /dev/null
@@ -1,206 +0,0 @@
-'use strict'
-// wrapper around mkdirp for tar's needs.
-
-// TODO: This should probably be a class, rather than functions
-// passing state around in a gazillion args.
-
-const mkdirp = require('mkdirp')
-const fs = require('fs')
-const path = require('path')
-const chownr = require('chownr')
-
-class SymlinkError extends Error {
- constructor (symlink, path) {
- super('Cannot extract through symbolic link')
- this.path = path
- this.symlink = symlink
- }
-
- get name () {
- return 'SymlinkError'
- }
-}
-
-class CwdError extends Error {
- constructor (path, code) {
- super(code + ': Cannot cd into \'' + path + '\'')
- this.path = path
- this.code = code
- }
-
- get name () {
- return 'CwdError'
- }
-}
-
-const mkdir = module.exports = (dir, opt, cb) => {
- // if there's any overlap between mask and mode,
- // then we'll need an explicit chmod
- const umask = opt.umask
- const mode = opt.mode | 0o0700
- const needChmod = (mode & umask) !== 0
-
- const uid = opt.uid
- const gid = opt.gid
- const doChown = typeof uid === 'number' &&
- typeof gid === 'number' &&
- ( uid !== opt.processUid || gid !== opt.processGid )
-
- const preserve = opt.preserve
- const unlink = opt.unlink
- const cache = opt.cache
- const cwd = opt.cwd
-
- const done = (er, created) => {
- if (er)
- cb(er)
- else {
- cache.set(dir, true)
- if (created && doChown)
- chownr(created, uid, gid, er => done(er))
- else if (needChmod)
- fs.chmod(dir, mode, cb)
- else
- cb()
- }
- }
-
- if (cache && cache.get(dir) === true)
- return done()
-
- if (dir === cwd)
- return fs.stat(dir, (er, st) => {
- if (er || !st.isDirectory())
- er = new CwdError(dir, er && er.code || 'ENOTDIR')
- done(er)
- })
-
- if (preserve)
- return mkdirp(dir, {mode}).then(made => done(null, made), done)
-
- const sub = path.relative(cwd, dir)
- const parts = sub.split(/\/|\\/)
- mkdir_(cwd, parts, mode, cache, unlink, cwd, null, done)
-}
-
-const mkdir_ = (base, parts, mode, cache, unlink, cwd, created, cb) => {
- if (!parts.length)
- return cb(null, created)
- const p = parts.shift()
- const part = base + '/' + p
- if (cache.get(part))
- return mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
- fs.mkdir(part, mode, onmkdir(part, parts, mode, cache, unlink, cwd, created, cb))
-}
-
-const onmkdir = (part, parts, mode, cache, unlink, cwd, created, cb) => er => {
- if (er) {
- if (er.path && path.dirname(er.path) === cwd &&
- (er.code === 'ENOTDIR' || er.code === 'ENOENT'))
- return cb(new CwdError(cwd, er.code))
-
- fs.lstat(part, (statEr, st) => {
- if (statEr)
- cb(statEr)
- else if (st.isDirectory())
- mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
- else if (unlink)
- fs.unlink(part, er => {
- if (er)
- return cb(er)
- fs.mkdir(part, mode, onmkdir(part, parts, mode, cache, unlink, cwd, created, cb))
- })
- else if (st.isSymbolicLink())
- return cb(new SymlinkError(part, part + '/' + parts.join('/')))
- else
- cb(er)
- })
- } else {
- created = created || part
- mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
- }
-}
-
-const mkdirSync = module.exports.sync = (dir, opt) => {
- // if there's any overlap between mask and mode,
- // then we'll need an explicit chmod
- const umask = opt.umask
- const mode = opt.mode | 0o0700
- const needChmod = (mode & umask) !== 0
-
- const uid = opt.uid
- const gid = opt.gid
- const doChown = typeof uid === 'number' &&
- typeof gid === 'number' &&
- ( uid !== opt.processUid || gid !== opt.processGid )
-
- const preserve = opt.preserve
- const unlink = opt.unlink
- const cache = opt.cache
- const cwd = opt.cwd
-
- const done = (created) => {
- cache.set(dir, true)
- if (created && doChown)
- chownr.sync(created, uid, gid)
- if (needChmod)
- fs.chmodSync(dir, mode)
- }
-
- if (cache && cache.get(dir) === true)
- return done()
-
- if (dir === cwd) {
- let ok = false
- let code = 'ENOTDIR'
- try {
- ok = fs.statSync(dir).isDirectory()
- } catch (er) {
- code = er.code
- } finally {
- if (!ok)
- throw new CwdError(dir, code)
- }
- done()
- return
- }
-
- if (preserve)
- return done(mkdirp.sync(dir, mode))
-
- const sub = path.relative(cwd, dir)
- const parts = sub.split(/\/|\\/)
- let created = null
- for (let p = parts.shift(), part = cwd;
- p && (part += '/' + p);
- p = parts.shift()) {
-
- if (cache.get(part))
- continue
-
- try {
- fs.mkdirSync(part, mode)
- created = created || part
- cache.set(part, true)
- } catch (er) {
- if (er.path && path.dirname(er.path) === cwd &&
- (er.code === 'ENOTDIR' || er.code === 'ENOENT'))
- return new CwdError(cwd, er.code)
-
- const st = fs.lstatSync(part)
- if (st.isDirectory()) {
- cache.set(part, true)
- continue
- } else if (unlink) {
- fs.unlinkSync(part)
- fs.mkdirSync(part, mode)
- created = created || part
- cache.set(part, true)
- continue
- } else if (st.isSymbolicLink())
- return new SymlinkError(part, part + '/' + parts.join('/'))
- }
- }
-
- return done(created)
-}
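
A sketch of how unpack drives the sync variant (paths and options
hypothetical, POSIX-style). The cache map is what lets repeated
entries under the same directory skip the mkdir entirely.

const mkdir = require('./mkdir.js')

const cache = new Map()
mkdir.sync('/tmp/unpack-target/a/b/c', {
  cwd: '/tmp/unpack-target',  // must already exist and be a directory
  mode: 0o755,
  umask: 0o22,
  cache,
  preserve: false,
  unlink: false
})
// subsequent calls for paths under /tmp/unpack-target/a/b hit the cache
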
diff --git a/node_modules/tar/lib/mode-fix.js b/node_modules/tar/lib/mode-fix.js
deleted file mode 100644
index c375874..0000000
--- a/node_modules/tar/lib/mode-fix.js
+++ /dev/null
@@ -1,24 +0,0 @@
-'use strict'
-module.exports = (mode, isDir, portable) => {
- mode &= 0o7777
-
- // in portable mode, apply the minimum reasonable umask:
- // if this system creates files with 0o664 by default
- // (as some linux distros do), then we'll write the
- // archive with 0o644 instead. Also, don't ever create
- // a file that is not readable/writable by the owner.
- if (portable) {
- mode = (mode | 0o600) &~0o22
- }
-
- // if dirs are readable, then they should be listable
- if (isDir) {
- if (mode & 0o400)
- mode |= 0o100
- if (mode & 0o40)
- mode |= 0o10
- if (mode & 0o4)
- mode |= 0o1
- }
- return mode
-}
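
Two illustrative calls (mode values arbitrary): a readable directory is
made listable, and portable mode normalizes group/other write bits away.

const modeFix = require('./mode-fix.js')

console.log(modeFix(0o640, true, false).toString(8))  // '750': read implies list on dirs
console.log(modeFix(0o666, false, true).toString(8))  // '644': portable strips 0o022
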
diff --git a/node_modules/tar/lib/pack.js b/node_modules/tar/lib/pack.js
deleted file mode 100644
index 0fca4ae..0000000
--- a/node_modules/tar/lib/pack.js
+++ /dev/null
@@ -1,403 +0,0 @@
-'use strict'
-
-// A readable tar stream creator
-// Technically, this is a transform stream that you write paths into,
-// and tar format comes out of.
-// The `add()` method is like `write()` but returns this,
-// and end() returns `this` as well, so you can
-// do `new Pack(opt).add('files').add('dir').end().pipe(output)`
-// You could also do something like:
-// streamOfPaths().pipe(new Pack()).pipe(new fs.WriteStream('out.tar'))
-
-class PackJob {
- constructor (path, absolute) {
- this.path = path || './'
- this.absolute = absolute
- this.entry = null
- this.stat = null
- this.readdir = null
- this.pending = false
- this.ignore = false
- this.piped = false
- }
-}
-
-const MiniPass = require('minipass')
-const zlib = require('minizlib')
-const ReadEntry = require('./read-entry.js')
-const WriteEntry = require('./write-entry.js')
-const WriteEntrySync = WriteEntry.Sync
-const WriteEntryTar = WriteEntry.Tar
-const Yallist = require('yallist')
-const EOF = Buffer.alloc(1024)
-const ONSTAT = Symbol('onStat')
-const ENDED = Symbol('ended')
-const QUEUE = Symbol('queue')
-const CURRENT = Symbol('current')
-const PROCESS = Symbol('process')
-const PROCESSING = Symbol('processing')
-const PROCESSJOB = Symbol('processJob')
-const JOBS = Symbol('jobs')
-const JOBDONE = Symbol('jobDone')
-const ADDFSENTRY = Symbol('addFSEntry')
-const ADDTARENTRY = Symbol('addTarEntry')
-const STAT = Symbol('stat')
-const READDIR = Symbol('readdir')
-const ONREADDIR = Symbol('onreaddir')
-const PIPE = Symbol('pipe')
-const ENTRY = Symbol('entry')
-const ENTRYOPT = Symbol('entryOpt')
-const WRITEENTRYCLASS = Symbol('writeEntryClass')
-const WRITE = Symbol('write')
-const ONDRAIN = Symbol('ondrain')
-
-const fs = require('fs')
-const path = require('path')
-const warner = require('./warn-mixin.js')
-
-const Pack = warner(class Pack extends MiniPass {
- constructor (opt) {
- super(opt)
- opt = opt || Object.create(null)
- this.opt = opt
- this.file = opt.file || ''
- this.cwd = opt.cwd || process.cwd()
- this.maxReadSize = opt.maxReadSize
- this.preservePaths = !!opt.preservePaths
- this.strict = !!opt.strict
- this.noPax = !!opt.noPax
- this.prefix = (opt.prefix || '').replace(/(\\|\/)+$/, '')
- this.linkCache = opt.linkCache || new Map()
- this.statCache = opt.statCache || new Map()
- this.readdirCache = opt.readdirCache || new Map()
-
- this[WRITEENTRYCLASS] = WriteEntry
- if (typeof opt.onwarn === 'function')
- this.on('warn', opt.onwarn)
-
- this.portable = !!opt.portable
- this.zip = null
- if (opt.gzip) {
- if (typeof opt.gzip !== 'object')
- opt.gzip = {}
- if (this.portable)
- opt.gzip.portable = true
- this.zip = new zlib.Gzip(opt.gzip)
- this.zip.on('data', chunk => super.write(chunk))
- this.zip.on('end', _ => super.end())
- this.zip.on('drain', _ => this[ONDRAIN]())
- this.on('resume', _ => this.zip.resume())
- } else
- this.on('drain', this[ONDRAIN])
-
- this.noDirRecurse = !!opt.noDirRecurse
- this.follow = !!opt.follow
- this.noMtime = !!opt.noMtime
- this.mtime = opt.mtime || null
-
- this.filter = typeof opt.filter === 'function' ? opt.filter : _ => true
-
- this[QUEUE] = new Yallist
- this[JOBS] = 0
- this.jobs = +opt.jobs || 4
- this[PROCESSING] = false
- this[ENDED] = false
- }
-
- [WRITE] (chunk) {
- return super.write(chunk)
- }
-
- add (path) {
- this.write(path)
- return this
- }
-
- end (path) {
- if (path)
- this.write(path)
- this[ENDED] = true
- this[PROCESS]()
- return this
- }
-
- write (path) {
- if (this[ENDED])
- throw new Error('write after end')
-
- if (path instanceof ReadEntry)
- this[ADDTARENTRY](path)
- else
- this[ADDFSENTRY](path)
- return this.flowing
- }
-
- [ADDTARENTRY] (p) {
- const absolute = path.resolve(this.cwd, p.path)
- if (this.prefix)
- p.path = this.prefix + '/' + p.path.replace(/^\.(\/+|$)/, '')
-
- // in this case, we don't have to wait for the stat
- if (!this.filter(p.path, p))
- p.resume()
- else {
- const job = new PackJob(p.path, absolute)
- job.entry = new WriteEntryTar(p, this[ENTRYOPT](job))
- job.entry.on('end', _ => this[JOBDONE](job))
- this[JOBS] += 1
- this[QUEUE].push(job)
- }
-
- this[PROCESS]()
- }
-
- [ADDFSENTRY] (p) {
- const absolute = path.resolve(this.cwd, p)
- if (this.prefix)
- p = this.prefix + '/' + p.replace(/^\.(\/+|$)/, '')
-
- this[QUEUE].push(new PackJob(p, absolute))
- this[PROCESS]()
- }
-
- [STAT] (job) {
- job.pending = true
- this[JOBS] += 1
- const stat = this.follow ? 'stat' : 'lstat'
- fs[stat](job.absolute, (er, stat) => {
- job.pending = false
- this[JOBS] -= 1
- if (er)
- this.emit('error', er)
- else
- this[ONSTAT](job, stat)
- })
- }
-
- [ONSTAT] (job, stat) {
- this.statCache.set(job.absolute, stat)
- job.stat = stat
-
- // now we have the stat, we can filter it.
- if (!this.filter(job.path, stat))
- job.ignore = true
-
- this[PROCESS]()
- }
-
- [READDIR] (job) {
- job.pending = true
- this[JOBS] += 1
- fs.readdir(job.absolute, (er, entries) => {
- job.pending = false
- this[JOBS] -= 1
- if (er)
- return this.emit('error', er)
- this[ONREADDIR](job, entries)
- })
- }
-
- [ONREADDIR] (job, entries) {
- this.readdirCache.set(job.absolute, entries)
- job.readdir = entries
- this[PROCESS]()
- }
-
- [PROCESS] () {
- if (this[PROCESSING])
- return
-
- this[PROCESSING] = true
- for (let w = this[QUEUE].head;
- w !== null && this[JOBS] < this.jobs;
- w = w.next) {
- this[PROCESSJOB](w.value)
- if (w.value.ignore) {
- const p = w.next
- this[QUEUE].removeNode(w)
- w.next = p
- }
- }
-
- this[PROCESSING] = false
-
- if (this[ENDED] && !this[QUEUE].length && this[JOBS] === 0) {
- if (this.zip)
- this.zip.end(EOF)
- else {
- super.write(EOF)
- super.end()
- }
- }
- }
-
- get [CURRENT] () {
- return this[QUEUE] && this[QUEUE].head && this[QUEUE].head.value
- }
-
- [JOBDONE] (job) {
- this[QUEUE].shift()
- this[JOBS] -= 1
- this[PROCESS]()
- }
-
- [PROCESSJOB] (job) {
- if (job.pending)
- return
-
- if (job.entry) {
- if (job === this[CURRENT] && !job.piped)
- this[PIPE](job)
- return
- }
-
- if (!job.stat) {
- if (this.statCache.has(job.absolute))
- this[ONSTAT](job, this.statCache.get(job.absolute))
- else
- this[STAT](job)
- }
- if (!job.stat)
- return
-
- // filtered out!
- if (job.ignore)
- return
-
- if (!this.noDirRecurse && job.stat.isDirectory() && !job.readdir) {
- if (this.readdirCache.has(job.absolute))
- this[ONREADDIR](job, this.readdirCache.get(job.absolute))
- else
- this[READDIR](job)
- if (!job.readdir)
- return
- }
-
- // we know it doesn't have an entry, because that got checked above
- job.entry = this[ENTRY](job)
- if (!job.entry) {
- job.ignore = true
- return
- }
-
- if (job === this[CURRENT] && !job.piped)
- this[PIPE](job)
- }
-
- [ENTRYOPT] (job) {
- return {
- onwarn: (code, msg, data) => this.warn(code, msg, data),
- noPax: this.noPax,
- cwd: this.cwd,
- absolute: job.absolute,
- preservePaths: this.preservePaths,
- maxReadSize: this.maxReadSize,
- strict: this.strict,
- portable: this.portable,
- linkCache: this.linkCache,
- statCache: this.statCache,
- noMtime: this.noMtime,
- mtime: this.mtime
- }
- }
-
- [ENTRY] (job) {
- this[JOBS] += 1
- try {
- return new this[WRITEENTRYCLASS](job.path, this[ENTRYOPT](job))
- .on('end', () => this[JOBDONE](job))
- .on('error', er => this.emit('error', er))
- } catch (er) {
- this.emit('error', er)
- }
- }
-
- [ONDRAIN] () {
- if (this[CURRENT] && this[CURRENT].entry)
- this[CURRENT].entry.resume()
- }
-
- // like .pipe() but using super, because our write() is special
- [PIPE] (job) {
- job.piped = true
-
- if (job.readdir)
- job.readdir.forEach(entry => {
- const p = this.prefix ?
- job.path.slice(this.prefix.length + 1) || './'
- : job.path
-
- const base = p === './' ? '' : p.replace(/\/*$/, '/')
- this[ADDFSENTRY](base + entry)
- })
-
- const source = job.entry
- const zip = this.zip
-
- if (zip)
- source.on('data', chunk => {
- if (!zip.write(chunk))
- source.pause()
- })
- else
- source.on('data', chunk => {
- if (!super.write(chunk))
- source.pause()
- })
- }
-
- pause () {
- if (this.zip)
- this.zip.pause()
- return super.pause()
- }
-})
-
-class PackSync extends Pack {
- constructor (opt) {
- super(opt)
- this[WRITEENTRYCLASS] = WriteEntrySync
- }
-
- // pause/resume are no-ops in sync streams.
- pause () {}
- resume () {}
-
- [STAT] (job) {
- const stat = this.follow ? 'statSync' : 'lstatSync'
- this[ONSTAT](job, fs[stat](job.absolute))
- }
-
- [READDIR] (job, stat) {
- this[ONREADDIR](job, fs.readdirSync(job.absolute))
- }
-
- // gotta get it all in this tick
- [PIPE] (job) {
- const source = job.entry
- const zip = this.zip
-
- if (job.readdir)
- job.readdir.forEach(entry => {
- const p = this.prefix ?
- job.path.slice(this.prefix.length + 1) || './'
- : job.path
-
- const base = p === './' ? '' : p.replace(/\/*$/, '/')
- this[ADDFSENTRY](base + entry)
- })
-
- if (zip)
- source.on('data', chunk => {
- zip.write(chunk)
- })
- else
- source.on('data', chunk => {
- super[WRITE](chunk)
- })
- }
-}
-
-Pack.Sync = PackSync
-
-module.exports = Pack
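
The fluent style described in the comment at the top of this file, as
a sketch (paths hypothetical):

const fs = require('fs')
const Pack = require('./pack.js')

new Pack({ cwd: process.cwd(), gzip: true, portable: true })
  .add('lib')
  .add('package.json')
  .end()
  .pipe(fs.createWriteStream('out.tgz'))
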
diff --git a/node_modules/tar/lib/parse.js b/node_modules/tar/lib/parse.js
deleted file mode 100644
index d9a49ad..0000000
--- a/node_modules/tar/lib/parse.js
+++ /dev/null
@@ -1,483 +0,0 @@
-'use strict'
-
-// this[BUFFER] is the remainder of a chunk if we're waiting for
-// the full 512 bytes of a header to come in. We will Buffer.concat()
-// it to the next write(), which is a mem copy, but a small one.
-//
-// this[QUEUE] is a Yallist of entries that haven't been emitted yet.
-// It can only fill up if the user keeps write()ing after a write()
-// returns false, or does a write() with more than one entry in it
-//
-// We don't buffer chunks, we always parse them and either create an
-// entry, or push it into the active entry. The ReadEntry class knows
-// to throw data away if .ignore=true
-//
-// Shift entry off the buffer when it emits 'end', and emit 'entry' for
-// the next one in the list.
-//
-// At any time, we're pushing body chunks into the entry at WRITEENTRY,
-// and waiting for 'end' on the entry at READENTRY
-//
-// ignored entries get .resume() called on them straight away
-
-const warner = require('./warn-mixin.js')
-const path = require('path')
-const Header = require('./header.js')
-const EE = require('events')
-const Yallist = require('yallist')
-const maxMetaEntrySize = 1024 * 1024
-const Entry = require('./read-entry.js')
-const Pax = require('./pax.js')
-const zlib = require('minizlib')
-
-const gzipHeader = Buffer.from([0x1f, 0x8b])
-const STATE = Symbol('state')
-const WRITEENTRY = Symbol('writeEntry')
-const READENTRY = Symbol('readEntry')
-const NEXTENTRY = Symbol('nextEntry')
-const PROCESSENTRY = Symbol('processEntry')
-const EX = Symbol('extendedHeader')
-const GEX = Symbol('globalExtendedHeader')
-const META = Symbol('meta')
-const EMITMETA = Symbol('emitMeta')
-const BUFFER = Symbol('buffer')
-const QUEUE = Symbol('queue')
-const ENDED = Symbol('ended')
-const EMITTEDEND = Symbol('emittedEnd')
-const EMIT = Symbol('emit')
-const UNZIP = Symbol('unzip')
-const CONSUMECHUNK = Symbol('consumeChunk')
-const CONSUMECHUNKSUB = Symbol('consumeChunkSub')
-const CONSUMEBODY = Symbol('consumeBody')
-const CONSUMEMETA = Symbol('consumeMeta')
-const CONSUMEHEADER = Symbol('consumeHeader')
-const CONSUMING = Symbol('consuming')
-const BUFFERCONCAT = Symbol('bufferConcat')
-const MAYBEEND = Symbol('maybeEnd')
-const WRITING = Symbol('writing')
-const ABORTED = Symbol('aborted')
-const DONE = Symbol('onDone')
-const SAW_VALID_ENTRY = Symbol('sawValidEntry')
-const SAW_NULL_BLOCK = Symbol('sawNullBlock')
-const SAW_EOF = Symbol('sawEOF')
-
-const noop = _ => true
-
-module.exports = warner(class Parser extends EE {
- constructor (opt) {
- opt = opt || {}
- super(opt)
-
- this.file = opt.file || ''
-
- // set to boolean false when an entry starts. 1024 bytes of \0
- // is technically a valid tarball, albeit a boring one.
- this[SAW_VALID_ENTRY] = null
-
- // these BADARCHIVE errors can't be detected early. listen on DONE.
- this.on(DONE, _ => {
- if (this[STATE] === 'begin' || this[SAW_VALID_ENTRY] === false) {
- // either less than 1 block of data, or all entries were invalid.
- // Either way, probably not even a tarball.
- this.warn('TAR_BAD_ARCHIVE', 'Unrecognized archive format')
- }
- })
-
- if (opt.ondone)
- this.on(DONE, opt.ondone)
- else
- this.on(DONE, _ => {
- this.emit('prefinish')
- this.emit('finish')
- this.emit('end')
- this.emit('close')
- })
-
- this.strict = !!opt.strict
- this.maxMetaEntrySize = opt.maxMetaEntrySize || maxMetaEntrySize
- this.filter = typeof opt.filter === 'function' ? opt.filter : noop
-
- // have to set this so that streams are ok piping into it
- this.writable = true
- this.readable = false
-
- this[QUEUE] = new Yallist()
- this[BUFFER] = null
- this[READENTRY] = null
- this[WRITEENTRY] = null
- this[STATE] = 'begin'
- this[META] = ''
- this[EX] = null
- this[GEX] = null
- this[ENDED] = false
- this[UNZIP] = null
- this[ABORTED] = false
- this[SAW_NULL_BLOCK] = false
- this[SAW_EOF] = false
- if (typeof opt.onwarn === 'function')
- this.on('warn', opt.onwarn)
- if (typeof opt.onentry === 'function')
- this.on('entry', opt.onentry)
- }
-
- [CONSUMEHEADER] (chunk, position) {
- if (this[SAW_VALID_ENTRY] === null)
- this[SAW_VALID_ENTRY] = false
- let header
- try {
- header = new Header(chunk, position, this[EX], this[GEX])
- } catch (er) {
- return this.warn('TAR_ENTRY_INVALID', er)
- }
-
- if (header.nullBlock) {
- if (this[SAW_NULL_BLOCK]) {
- this[SAW_EOF] = true
- // ending an archive with no entries. pointless, but legal.
- if (this[STATE] === 'begin')
- this[STATE] = 'header'
- this[EMIT]('eof')
- } else {
- this[SAW_NULL_BLOCK] = true
- this[EMIT]('nullBlock')
- }
- } else {
- this[SAW_NULL_BLOCK] = false
- if (!header.cksumValid)
- this.warn('TAR_ENTRY_INVALID', 'checksum failure', {header})
- else if (!header.path)
- this.warn('TAR_ENTRY_INVALID', 'path is required', {header})
- else {
- const type = header.type
- if (/^(Symbolic)?Link$/.test(type) && !header.linkpath)
- this.warn('TAR_ENTRY_INVALID', 'linkpath required', {header})
- else if (!/^(Symbolic)?Link$/.test(type) && header.linkpath)
- this.warn('TAR_ENTRY_INVALID', 'linkpath forbidden', {header})
- else {
- const entry = this[WRITEENTRY] = new Entry(header, this[EX], this[GEX])
-
- // we do this for meta & ignored entries as well, because they
- // are still valid tar, or else we wouldn't know to ignore them
- if (!this[SAW_VALID_ENTRY]) {
- if (entry.remain) {
- // this might be the one!
- const onend = () => {
- if (!entry.invalid)
- this[SAW_VALID_ENTRY] = true
- }
- entry.on('end', onend)
- } else {
- this[SAW_VALID_ENTRY] = true
- }
- }
-
- if (entry.meta) {
- if (entry.size > this.maxMetaEntrySize) {
- entry.ignore = true
- this[EMIT]('ignoredEntry', entry)
- this[STATE] = 'ignore'
- entry.resume()
- } else if (entry.size > 0) {
- this[META] = ''
- entry.on('data', c => this[META] += c)
- this[STATE] = 'meta'
- }
- } else {
- this[EX] = null
- entry.ignore = entry.ignore || !this.filter(entry.path, entry)
-
- if (entry.ignore) {
- // probably valid, just not something we care about
- this[EMIT]('ignoredEntry', entry)
- this[STATE] = entry.remain ? 'ignore' : 'header'
- entry.resume()
- } else {
- if (entry.remain)
- this[STATE] = 'body'
- else {
- this[STATE] = 'header'
- entry.end()
- }
-
- if (!this[READENTRY]) {
- this[QUEUE].push(entry)
- this[NEXTENTRY]()
- } else
- this[QUEUE].push(entry)
- }
- }
- }
- }
- }
- }
-
- [PROCESSENTRY] (entry) {
- let go = true
-
- if (!entry) {
- this[READENTRY] = null
- go = false
- } else if (Array.isArray(entry))
- this.emit.apply(this, entry)
- else {
- this[READENTRY] = entry
- this.emit('entry', entry)
- if (!entry.emittedEnd) {
- entry.on('end', _ => this[NEXTENTRY]())
- go = false
- }
- }
-
- return go
- }
-
- [NEXTENTRY] () {
- do {} while (this[PROCESSENTRY](this[QUEUE].shift()))
-
- if (!this[QUEUE].length) {
- // At this point, there's nothing in the queue, but we may have an
- // entry which is being consumed (readEntry).
- // If we don't, then we definitely can handle more data.
- // If we do, and either it's flowing, or it has never had any data
- // written to it, then it needs more.
- // The only other possibility is that it has returned false from a
- // write() call, so we wait for the next drain to continue.
- const re = this[READENTRY]
- const drainNow = !re || re.flowing || re.size === re.remain
- if (drainNow) {
- if (!this[WRITING])
- this.emit('drain')
- } else
- re.once('drain', _ => this.emit('drain'))
- }
- }
-
- [CONSUMEBODY] (chunk, position) {
- // write up to but no more than writeEntry.blockRemain
- const entry = this[WRITEENTRY]
- const br = entry.blockRemain
- const c = (br >= chunk.length && position === 0) ? chunk
- : chunk.slice(position, position + br)
-
- entry.write(c)
-
- if (!entry.blockRemain) {
- this[STATE] = 'header'
- this[WRITEENTRY] = null
- entry.end()
- }
-
- return c.length
- }
-
- [CONSUMEMETA] (chunk, position) {
- const entry = this[WRITEENTRY]
- const ret = this[CONSUMEBODY](chunk, position)
-
- // if we finished, then the entry is reset
- if (!this[WRITEENTRY])
- this[EMITMETA](entry)
-
- return ret
- }
-
- [EMIT] (ev, data, extra) {
- if (!this[QUEUE].length && !this[READENTRY])
- this.emit(ev, data, extra)
- else
- this[QUEUE].push([ev, data, extra])
- }
-
- [EMITMETA] (entry) {
- this[EMIT]('meta', this[META])
- switch (entry.type) {
- case 'ExtendedHeader':
- case 'OldExtendedHeader':
- this[EX] = Pax.parse(this[META], this[EX], false)
- break
-
- case 'GlobalExtendedHeader':
- this[GEX] = Pax.parse(this[META], this[GEX], true)
- break
-
- case 'NextFileHasLongPath':
- case 'OldGnuLongPath':
- this[EX] = this[EX] || Object.create(null)
- this[EX].path = this[META].replace(/\0.*/, '')
- break
-
- case 'NextFileHasLongLinkpath':
- this[EX] = this[EX] || Object.create(null)
- this[EX].linkpath = this[META].replace(/\0.*/, '')
- break
-
- /* istanbul ignore next */
- default: throw new Error('unknown meta: ' + entry.type)
- }
- }
-
- abort (error) {
- this[ABORTED] = true
- this.emit('abort', error)
- // always throws, even in non-strict mode
- this.warn('TAR_ABORT', error, { recoverable: false })
- }
-
- write (chunk) {
- if (this[ABORTED])
- return
-
- // first write, might be gzipped
- if (this[UNZIP] === null && chunk) {
- if (this[BUFFER]) {
- chunk = Buffer.concat([this[BUFFER], chunk])
- this[BUFFER] = null
- }
- if (chunk.length < gzipHeader.length) {
- this[BUFFER] = chunk
- return true
- }
- for (let i = 0; this[UNZIP] === null && i < gzipHeader.length; i++) {
- if (chunk[i] !== gzipHeader[i])
- this[UNZIP] = false
- }
- if (this[UNZIP] === null) {
- const ended = this[ENDED]
- this[ENDED] = false
- this[UNZIP] = new zlib.Unzip()
- this[UNZIP].on('data', chunk => this[CONSUMECHUNK](chunk))
- this[UNZIP].on('error', er => this.abort(er))
- this[UNZIP].on('end', _ => {
- this[ENDED] = true
- this[CONSUMECHUNK]()
- })
- this[WRITING] = true
- const ret = this[UNZIP][ended ? 'end' : 'write' ](chunk)
- this[WRITING] = false
- return ret
- }
- }
-
- this[WRITING] = true
- if (this[UNZIP])
- this[UNZIP].write(chunk)
- else
- this[CONSUMECHUNK](chunk)
- this[WRITING] = false
-
- // return false if there's a queue, or if the current entry isn't flowing
- const ret =
- this[QUEUE].length ? false :
- this[READENTRY] ? this[READENTRY].flowing :
- true
-
- // if we have no queue, then that means a clogged READENTRY
- if (!ret && !this[QUEUE].length)
- this[READENTRY].once('drain', _ => this.emit('drain'))
-
- return ret
- }
-
- [BUFFERCONCAT] (c) {
- if (c && !this[ABORTED])
- this[BUFFER] = this[BUFFER] ? Buffer.concat([this[BUFFER], c]) : c
- }
-
- [MAYBEEND] () {
- if (this[ENDED] &&
- !this[EMITTEDEND] &&
- !this[ABORTED] &&
- !this[CONSUMING]) {
- this[EMITTEDEND] = true
- const entry = this[WRITEENTRY]
- if (entry && entry.blockRemain) {
- // truncated, likely a damaged file
- const have = this[BUFFER] ? this[BUFFER].length : 0
- this.warn('TAR_BAD_ARCHIVE', `Truncated input (needed ${
- entry.blockRemain} more bytes, only ${have} available)`, {entry})
- if (this[BUFFER])
- entry.write(this[BUFFER])
- entry.end()
- }
- this[EMIT](DONE)
- }
- }
-
- [CONSUMECHUNK] (chunk) {
- if (this[CONSUMING])
- this[BUFFERCONCAT](chunk)
- else if (!chunk && !this[BUFFER])
- this[MAYBEEND]()
- else {
- this[CONSUMING] = true
- if (this[BUFFER]) {
- this[BUFFERCONCAT](chunk)
- const c = this[BUFFER]
- this[BUFFER] = null
- this[CONSUMECHUNKSUB](c)
- } else {
- this[CONSUMECHUNKSUB](chunk)
- }
-
- while (this[BUFFER] &&
- this[BUFFER].length >= 512 &&
- !this[ABORTED] &&
- !this[SAW_EOF]) {
- const c = this[BUFFER]
- this[BUFFER] = null
- this[CONSUMECHUNKSUB](c)
- }
- this[CONSUMING] = false
- }
-
- if (!this[BUFFER] || this[ENDED])
- this[MAYBEEND]()
- }
-
- [CONSUMECHUNKSUB] (chunk) {
- // we know that we are in CONSUMING mode, so anything written goes into
- // the buffer. Advance the position and put any remainder in the buffer.
- let position = 0
- let length = chunk.length
- while (position + 512 <= length && !this[ABORTED] && !this[SAW_EOF]) {
- switch (this[STATE]) {
- case 'begin':
- case 'header':
- this[CONSUMEHEADER](chunk, position)
- position += 512
- break
-
- case 'ignore':
- case 'body':
- position += this[CONSUMEBODY](chunk, position)
- break
-
- case 'meta':
- position += this[CONSUMEMETA](chunk, position)
- break
-
- /* istanbul ignore next */
- default:
- throw new Error('invalid state: ' + this[STATE])
- }
- }
-
- if (position < length) {
- if (this[BUFFER])
- this[BUFFER] = Buffer.concat([chunk.slice(position), this[BUFFER]])
- else
- this[BUFFER] = chunk.slice(position)
- }
- }
-
- end (chunk) {
- if (!this[ABORTED]) {
- if (this[UNZIP])
- this[UNZIP].end(chunk)
- else {
- this[ENDED] = true
- this.write(chunk)
- }
- }
- }
-})
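
A sketch of driving the parser directly (archive name hypothetical).
It auto-detects gzip from the magic bytes on the first write, so the
same code handles .tar and .tgz input.

const fs = require('fs')
const Parser = require('./parse.js')

const p = new Parser({
  onentry: entry => {
    console.log(entry.path)
    entry.resume()  // drain the body so parsing can continue
  }
})
fs.createReadStream('archive.tgz').pipe(p)
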
diff --git a/node_modules/tar/lib/path-reservations.js b/node_modules/tar/lib/path-reservations.js
deleted file mode 100644
index 3cf0c2c..0000000
--- a/node_modules/tar/lib/path-reservations.js
+++ /dev/null
@@ -1,125 +0,0 @@
-// A path exclusive reservation system
-// reserve([list, of, paths], fn)
-// When the fn is first in line for all its paths, it
-// is called with a cb that clears the reservation.
-//
-// Used by async unpack to avoid clobbering paths in use,
-// while still allowing maximal safe parallelization.
-
-const assert = require('assert')
-
-module.exports = () => {
- // path => [function or Set]
- // A Set object means a directory reservation
- // A fn is a direct reservation on that path
- const queues = new Map()
-
- // fn => {paths:[path,...], dirs:[path, ...]}
- const reservations = new Map()
-
- // return a set of parent dirs for a given path
- const { join } = require('path')
- const getDirs = path =>
- join(path).split(/[\\\/]/).slice(0, -1).reduce((set, path) =>
- set.length ? set.concat(join(set[set.length-1], path)) : [path], [])
-
- // functions currently running
- const running = new Set()
-
- // return the queues for each path the function cares about
- // fn => {paths, dirs}
- const getQueues = fn => {
- const res = reservations.get(fn)
- /* istanbul ignore if - should be impossible */
- if (!res)
- throw new Error('function does not have any path reservations')
- return {
- paths: res.paths.map(path => queues.get(path)),
- dirs: [...res.dirs].map(path => queues.get(path)),
- }
- }
-
- // check if fn is first in line for all its paths, and is
- // included in the first set for all its dir queues
- const check = fn => {
- const {paths, dirs} = getQueues(fn)
- return paths.every(q => q[0] === fn) &&
- dirs.every(q => q[0] instanceof Set && q[0].has(fn))
- }
-
- // run the function if it's first in line and not already running
- const run = fn => {
- if (running.has(fn) || !check(fn))
- return false
- running.add(fn)
- fn(() => clear(fn))
- return true
- }
-
- const clear = fn => {
- if (!running.has(fn))
- return false
-
- const { paths, dirs } = reservations.get(fn)
- const next = new Set()
-
- paths.forEach(path => {
- const q = queues.get(path)
- assert.equal(q[0], fn)
- if (q.length === 1)
- queues.delete(path)
- else {
- q.shift()
- if (typeof q[0] === 'function')
- next.add(q[0])
- else
- q[0].forEach(fn => next.add(fn))
- }
- })
-
- dirs.forEach(dir => {
- const q = queues.get(dir)
- assert(q[0] instanceof Set)
- if (q[0].size === 1 && q.length === 1) {
- queues.delete(dir)
- } else if (q[0].size === 1) {
- q.shift()
-
- // must be a function or else the Set would've been reused
- next.add(q[0])
- } else
- q[0].delete(fn)
- })
- running.delete(fn)
-
- next.forEach(fn => run(fn))
- return true
- }
-
- const reserve = (paths, fn) => {
- const dirs = new Set(
- paths.map(path => getDirs(path)).reduce((a, b) => a.concat(b))
- )
- reservations.set(fn, {dirs, paths})
- paths.forEach(path => {
- const q = queues.get(path)
- if (!q)
- queues.set(path, [fn])
- else
- q.push(fn)
- })
- dirs.forEach(dir => {
- const q = queues.get(dir)
- if (!q)
- queues.set(dir, [new Set([fn])])
- else if (q[q.length-1] instanceof Set)
- q[q.length-1].add(fn)
- else
- q.push(new Set([fn]))
- })
-
- return run(fn)
- }
-
- return { check, reserve }
-}
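
The ordering guarantee, as a sketch (path hypothetical): two reserved
functions touching the same path run strictly one after the other,
each holding the path until it calls the clear callback it is given.

const pathReservations = require('./path-reservations.js')

const reservations = pathReservations()
reservations.reserve(['out/a.txt'], done => {
  console.log('first writer')
  done()  // releases the path; the next reservation may run
})
reservations.reserve(['out/a.txt'], done => {
  console.log('second writer')
  done()
})
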
diff --git a/node_modules/tar/lib/pax.js b/node_modules/tar/lib/pax.js
deleted file mode 100644
index 214a459..0000000
--- a/node_modules/tar/lib/pax.js
+++ /dev/null
@@ -1,145 +0,0 @@
-'use strict'
-const Header = require('./header.js')
-const path = require('path')
-
-class Pax {
- constructor (obj, global) {
- this.atime = obj.atime || null
- this.charset = obj.charset || null
- this.comment = obj.comment || null
- this.ctime = obj.ctime || null
- this.gid = obj.gid || null
- this.gname = obj.gname || null
- this.linkpath = obj.linkpath || null
- this.mtime = obj.mtime || null
- this.path = obj.path || null
- this.size = obj.size || null
- this.uid = obj.uid || null
- this.uname = obj.uname || null
- this.dev = obj.dev || null
- this.ino = obj.ino || null
- this.nlink = obj.nlink || null
- this.global = global || false
- }
-
- encode () {
- const body = this.encodeBody()
- if (body === '')
- return null
-
- const bodyLen = Buffer.byteLength(body)
- // round up to 512 bytes
- // add 512 for header
- const bufLen = 512 * Math.ceil(1 + bodyLen / 512)
- const buf = Buffer.allocUnsafe(bufLen)
-
- // 0-fill the header section, it might not hit every field
- for (let i = 0; i < 512; i++) {
- buf[i] = 0
- }
-
- new Header({
- // XXX the path should really be split, so this becomes
- // dirname + '/PaxHeader/' + basename, kept under 99 chars
- path: ('PaxHeader/' + path.basename(this.path)).slice(0, 99),
- mode: this.mode || 0o644,
- uid: this.uid || null,
- gid: this.gid || null,
- size: bodyLen,
- mtime: this.mtime || null,
- type: this.global ? 'GlobalExtendedHeader' : 'ExtendedHeader',
- linkpath: '',
- uname: this.uname || '',
- gname: this.gname || '',
- devmaj: 0,
- devmin: 0,
- atime: this.atime || null,
- ctime: this.ctime || null
- }).encode(buf)
-
- buf.write(body, 512, bodyLen, 'utf8')
-
- // null pad after the body
- for (let i = bodyLen + 512; i < buf.length; i++) {
- buf[i] = 0
- }
-
- return buf
- }
-
- encodeBody () {
- return (
- this.encodeField('path') +
- this.encodeField('ctime') +
- this.encodeField('atime') +
- this.encodeField('dev') +
- this.encodeField('ino') +
- this.encodeField('nlink') +
- this.encodeField('charset') +
- this.encodeField('comment') +
- this.encodeField('gid') +
- this.encodeField('gname') +
- this.encodeField('linkpath') +
- this.encodeField('mtime') +
- this.encodeField('size') +
- this.encodeField('uid') +
- this.encodeField('uname')
- )
- }
-
- encodeField (field) {
- if (this[field] === null || this[field] === undefined)
- return ''
- const v = this[field] instanceof Date ? this[field].getTime() / 1000
- : this[field]
- const s = ' ' +
- (field === 'dev' || field === 'ino' || field === 'nlink'
- ? 'SCHILY.' : '') +
- field + '=' + v + '\n'
- const byteLen = Buffer.byteLength(s)
- // the length prefix counts its own digits as part of the record,
- // so adding them can roll the total over into needing one more
- // digit: a 9-byte record plus its 1-digit length is 10 bytes,
- // which takes 2 digits, making the record 11 bytes long.
- let digits = Math.floor(Math.log(byteLen) / Math.log(10)) + 1
- if (byteLen + digits >= Math.pow(10, digits))
- digits += 1
- const len = digits + byteLen
- return len + s
- }
-}
-
-Pax.parse = (string, ex, g) => new Pax(merge(parseKV(string), ex), g)
-
-const merge = (a, b) =>
- b ? Object.keys(a).reduce((s, k) => (s[k] = a[k], s), b) : a
-
-const parseKV = string =>
- string
- .replace(/\n$/, '')
- .split('\n')
- .reduce(parseKVLine, Object.create(null))
-
-const parseKVLine = (set, line) => {
- const n = parseInt(line, 10)
-
- // XXX Values with \n in them will fail this.
- // Refactor to not be a naive line-by-line parse.
- if (n !== Buffer.byteLength(line) + 1)
- return set
-
- line = line.substr((n + ' ').length)
- const kv = line.split('=')
- const k = kv.shift().replace(/^SCHILY\.(dev|ino|nlink)/, '$1')
- if (!k)
- return set
-
- const v = kv.join('=')
- set[k] = /^([A-Z]+\.)?([mac]|birth|creation)time$/.test(k)
- ? new Date(v * 1000)
- : /^[0-9]+$/.test(v) ? +v
- : v
- return set
-}
-
-module.exports = Pax
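
What the encoded block looks like, as a sketch (field values
hypothetical): each record is '<length> <key>=<value>\n', where the
length counts the whole record including its own digits.

const Pax = require('./pax.js')

const pax = new Pax({ path: 'a/very/long/path.txt', uid: 501 })
const buf = pax.encode()  // 512-byte PaxHeader block + body, padded to 512
console.log(buf.length)   // 1024
console.log(buf.slice(512, 560).toString().replace(/\0+$/, ''))
// '29 path=a/very/long/path.txt\n11 uid=501\n'
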
diff --git a/node_modules/tar/lib/read-entry.js b/node_modules/tar/lib/read-entry.js
deleted file mode 100644
index 8acee94..0000000
--- a/node_modules/tar/lib/read-entry.js
+++ /dev/null
@@ -1,98 +0,0 @@
-'use strict'
-const types = require('./types.js')
-const MiniPass = require('minipass')
-
-const SLURP = Symbol('slurp')
-module.exports = class ReadEntry extends MiniPass {
- constructor (header, ex, gex) {
- super()
- // read entries always start life paused. This is to avoid the
- // situation where Minipass's auto-ending of empty streams results
- // in an entry ending before we're ready for it.
- this.pause()
- this.extended = ex
- this.globalExtended = gex
- this.header = header
- this.startBlockSize = 512 * Math.ceil(header.size / 512)
- this.blockRemain = this.startBlockSize
- this.remain = header.size
- this.type = header.type
- this.meta = false
- this.ignore = false
- switch (this.type) {
- case 'File':
- case 'OldFile':
- case 'Link':
- case 'SymbolicLink':
- case 'CharacterDevice':
- case 'BlockDevice':
- case 'Directory':
- case 'FIFO':
- case 'ContiguousFile':
- case 'GNUDumpDir':
- break
-
- case 'NextFileHasLongLinkpath':
- case 'NextFileHasLongPath':
- case 'OldGnuLongPath':
- case 'GlobalExtendedHeader':
- case 'ExtendedHeader':
- case 'OldExtendedHeader':
- this.meta = true
- break
-
- // NOTE: gnutar and bsdtar treat unrecognized types as 'File'
- // it may be worth doing the same, but with a warning.
- default:
- this.ignore = true
- }
-
- this.path = header.path
- this.mode = header.mode
- if (this.mode)
- this.mode = this.mode & 0o7777
- this.uid = header.uid
- this.gid = header.gid
- this.uname = header.uname
- this.gname = header.gname
- this.size = header.size
- this.mtime = header.mtime
- this.atime = header.atime
- this.ctime = header.ctime
- this.linkpath = header.linkpath
-
- if (ex) this[SLURP](ex)
- if (gex) this[SLURP](gex, true)
- }
-
- write (data) {
- const writeLen = data.length
- if (writeLen > this.blockRemain)
- throw new Error('writing more to entry than is appropriate')
-
- const r = this.remain
- const br = this.blockRemain
- this.remain = Math.max(0, r - writeLen)
- this.blockRemain = Math.max(0, br - writeLen)
- if (this.ignore)
- return true
-
- if (r >= writeLen)
- return super.write(data)
-
- // r < writeLen
- return super.write(data.slice(0, r))
- }
-
- [SLURP] (ex, global) {
- for (let k in ex) {
- // we slurp in everything except for the path attribute in
- // a global extended header, because that's weird.
- if (ex[k] !== null && ex[k] !== undefined &&
- !(global && k === 'path'))
- this[k] = ex[k]
- }
- }
-}
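
The start-paused behavior, as a sketch (file name hypothetical): an
entry built from a header with size 2 passes through only those 2
bytes of a 512-byte block, and nothing flows until resume().

const Header = require('./header.js')
const ReadEntry = require('./read-entry.js')

const h = new Header({ path: 'hi.txt', type: 'File', size: 2 })
const entry = new ReadEntry(h)
entry.on('data', c => console.log(JSON.stringify(c.toString())))  // "hi"
entry.resume()  // entries start life paused

const block = Buffer.alloc(512)
block.write('hi')
entry.write(block)  // only the first 2 bytes pass through
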
diff --git a/node_modules/tar/lib/replace.js b/node_modules/tar/lib/replace.js
deleted file mode 100644
index 44126d1..0000000
--- a/node_modules/tar/lib/replace.js
+++ /dev/null
@@ -1,219 +0,0 @@
-'use strict'
-
-// tar -r
-const hlo = require('./high-level-opt.js')
-const Pack = require('./pack.js')
-const Parse = require('./parse.js')
-const fs = require('fs')
-const fsm = require('fs-minipass')
-const t = require('./list.js')
-const path = require('path')
-
-// Starting at the head of the file, read a Header.
-// If the checksum is invalid, that's our position to start writing.
-// If it's valid, jump forward by the entry's size (rounded up to a
-// 512-byte block) and try again.
-// Write the new Pack stream starting there.
-
-const Header = require('./header.js')
-
-const r = module.exports = (opt_, files, cb) => {
- const opt = hlo(opt_)
-
- if (!opt.file)
- throw new TypeError('file is required')
-
- if (opt.gzip)
- throw new TypeError('cannot append to compressed archives')
-
- if (!files || !Array.isArray(files) || !files.length)
- throw new TypeError('no files or directories specified')
-
- files = Array.from(files)
-
- return opt.sync ? replaceSync(opt, files)
- : replace(opt, files, cb)
-}
-
-const replaceSync = (opt, files) => {
- const p = new Pack.Sync(opt)
-
- let threw = true
- let fd
- let position
-
- try {
- try {
- fd = fs.openSync(opt.file, 'r+')
- } catch (er) {
- if (er.code === 'ENOENT')
- fd = fs.openSync(opt.file, 'w+')
- else
- throw er
- }
-
- const st = fs.fstatSync(fd)
- const headBuf = Buffer.alloc(512)
-
- POSITION: for (position = 0; position < st.size; position += 512) {
- for (let bufPos = 0, bytes = 0; bufPos < 512; bufPos += bytes) {
- bytes = fs.readSync(
- fd, headBuf, bufPos, headBuf.length - bufPos, position + bufPos
- )
-
- if (position === 0 && headBuf[0] === 0x1f && headBuf[1] === 0x8b)
- throw new Error('cannot append to compressed archives')
-
- if (!bytes)
- break POSITION
- }
-
- let h = new Header(headBuf)
- if (!h.cksumValid)
- break
- let entryBlockSize = 512 * Math.ceil(h.size / 512)
- if (position + entryBlockSize + 512 > st.size)
- break
- // the 512 for the header we just parsed will be added as well
- // also jump ahead all the blocks for the body
- position += entryBlockSize
- if (opt.mtimeCache)
- opt.mtimeCache.set(h.path, h.mtime)
- }
- threw = false
-
- streamSync(opt, p, position, fd, files)
- } finally {
- if (threw)
- try { fs.closeSync(fd) } catch (er) {}
- }
-}
-
-const streamSync = (opt, p, position, fd, files) => {
- const stream = new fsm.WriteStreamSync(opt.file, {
- fd: fd,
- start: position
- })
- p.pipe(stream)
- addFilesSync(p, files)
-}
-
-const replace = (opt, files, cb) => {
- files = Array.from(files)
- const p = new Pack(opt)
-
- const getPos = (fd, size, cb_) => {
- const cb = (er, pos) => {
- if (er)
- fs.close(fd, _ => cb_(er))
- else
- cb_(null, pos)
- }
-
- let position = 0
- if (size === 0)
- return cb(null, 0)
-
- let bufPos = 0
- const headBuf = Buffer.alloc(512)
- const onread = (er, bytes) => {
- if (er)
- return cb(er)
- bufPos += bytes
- if (bufPos < 512 && bytes)
- return fs.read(
- fd, headBuf, bufPos, headBuf.length - bufPos,
- position + bufPos, onread
- )
-
- if (position === 0 && headBuf[0] === 0x1f && headBuf[1] === 0x8b)
- return cb(new Error('cannot append to compressed archives'))
-
- // truncated header
- if (bufPos < 512)
- return cb(null, position)
-
- const h = new Header(headBuf)
- if (!h.cksumValid)
- return cb(null, position)
-
- const entryBlockSize = 512 * Math.ceil(h.size / 512)
- if (position + entryBlockSize + 512 > size)
- return cb(null, position)
-
- position += entryBlockSize + 512
- if (position >= size)
- return cb(null, position)
-
- if (opt.mtimeCache)
- opt.mtimeCache.set(h.path, h.mtime)
- bufPos = 0
- fs.read(fd, headBuf, 0, 512, position, onread)
- }
- fs.read(fd, headBuf, 0, 512, position, onread)
- }
-
- const promise = new Promise((resolve, reject) => {
- p.on('error', reject)
- let flag = 'r+'
- const onopen = (er, fd) => {
- if (er && er.code === 'ENOENT' && flag === 'r+') {
- flag = 'w+'
- return fs.open(opt.file, flag, onopen)
- }
-
- if (er)
- return reject(er)
-
- fs.fstat(fd, (er, st) => {
- if (er)
- return reject(er)
- getPos(fd, st.size, (er, position) => {
- if (er)
- return reject(er)
- const stream = new fsm.WriteStream(opt.file, {
- fd: fd,
- start: position
- })
- p.pipe(stream)
- stream.on('error', reject)
- stream.on('close', resolve)
- addFilesAsync(p, files)
- })
- })
- }
- fs.open(opt.file, flag, onopen)
- })
-
- return cb ? promise.then(cb, cb) : promise
-}
-
-const addFilesSync = (p, files) => {
- files.forEach(file => {
- if (file.charAt(0) === '@')
- t({
- file: path.resolve(p.cwd, file.substr(1)),
- sync: true,
- noResume: true,
- onentry: entry => p.add(entry)
- })
- else
- p.add(file)
- })
- p.end()
-}
-
-const addFilesAsync = (p, files) => {
- while (files.length) {
- const file = files.shift()
- if (file.charAt(0) === '@')
- return t({
- file: path.resolve(p.cwd, file.substr(1)),
- noResume: true,
- onentry: entry => p.add(entry)
- }).then(_ => addFilesAsync(p, files))
- else
- p.add(file)
- }
- p.end()
-}
diff --git a/node_modules/tar/lib/types.js b/node_modules/tar/lib/types.js
deleted file mode 100644
index df42565..0000000
--- a/node_modules/tar/lib/types.js
+++ /dev/null
@@ -1,44 +0,0 @@
-'use strict'
-// map types from key to human-friendly name
-exports.name = new Map([
- ['0', 'File'],
- // same as File
- ['', 'OldFile'],
- ['1', 'Link'],
- ['2', 'SymbolicLink'],
- // Devices and FIFOs aren't fully supported
- // they are parsed, but skipped when unpacking
- ['3', 'CharacterDevice'],
- ['4', 'BlockDevice'],
- ['5', 'Directory'],
- ['6', 'FIFO'],
- // same as File
- ['7', 'ContiguousFile'],
- // pax headers
- ['g', 'GlobalExtendedHeader'],
- ['x', 'ExtendedHeader'],
- // vendor-specific stuff
- // skip
- ['A', 'SolarisACL'],
- // like 5, but with data, which should be skipped
- ['D', 'GNUDumpDir'],
- // metadata only, skip
- ['I', 'Inode'],
- // data = link path of next file
- ['K', 'NextFileHasLongLinkpath'],
- // data = path of next file
- ['L', 'NextFileHasLongPath'],
- // skip
- ['M', 'ContinuationFile'],
- // like L
- ['N', 'OldGnuLongPath'],
- // skip
- ['S', 'SparseFile'],
- // skip
- ['V', 'TapeVolumeHeader'],
- // like x
- ['X', 'OldExtendedHeader']
-])
-
-// map the other direction
-exports.code = new Map(Array.from(exports.name).map(kv => [kv[1], kv[0]]))
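The two maps above are internal but requirable, since this package version declares no `exports` restriction; a quick round-trip sketch:

```js
const types = require('tar/lib/types.js')

types.name.get('5')             // => 'Directory'
types.code.get('SymbolicLink')  // => '2'
```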
diff --git a/node_modules/tar/lib/unpack.js b/node_modules/tar/lib/unpack.js
deleted file mode 100644
index af0e0ff..0000000
--- a/node_modules/tar/lib/unpack.js
+++ /dev/null
@@ -1,680 +0,0 @@
-'use strict'
-
-// the PEND/UNPEND stuff tracks whether we're ready to emit end/close yet.
-// but the path reservations are required to avoid race conditions where
-// parallelized unpack ops may mess with one another, due to dependencies
-// (like a Link depending on its target) or destructive operations (like
-// clobbering an fs object to create one of a different type.)
-
-const assert = require('assert')
-const EE = require('events').EventEmitter
-const Parser = require('./parse.js')
-const fs = require('fs')
-const fsm = require('fs-minipass')
-const path = require('path')
-const mkdir = require('./mkdir.js')
-const mkdirSync = mkdir.sync
-const wc = require('./winchars.js')
-const pathReservations = require('./path-reservations.js')
-
-const ONENTRY = Symbol('onEntry')
-const CHECKFS = Symbol('checkFs')
-const CHECKFS2 = Symbol('checkFs2')
-const ISREUSABLE = Symbol('isReusable')
-const MAKEFS = Symbol('makeFs')
-const FILE = Symbol('file')
-const DIRECTORY = Symbol('directory')
-const LINK = Symbol('link')
-const SYMLINK = Symbol('symlink')
-const HARDLINK = Symbol('hardlink')
-const UNSUPPORTED = Symbol('unsupported')
-const UNKNOWN = Symbol('unknown')
-const CHECKPATH = Symbol('checkPath')
-const MKDIR = Symbol('mkdir')
-const ONERROR = Symbol('onError')
-const PENDING = Symbol('pending')
-const PEND = Symbol('pend')
-const UNPEND = Symbol('unpend')
-const ENDED = Symbol('ended')
-const MAYBECLOSE = Symbol('maybeClose')
-const SKIP = Symbol('skip')
-const DOCHOWN = Symbol('doChown')
-const UID = Symbol('uid')
-const GID = Symbol('gid')
-const crypto = require('crypto')
-const getFlag = require('./get-write-flag.js')
-
-/* istanbul ignore next */
-const neverCalled = () => {
- throw new Error('sync function called cb somehow?!?')
-}
-
-// Unlinks on Windows are not atomic.
-//
-// This means that if you have a file entry, followed by another
-// file entry with an identical name, and you cannot re-use the file
-// (because it's a hardlink, or because unlink:true is set, or it's
-// Windows, which does not have useful nlink values), then the unlink
-// will be committed to the disk AFTER the new file has been written
-// over the old one, deleting the new file.
-//
-// To work around this, on Windows systems, we rename the file and then
-// delete the renamed file. It's a sloppy kludge, but frankly, I do not
-// know of a better way to do this, given windows' non-atomic unlink
-// semantics.
-//
-// See: https://github.com/npm/node-tar/issues/183
-/* istanbul ignore next */
-const unlinkFile = (path, cb) => {
- if (process.platform !== 'win32')
- return fs.unlink(path, cb)
-
- const name = path + '.DELETE.' + crypto.randomBytes(16).toString('hex')
- fs.rename(path, name, er => {
- if (er)
- return cb(er)
- fs.unlink(name, cb)
- })
-}
-
-/* istanbul ignore next */
-const unlinkFileSync = path => {
- if (process.platform !== 'win32')
- return fs.unlinkSync(path)
-
- const name = path + '.DELETE.' + crypto.randomBytes(16).toString('hex')
- fs.renameSync(path, name)
- fs.unlinkSync(name)
-}
-
-// pick the first valid uint32 of: this.uid, entry.uid, this.processUid (same for gid)
-const uint32 = (a, b, c) =>
- a === a >>> 0 ? a
- : b === b >>> 0 ? b
- : c
-
-class Unpack extends Parser {
- constructor (opt) {
- if (!opt)
- opt = {}
-
- opt.ondone = _ => {
- this[ENDED] = true
- this[MAYBECLOSE]()
- }
-
- super(opt)
-
- this.reservations = pathReservations()
-
- this.transform = typeof opt.transform === 'function' ? opt.transform : null
-
- this.writable = true
- this.readable = false
-
- this[PENDING] = 0
- this[ENDED] = false
-
- this.dirCache = opt.dirCache || new Map()
-
- if (typeof opt.uid === 'number' || typeof opt.gid === 'number') {
- // need both or neither
- if (typeof opt.uid !== 'number' || typeof opt.gid !== 'number')
- throw new TypeError('cannot set owner without number uid and gid')
- if (opt.preserveOwner)
- throw new TypeError(
- 'cannot preserve owner in archive and also set owner explicitly')
- this.uid = opt.uid
- this.gid = opt.gid
- this.setOwner = true
- } else {
- this.uid = null
- this.gid = null
- this.setOwner = false
- }
-
- // default true for root
- if (opt.preserveOwner === undefined && typeof opt.uid !== 'number')
- this.preserveOwner = process.getuid && process.getuid() === 0
- else
- this.preserveOwner = !!opt.preserveOwner
-
- this.processUid = (this.preserveOwner || this.setOwner) && process.getuid ?
- process.getuid() : null
- this.processGid = (this.preserveOwner || this.setOwner) && process.getgid ?
- process.getgid() : null
-
- // mostly just for testing, but useful in some cases.
- // Forcibly trigger a chown on every entry, no matter what
- this.forceChown = opt.forceChown === true
-
-    // turn <>?| (and non-drive-letter :) in filenames into 0xf000-higher encoded forms
- this.win32 = !!opt.win32 || process.platform === 'win32'
-
- // do not unpack over files that are newer than what's in the archive
- this.newer = !!opt.newer
-
- // do not unpack over ANY files
- this.keep = !!opt.keep
-
- // do not set mtime/atime of extracted entries
- this.noMtime = !!opt.noMtime
-
- // allow .., absolute path entries, and unpacking through symlinks
- // without this, warn and skip .., relativize absolutes, and error
- // on symlinks in extraction path
- this.preservePaths = !!opt.preservePaths
-
- // unlink files and links before writing. This breaks existing hard
- // links, and removes symlink directories rather than erroring
- this.unlink = !!opt.unlink
-
- this.cwd = path.resolve(opt.cwd || process.cwd())
- this.strip = +opt.strip || 0
- this.processUmask = process.umask()
- this.umask = typeof opt.umask === 'number' ? opt.umask : this.processUmask
- // default mode for dirs created as parents
- this.dmode = opt.dmode || (0o0777 & (~this.umask))
- this.fmode = opt.fmode || (0o0666 & (~this.umask))
- this.on('entry', entry => this[ONENTRY](entry))
- }
-
- // a bad or damaged archive is a warning for Parser, but an error
- // when extracting. Mark those errors as unrecoverable, because
- // the Unpack contract cannot be met.
- warn (code, msg, data = {}) {
- if (code === 'TAR_BAD_ARCHIVE' || code === 'TAR_ABORT')
- data.recoverable = false
- return super.warn(code, msg, data)
- }
-
- [MAYBECLOSE] () {
- if (this[ENDED] && this[PENDING] === 0) {
- this.emit('prefinish')
- this.emit('finish')
- this.emit('end')
- this.emit('close')
- }
- }
-
- [CHECKPATH] (entry) {
- if (this.strip) {
- const parts = entry.path.split(/\/|\\/)
- if (parts.length < this.strip)
- return false
- entry.path = parts.slice(this.strip).join('/')
-
- if (entry.type === 'Link') {
- const linkparts = entry.linkpath.split(/\/|\\/)
- if (linkparts.length >= this.strip)
- entry.linkpath = linkparts.slice(this.strip).join('/')
- }
- }
-
- if (!this.preservePaths) {
- const p = entry.path
- if (p.match(/(^|\/|\\)\.\.(\\|\/|$)/)) {
- this.warn('TAR_ENTRY_ERROR', `path contains '..'`, {
- entry,
- path: p,
- })
- return false
- }
-
- // absolutes on posix are also absolutes on win32
- // so we only need to test this one to get both
- if (path.win32.isAbsolute(p)) {
- const parsed = path.win32.parse(p)
- entry.path = p.substr(parsed.root.length)
- const r = parsed.root
- this.warn('TAR_ENTRY_INFO', `stripping ${r} from absolute path`, {
- entry,
- path: p,
- })
- }
- }
-
- // only encode : chars that aren't drive letter indicators
- if (this.win32) {
- const parsed = path.win32.parse(entry.path)
- entry.path = parsed.root === '' ? wc.encode(entry.path)
- : parsed.root + wc.encode(entry.path.substr(parsed.root.length))
- }
-
- if (path.isAbsolute(entry.path))
- entry.absolute = entry.path
- else
- entry.absolute = path.resolve(this.cwd, entry.path)
-
- return true
- }
-
- [ONENTRY] (entry) {
- if (!this[CHECKPATH](entry))
- return entry.resume()
-
- assert.equal(typeof entry.absolute, 'string')
-
- switch (entry.type) {
- case 'Directory':
- case 'GNUDumpDir':
- if (entry.mode)
- entry.mode = entry.mode | 0o700
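-        // fall through: these types share the [CHECKFS] call below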
-
- case 'File':
- case 'OldFile':
- case 'ContiguousFile':
- case 'Link':
- case 'SymbolicLink':
- return this[CHECKFS](entry)
-
- case 'CharacterDevice':
- case 'BlockDevice':
- case 'FIFO':
- return this[UNSUPPORTED](entry)
- }
- }
-
- [ONERROR] (er, entry) {
- // Cwd has to exist, or else nothing works. That's serious.
- // Other errors are warnings, which raise the error in strict
- // mode, but otherwise continue on.
- if (er.name === 'CwdError')
- this.emit('error', er)
- else {
- this.warn('TAR_ENTRY_ERROR', er, {entry})
- this[UNPEND]()
- entry.resume()
- }
- }
-
- [MKDIR] (dir, mode, cb) {
- mkdir(dir, {
- uid: this.uid,
- gid: this.gid,
- processUid: this.processUid,
- processGid: this.processGid,
- umask: this.processUmask,
- preserve: this.preservePaths,
- unlink: this.unlink,
- cache: this.dirCache,
- cwd: this.cwd,
- mode: mode
- }, cb)
- }
-
- [DOCHOWN] (entry) {
- // in preserve owner mode, chown if the entry doesn't match process
- // in set owner mode, chown if setting doesn't match process
-    return this.forceChown ||
-      (this.preserveOwner &&
-        ((typeof entry.uid === 'number' && entry.uid !== this.processUid) ||
-         (typeof entry.gid === 'number' && entry.gid !== this.processGid)))
-      ||
-      ((typeof this.uid === 'number' && this.uid !== this.processUid) ||
-       (typeof this.gid === 'number' && this.gid !== this.processGid))
- }
-
- [UID] (entry) {
- return uint32(this.uid, entry.uid, this.processUid)
- }
-
- [GID] (entry) {
- return uint32(this.gid, entry.gid, this.processGid)
- }
-
- [FILE] (entry, fullyDone) {
- const mode = entry.mode & 0o7777 || this.fmode
- const stream = new fsm.WriteStream(entry.absolute, {
- flags: getFlag(entry.size),
- mode: mode,
- autoClose: false
- })
- stream.on('error', er => this[ONERROR](er, entry))
-
- let actions = 1
- const done = er => {
- if (er)
- return this[ONERROR](er, entry)
-
- if (--actions === 0) {
- fs.close(stream.fd, er => {
- fullyDone()
- er ? this[ONERROR](er, entry) : this[UNPEND]()
- })
- }
- }
-
- stream.on('finish', _ => {
- // if futimes fails, try utimes
- // if utimes fails, fail with the original error
- // same for fchown/chown
- const abs = entry.absolute
- const fd = stream.fd
-
- if (entry.mtime && !this.noMtime) {
- actions++
- const atime = entry.atime || new Date()
- const mtime = entry.mtime
- fs.futimes(fd, atime, mtime, er =>
- er ? fs.utimes(abs, atime, mtime, er2 => done(er2 && er))
- : done())
- }
-
- if (this[DOCHOWN](entry)) {
- actions++
- const uid = this[UID](entry)
- const gid = this[GID](entry)
- fs.fchown(fd, uid, gid, er =>
- er ? fs.chown(abs, uid, gid, er2 => done(er2 && er))
- : done())
- }
-
- done()
- })
-
- const tx = this.transform ? this.transform(entry) || entry : entry
- if (tx !== entry) {
- tx.on('error', er => this[ONERROR](er, entry))
- entry.pipe(tx)
- }
- tx.pipe(stream)
- }
-
- [DIRECTORY] (entry, fullyDone) {
- const mode = entry.mode & 0o7777 || this.dmode
- this[MKDIR](entry.absolute, mode, er => {
- if (er) {
- fullyDone()
- return this[ONERROR](er, entry)
- }
-
- let actions = 1
- const done = _ => {
- if (--actions === 0) {
- fullyDone()
- this[UNPEND]()
- entry.resume()
- }
- }
-
- if (entry.mtime && !this.noMtime) {
- actions++
- fs.utimes(entry.absolute, entry.atime || new Date(), entry.mtime, done)
- }
-
- if (this[DOCHOWN](entry)) {
- actions++
- fs.chown(entry.absolute, this[UID](entry), this[GID](entry), done)
- }
-
- done()
- })
- }
-
- [UNSUPPORTED] (entry) {
- entry.unsupported = true
- this.warn('TAR_ENTRY_UNSUPPORTED',
- `unsupported entry type: ${entry.type}`, {entry})
- entry.resume()
- }
-
- [SYMLINK] (entry, done) {
- this[LINK](entry, entry.linkpath, 'symlink', done)
- }
-
- [HARDLINK] (entry, done) {
- this[LINK](entry, path.resolve(this.cwd, entry.linkpath), 'link', done)
- }
-
- [PEND] () {
- this[PENDING]++
- }
-
- [UNPEND] () {
- this[PENDING]--
- this[MAYBECLOSE]()
- }
-
- [SKIP] (entry) {
- this[UNPEND]()
- entry.resume()
- }
-
- // Check if we can reuse an existing filesystem entry safely and
- // overwrite it, rather than unlinking and recreating
- // Windows doesn't report a useful nlink, so we just never reuse entries
- [ISREUSABLE] (entry, st) {
- return entry.type === 'File' &&
- !this.unlink &&
- st.isFile() &&
- st.nlink <= 1 &&
- process.platform !== 'win32'
- }
-
- // check if a thing is there, and if so, try to clobber it
- [CHECKFS] (entry) {
- this[PEND]()
- const paths = [entry.path]
- if (entry.linkpath)
- paths.push(entry.linkpath)
- this.reservations.reserve(paths, done => this[CHECKFS2](entry, done))
- }
- [CHECKFS2] (entry, done) {
- this[MKDIR](path.dirname(entry.absolute), this.dmode, er => {
- if (er) {
- done()
- return this[ONERROR](er, entry)
- }
- fs.lstat(entry.absolute, (er, st) => {
- if (st && (this.keep || this.newer && st.mtime > entry.mtime)) {
- this[SKIP](entry)
- done()
- } else if (er || this[ISREUSABLE](entry, st)) {
- this[MAKEFS](null, entry, done)
-        } else if (st.isDirectory()) {
- if (entry.type === 'Directory') {
- if (!entry.mode || (st.mode & 0o7777) === entry.mode)
- this[MAKEFS](null, entry, done)
- else
- fs.chmod(entry.absolute, entry.mode,
- er => this[MAKEFS](er, entry, done))
- } else
- fs.rmdir(entry.absolute, er => this[MAKEFS](er, entry, done))
- } else
- unlinkFile(entry.absolute, er => this[MAKEFS](er, entry, done))
- })
- })
- }
-
- [MAKEFS] (er, entry, done) {
- if (er)
- return this[ONERROR](er, entry)
-
- switch (entry.type) {
- case 'File':
- case 'OldFile':
- case 'ContiguousFile':
- return this[FILE](entry, done)
-
- case 'Link':
- return this[HARDLINK](entry, done)
-
- case 'SymbolicLink':
- return this[SYMLINK](entry, done)
-
- case 'Directory':
- case 'GNUDumpDir':
- return this[DIRECTORY](entry, done)
- }
- }
-
- [LINK] (entry, linkpath, link, done) {
- // XXX: get the type ('file' or 'dir') for windows
- fs[link](linkpath, entry.absolute, er => {
- if (er)
- return this[ONERROR](er, entry)
- done()
- this[UNPEND]()
- entry.resume()
- })
- }
-}
-
-class UnpackSync extends Unpack {
- constructor (opt) {
- super(opt)
- }
-
- [CHECKFS] (entry) {
- const er = this[MKDIR](path.dirname(entry.absolute), this.dmode, neverCalled)
- if (er)
- return this[ONERROR](er, entry)
- try {
- const st = fs.lstatSync(entry.absolute)
- if (this.keep || this.newer && st.mtime > entry.mtime)
- return this[SKIP](entry)
- else if (this[ISREUSABLE](entry, st))
- return this[MAKEFS](null, entry, neverCalled)
- else {
- try {
- if (st.isDirectory()) {
- if (entry.type === 'Directory') {
- if (entry.mode && (st.mode & 0o7777) !== entry.mode)
- fs.chmodSync(entry.absolute, entry.mode)
- } else
- fs.rmdirSync(entry.absolute)
- } else
- unlinkFileSync(entry.absolute)
- return this[MAKEFS](null, entry, neverCalled)
- } catch (er) {
- return this[ONERROR](er, entry)
- }
- }
- } catch (er) {
- return this[MAKEFS](null, entry, neverCalled)
- }
- }
-
- [FILE] (entry, _) {
- const mode = entry.mode & 0o7777 || this.fmode
-
- const oner = er => {
- let closeError
- try {
- fs.closeSync(fd)
- } catch (e) {
- closeError = e
- }
- if (er || closeError)
- this[ONERROR](er || closeError, entry)
- }
-
- let stream
- let fd
- try {
- fd = fs.openSync(entry.absolute, getFlag(entry.size), mode)
- } catch (er) {
- return oner(er)
- }
- const tx = this.transform ? this.transform(entry) || entry : entry
- if (tx !== entry) {
- tx.on('error', er => this[ONERROR](er, entry))
- entry.pipe(tx)
- }
-
- tx.on('data', chunk => {
- try {
- fs.writeSync(fd, chunk, 0, chunk.length)
- } catch (er) {
- oner(er)
- }
- })
-
- tx.on('end', _ => {
- let er = null
-      // try futimes first, falling back to utimes
-      // if both fail, report the futimes error
- if (entry.mtime && !this.noMtime) {
- const atime = entry.atime || new Date()
- const mtime = entry.mtime
- try {
- fs.futimesSync(fd, atime, mtime)
- } catch (futimeser) {
- try {
- fs.utimesSync(entry.absolute, atime, mtime)
- } catch (utimeser) {
- er = futimeser
- }
- }
- }
-
- if (this[DOCHOWN](entry)) {
- const uid = this[UID](entry)
- const gid = this[GID](entry)
-
- try {
- fs.fchownSync(fd, uid, gid)
- } catch (fchowner) {
- try {
- fs.chownSync(entry.absolute, uid, gid)
- } catch (chowner) {
- er = er || fchowner
- }
- }
- }
-
- oner(er)
- })
- }
-
- [DIRECTORY] (entry, _) {
- const mode = entry.mode & 0o7777 || this.dmode
- const er = this[MKDIR](entry.absolute, mode)
- if (er)
- return this[ONERROR](er, entry)
- if (entry.mtime && !this.noMtime) {
- try {
- fs.utimesSync(entry.absolute, entry.atime || new Date(), entry.mtime)
- } catch (er) {}
- }
- if (this[DOCHOWN](entry)) {
- try {
- fs.chownSync(entry.absolute, this[UID](entry), this[GID](entry))
- } catch (er) {}
- }
- entry.resume()
- }
-
- [MKDIR] (dir, mode) {
- try {
- return mkdir.sync(dir, {
- uid: this.uid,
- gid: this.gid,
- processUid: this.processUid,
- processGid: this.processGid,
- umask: this.processUmask,
- preserve: this.preservePaths,
- unlink: this.unlink,
- cache: this.dirCache,
- cwd: this.cwd,
- mode: mode
- })
- } catch (er) {
- return er
- }
- }
-
- [LINK] (entry, linkpath, link, _) {
- try {
- fs[link + 'Sync'](linkpath, entry.absolute)
- entry.resume()
- } catch (er) {
- return this[ONERROR](er, entry)
- }
- }
-}
-
-Unpack.Sync = UnpackSync
-module.exports = Unpack
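A sketch of how the options handled by `Unpack` above are usually reached, through the high-level `tar.x` entry point (archive name and target directory are illustrative; the target must already exist, since a missing cwd is a hard error rather than a warning):

```js
const tar = require('tar')

tar.x({
  file: 'archive.tar',
  cwd: 'out',    // must already exist; a missing cwd is unrecoverable
  strip: 1,      // path stripping handled in [CHECKPATH]
  keep: true,    // never overwrite existing files ([CHECKFS] -> [SKIP])
})
```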
diff --git a/node_modules/tar/lib/update.js b/node_modules/tar/lib/update.js
deleted file mode 100644
index 16c3e93..0000000
--- a/node_modules/tar/lib/update.js
+++ /dev/null
@@ -1,36 +0,0 @@
-'use strict'
-
-// tar -u
-
-const hlo = require('./high-level-opt.js')
-const r = require('./replace.js')
-// just call tar.r with the filter and mtimeCache
-
-const u = module.exports = (opt_, files, cb) => {
- const opt = hlo(opt_)
-
- if (!opt.file)
- throw new TypeError('file is required')
-
- if (opt.gzip)
- throw new TypeError('cannot append to compressed archives')
-
- if (!files || !Array.isArray(files) || !files.length)
- throw new TypeError('no files or directories specified')
-
- files = Array.from(files)
-
- mtimeFilter(opt)
- return r(opt, files, cb)
-}
-
-const mtimeFilter = opt => {
- const filter = opt.filter
-
- if (!opt.mtimeCache)
- opt.mtimeCache = new Map()
-
- opt.filter = filter ? (path, stat) =>
- filter(path, stat) && !(opt.mtimeCache.get(path) > stat.mtime)
- : (path, stat) => !(opt.mtimeCache.get(path) > stat.mtime)
-}
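Usage-wise, `tar.u` is `tar.r` plus the mtime filter above; a short sketch with illustrative names:

```js
const tar = require('tar')

// Only appends src/index.js if it is newer than the copy whose
// mtime was recorded in opt.mtimeCache while scanning the archive.
tar.u({ file: 'archive.tar' }, ['src/index.js'])
  .then(() => console.log('updated'))
```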
diff --git a/node_modules/tar/lib/warn-mixin.js b/node_modules/tar/lib/warn-mixin.js
deleted file mode 100644
index 11eb52c..0000000
--- a/node_modules/tar/lib/warn-mixin.js
+++ /dev/null
@@ -1,21 +0,0 @@
-'use strict'
-module.exports = Base => class extends Base {
- warn (code, message, data = {}) {
- if (this.file)
- data.file = this.file
- if (this.cwd)
- data.cwd = this.cwd
- data.code = message instanceof Error && message.code || code
- data.tarCode = code
- if (!this.strict && data.recoverable !== false) {
- if (message instanceof Error) {
- data = Object.assign(message, data)
- message = message.message
- }
- this.emit('warn', data.tarCode, message, data)
- } else if (message instanceof Error) {
- this.emit('error', Object.assign(message, data))
- } else
- this.emit('error', Object.assign(new Error(`${code}: ${message}`), data))
- }
-}
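From a consumer's point of view, the mixin means recoverable problems are events in lax mode and errors in strict mode; a sketch using `tar.t` with an illustrative archive name:

```js
const tar = require('tar')

// Lax mode: recoverable problems arrive as 'warn' events,
// with the (code, message, data) shape emitted above.
tar.t({
  file: 'archive.tar',
  onwarn: (code, message, data) => console.warn(code, message),
})

// With strict: true, the same conditions are emitted as 'error' instead.
```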
diff --git a/node_modules/tar/lib/winchars.js b/node_modules/tar/lib/winchars.js
deleted file mode 100644
index cf6ea06..0000000
--- a/node_modules/tar/lib/winchars.js
+++ /dev/null
@@ -1,23 +0,0 @@
-'use strict'
-
-// When writing files on Windows, translate the characters to their
-// 0xf000 higher-encoded versions.
-
-const raw = [
- '|',
- '<',
- '>',
- '?',
- ':'
-]
-
-const win = raw.map(char =>
- String.fromCharCode(0xf000 + char.charCodeAt(0)))
-
-const toWin = new Map(raw.map((char, i) => [char, win[i]]))
-const toRaw = new Map(win.map((char, i) => [char, raw[i]]))
-
-module.exports = {
- encode: s => raw.reduce((s, c) => s.split(c).join(toWin.get(c)), s),
- decode: s => win.reduce((s, c) => s.split(c).join(toRaw.get(c)), s)
-}
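A quick round-trip sketch of the mapping above (deep-requiring the internal module, which this package version permits):

```js
const wc = require('tar/lib/winchars.js')

wc.encode('a<b?.txt')             // => 'a\uf03cb\uf03f.txt' (0xf000 + char code)
wc.decode(wc.encode('a<b?.txt'))  // => 'a<b?.txt'
```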
diff --git a/node_modules/tar/lib/write-entry.js b/node_modules/tar/lib/write-entry.js
deleted file mode 100644
index 0e33cb5..0000000
--- a/node_modules/tar/lib/write-entry.js
+++ /dev/null
@@ -1,436 +0,0 @@
-'use strict'
-const MiniPass = require('minipass')
-const Pax = require('./pax.js')
-const Header = require('./header.js')
-const ReadEntry = require('./read-entry.js')
-const fs = require('fs')
-const path = require('path')
-
-const types = require('./types.js')
-const maxReadSize = 16 * 1024 * 1024
-const PROCESS = Symbol('process')
-const FILE = Symbol('file')
-const DIRECTORY = Symbol('directory')
-const SYMLINK = Symbol('symlink')
-const HARDLINK = Symbol('hardlink')
-const HEADER = Symbol('header')
-const READ = Symbol('read')
-const LSTAT = Symbol('lstat')
-const ONLSTAT = Symbol('onlstat')
-const ONREAD = Symbol('onread')
-const ONREADLINK = Symbol('onreadlink')
-const OPENFILE = Symbol('openfile')
-const ONOPENFILE = Symbol('onopenfile')
-const CLOSE = Symbol('close')
-const MODE = Symbol('mode')
-const warner = require('./warn-mixin.js')
-const winchars = require('./winchars.js')
-
-const modeFix = require('./mode-fix.js')
-
-const WriteEntry = warner(class WriteEntry extends MiniPass {
- constructor (p, opt) {
- opt = opt || {}
- super(opt)
- if (typeof p !== 'string')
- throw new TypeError('path is required')
- this.path = p
- // suppress atime, ctime, uid, gid, uname, gname
- this.portable = !!opt.portable
-    // until node has builtin getpwnam-style functions, this'll have to do
- this.myuid = process.getuid && process.getuid()
- this.myuser = process.env.USER || ''
- this.maxReadSize = opt.maxReadSize || maxReadSize
- this.linkCache = opt.linkCache || new Map()
- this.statCache = opt.statCache || new Map()
- this.preservePaths = !!opt.preservePaths
- this.cwd = opt.cwd || process.cwd()
- this.strict = !!opt.strict
- this.noPax = !!opt.noPax
- this.noMtime = !!opt.noMtime
- this.mtime = opt.mtime || null
-
- if (typeof opt.onwarn === 'function')
- this.on('warn', opt.onwarn)
-
- let pathWarn = false
- if (!this.preservePaths && path.win32.isAbsolute(p)) {
- // absolutes on posix are also absolutes on win32
- // so we only need to test this one to get both
- const parsed = path.win32.parse(p)
- this.path = p.substr(parsed.root.length)
- pathWarn = parsed.root
- }
-
- this.win32 = !!opt.win32 || process.platform === 'win32'
- if (this.win32) {
- this.path = winchars.decode(this.path.replace(/\\/g, '/'))
- p = p.replace(/\\/g, '/')
- }
-
- this.absolute = opt.absolute || path.resolve(this.cwd, p)
-
- if (this.path === '')
- this.path = './'
-
- if (pathWarn) {
- this.warn('TAR_ENTRY_INFO', `stripping ${pathWarn} from absolute path`, {
- entry: this,
- path: pathWarn + this.path,
- })
- }
-
- if (this.statCache.has(this.absolute))
- this[ONLSTAT](this.statCache.get(this.absolute))
- else
- this[LSTAT]()
- }
-
- [LSTAT] () {
- fs.lstat(this.absolute, (er, stat) => {
- if (er)
- return this.emit('error', er)
- this[ONLSTAT](stat)
- })
- }
-
- [ONLSTAT] (stat) {
- this.statCache.set(this.absolute, stat)
- this.stat = stat
- if (!stat.isFile())
- stat.size = 0
- this.type = getType(stat)
- this.emit('stat', stat)
- this[PROCESS]()
- }
-
- [PROCESS] () {
- switch (this.type) {
- case 'File': return this[FILE]()
- case 'Directory': return this[DIRECTORY]()
- case 'SymbolicLink': return this[SYMLINK]()
- // unsupported types are ignored.
- default: return this.end()
- }
- }
-
- [MODE] (mode) {
- return modeFix(mode, this.type === 'Directory', this.portable)
- }
-
- [HEADER] () {
- if (this.type === 'Directory' && this.portable)
- this.noMtime = true
-
- this.header = new Header({
- path: this.path,
- linkpath: this.linkpath,
- // only the permissions and setuid/setgid/sticky bitflags
- // not the higher-order bits that specify file type
- mode: this[MODE](this.stat.mode),
- uid: this.portable ? null : this.stat.uid,
- gid: this.portable ? null : this.stat.gid,
- size: this.stat.size,
- mtime: this.noMtime ? null : this.mtime || this.stat.mtime,
- type: this.type,
- uname: this.portable ? null :
- this.stat.uid === this.myuid ? this.myuser : '',
- atime: this.portable ? null : this.stat.atime,
- ctime: this.portable ? null : this.stat.ctime
- })
-
- if (this.header.encode() && !this.noPax)
- this.write(new Pax({
- atime: this.portable ? null : this.header.atime,
- ctime: this.portable ? null : this.header.ctime,
- gid: this.portable ? null : this.header.gid,
- mtime: this.noMtime ? null : this.mtime || this.header.mtime,
- path: this.path,
- linkpath: this.linkpath,
- size: this.header.size,
- uid: this.portable ? null : this.header.uid,
- uname: this.portable ? null : this.header.uname,
- dev: this.portable ? null : this.stat.dev,
- ino: this.portable ? null : this.stat.ino,
- nlink: this.portable ? null : this.stat.nlink
- }).encode())
- this.write(this.header.block)
- }
-
- [DIRECTORY] () {
- if (this.path.substr(-1) !== '/')
- this.path += '/'
- this.stat.size = 0
- this[HEADER]()
- this.end()
- }
-
- [SYMLINK] () {
- fs.readlink(this.absolute, (er, linkpath) => {
- if (er)
- return this.emit('error', er)
- this[ONREADLINK](linkpath)
- })
- }
-
- [ONREADLINK] (linkpath) {
- this.linkpath = linkpath.replace(/\\/g, '/')
- this[HEADER]()
- this.end()
- }
-
- [HARDLINK] (linkpath) {
- this.type = 'Link'
- this.linkpath = path.relative(this.cwd, linkpath).replace(/\\/g, '/')
- this.stat.size = 0
- this[HEADER]()
- this.end()
- }
-
- [FILE] () {
- if (this.stat.nlink > 1) {
- const linkKey = this.stat.dev + ':' + this.stat.ino
- if (this.linkCache.has(linkKey)) {
- const linkpath = this.linkCache.get(linkKey)
- if (linkpath.indexOf(this.cwd) === 0)
- return this[HARDLINK](linkpath)
- }
- this.linkCache.set(linkKey, this.absolute)
- }
-
- this[HEADER]()
- if (this.stat.size === 0)
- return this.end()
-
- this[OPENFILE]()
- }
-
- [OPENFILE] () {
- fs.open(this.absolute, 'r', (er, fd) => {
- if (er)
- return this.emit('error', er)
- this[ONOPENFILE](fd)
- })
- }
-
- [ONOPENFILE] (fd) {
- const blockLen = 512 * Math.ceil(this.stat.size / 512)
- const bufLen = Math.min(blockLen, this.maxReadSize)
- const buf = Buffer.allocUnsafe(bufLen)
- this[READ](fd, buf, 0, buf.length, 0, this.stat.size, blockLen)
- }
-
- [READ] (fd, buf, offset, length, pos, remain, blockRemain) {
- fs.read(fd, buf, offset, length, pos, (er, bytesRead) => {
- if (er) {
- // ignoring the error from close(2) is a bad practice, but at
- // this point we already have an error, don't need another one
- return this[CLOSE](fd, () => this.emit('error', er))
- }
- this[ONREAD](fd, buf, offset, length, pos, remain, blockRemain, bytesRead)
- })
- }
-
- [CLOSE] (fd, cb) {
- fs.close(fd, cb)
- }
-
- [ONREAD] (fd, buf, offset, length, pos, remain, blockRemain, bytesRead) {
- if (bytesRead <= 0 && remain > 0) {
- const er = new Error('encountered unexpected EOF')
- er.path = this.absolute
- er.syscall = 'read'
- er.code = 'EOF'
- return this[CLOSE](fd, () => this.emit('error', er))
- }
-
- if (bytesRead > remain) {
- const er = new Error('did not encounter expected EOF')
- er.path = this.absolute
- er.syscall = 'read'
- er.code = 'EOF'
- return this[CLOSE](fd, () => this.emit('error', er))
- }
-
- // null out the rest of the buffer, if we could fit the block padding
- if (bytesRead === remain) {
- for (let i = bytesRead; i < length && bytesRead < blockRemain; i++) {
- buf[i + offset] = 0
-        bytesRead++
-        remain++
- }
- }
-
- const writeBuf = offset === 0 && bytesRead === buf.length ?
- buf : buf.slice(offset, offset + bytesRead)
- remain -= bytesRead
- blockRemain -= bytesRead
- pos += bytesRead
- offset += bytesRead
-
- this.write(writeBuf)
-
- if (!remain) {
- if (blockRemain)
- this.write(Buffer.alloc(blockRemain))
- return this[CLOSE](fd, er => er ? this.emit('error', er) : this.end())
- }
-
- if (offset >= length) {
- buf = Buffer.allocUnsafe(length)
- offset = 0
- }
- length = buf.length - offset
- this[READ](fd, buf, offset, length, pos, remain, blockRemain)
- }
-})
-
-class WriteEntrySync extends WriteEntry {
- constructor (path, opt) {
- super(path, opt)
- }
-
- [LSTAT] () {
- this[ONLSTAT](fs.lstatSync(this.absolute))
- }
-
- [SYMLINK] () {
- this[ONREADLINK](fs.readlinkSync(this.absolute))
- }
-
- [OPENFILE] () {
- this[ONOPENFILE](fs.openSync(this.absolute, 'r'))
- }
-
- [READ] (fd, buf, offset, length, pos, remain, blockRemain) {
- let threw = true
- try {
- const bytesRead = fs.readSync(fd, buf, offset, length, pos)
- this[ONREAD](fd, buf, offset, length, pos, remain, blockRemain, bytesRead)
- threw = false
- } finally {
- // ignoring the error from close(2) is a bad practice, but at
- // this point we already have an error, don't need another one
- if (threw)
- try { this[CLOSE](fd, () => {}) } catch (er) {}
- }
- }
-
- [CLOSE] (fd, cb) {
- fs.closeSync(fd)
- cb()
- }
-}
-
-const WriteEntryTar = warner(class WriteEntryTar extends MiniPass {
- constructor (readEntry, opt) {
- opt = opt || {}
- super(opt)
- this.preservePaths = !!opt.preservePaths
- this.portable = !!opt.portable
- this.strict = !!opt.strict
- this.noPax = !!opt.noPax
- this.noMtime = !!opt.noMtime
-
- this.readEntry = readEntry
- this.type = readEntry.type
- if (this.type === 'Directory' && this.portable)
- this.noMtime = true
-
- this.path = readEntry.path
- this.mode = this[MODE](readEntry.mode)
- this.uid = this.portable ? null : readEntry.uid
- this.gid = this.portable ? null : readEntry.gid
- this.uname = this.portable ? null : readEntry.uname
- this.gname = this.portable ? null : readEntry.gname
- this.size = readEntry.size
- this.mtime = this.noMtime ? null : opt.mtime || readEntry.mtime
- this.atime = this.portable ? null : readEntry.atime
- this.ctime = this.portable ? null : readEntry.ctime
- this.linkpath = readEntry.linkpath
-
- if (typeof opt.onwarn === 'function')
- this.on('warn', opt.onwarn)
-
- let pathWarn = false
- if (path.isAbsolute(this.path) && !this.preservePaths) {
- const parsed = path.parse(this.path)
- pathWarn = parsed.root
- this.path = this.path.substr(parsed.root.length)
- }
-
- this.remain = readEntry.size
- this.blockRemain = readEntry.startBlockSize
-
- this.header = new Header({
- path: this.path,
- linkpath: this.linkpath,
- // only the permissions and setuid/setgid/sticky bitflags
- // not the higher-order bits that specify file type
- mode: this.mode,
- uid: this.portable ? null : this.uid,
- gid: this.portable ? null : this.gid,
- size: this.size,
- mtime: this.noMtime ? null : this.mtime,
- type: this.type,
- uname: this.portable ? null : this.uname,
- atime: this.portable ? null : this.atime,
- ctime: this.portable ? null : this.ctime
- })
-
- if (pathWarn) {
- this.warn('TAR_ENTRY_INFO', `stripping ${pathWarn} from absolute path`, {
- entry: this,
- path: pathWarn + this.path,
- })
- }
-
- if (this.header.encode() && !this.noPax)
- super.write(new Pax({
- atime: this.portable ? null : this.atime,
- ctime: this.portable ? null : this.ctime,
- gid: this.portable ? null : this.gid,
- mtime: this.noMtime ? null : this.mtime,
- path: this.path,
- linkpath: this.linkpath,
- size: this.size,
- uid: this.portable ? null : this.uid,
- uname: this.portable ? null : this.uname,
- dev: this.portable ? null : this.readEntry.dev,
- ino: this.portable ? null : this.readEntry.ino,
- nlink: this.portable ? null : this.readEntry.nlink
- }).encode())
-
- super.write(this.header.block)
- readEntry.pipe(this)
- }
-
- [MODE] (mode) {
- return modeFix(mode, this.type === 'Directory', this.portable)
- }
-
- write (data) {
- const writeLen = data.length
- if (writeLen > this.blockRemain)
- throw new Error('writing more to entry than is appropriate')
- this.blockRemain -= writeLen
- return super.write(data)
- }
-
- end () {
- if (this.blockRemain)
- this.write(Buffer.alloc(this.blockRemain))
- return super.end()
- }
-})
-
-WriteEntry.Sync = WriteEntrySync
-WriteEntry.Tar = WriteEntryTar
-
-const getType = stat =>
- stat.isFile() ? 'File'
- : stat.isDirectory() ? 'Directory'
- : stat.isSymbolicLink() ? 'SymbolicLink'
- : 'Unsupported'
-
-module.exports = WriteEntry
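`WriteEntry` is normally driven by `Pack`; a sketch of how the `portable` flag above is typically set, through `tar.c` (file names illustrative):

```js
const tar = require('tar')

// portable: true makes each WriteEntry omit uid/gid/uname and
// atime/ctime from its headers, as handled in [HEADER] above.
tar.c({ file: 'out.tgz', gzip: true, portable: true }, ['src/'])
  .then(() => console.log('packed'))
```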
diff --git a/node_modules/tar/node_modules/.bin/mkdirp b/node_modules/tar/node_modules/.bin/mkdirp
deleted file mode 120000
index 017896c..0000000
--- a/node_modules/tar/node_modules/.bin/mkdirp
+++ /dev/null
@@ -1 +0,0 @@
-../mkdirp/bin/cmd.js \ No newline at end of file
diff --git a/node_modules/tar/node_modules/mkdirp/CHANGELOG.md b/node_modules/tar/node_modules/mkdirp/CHANGELOG.md
deleted file mode 100644
index 8145838..0000000
--- a/node_modules/tar/node_modules/mkdirp/CHANGELOG.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# Changers Lorgs!
-
-## 1.0
-
-Full rewrite. Essentially a brand new module.
-
-- Return a promise instead of taking a callback.
-- Use native `fs.mkdir(path, { recursive: true })` when available.
-- Drop support for outdated Node.js versions. (Technically still works on
- Node.js v8, but only 10 and above are officially supported.)
-
-## 0.x
-
-Original and most widely used recursive directory creation implementation
-in JavaScript, dating back to 2010.
diff --git a/node_modules/tar/node_modules/mkdirp/LICENSE b/node_modules/tar/node_modules/mkdirp/LICENSE
deleted file mode 100644
index 13fcd15..0000000
--- a/node_modules/tar/node_modules/mkdirp/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-Copyright James Halliday (mail@substack.net) and Isaac Z. Schlueter (i@izs.me)
-
-This project is free software released under the MIT license:
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/node_modules/tar/node_modules/mkdirp/bin/cmd.js b/node_modules/tar/node_modules/mkdirp/bin/cmd.js
deleted file mode 100755
index 6e0aa8d..0000000
--- a/node_modules/tar/node_modules/mkdirp/bin/cmd.js
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env node
-
-const usage = () => `
-usage: mkdirp [DIR1,DIR2..] {OPTIONS}
-
- Create each supplied directory including any necessary parent directories
- that don't yet exist.
-
- If the directory already exists, do nothing.
-
-OPTIONS are:
-
- -m<mode> If a directory needs to be created, set the mode as an octal
- --mode=<mode> permission string.
-
- -v --version Print the mkdirp version number
-
- -h --help Print this helpful banner
-
- -p --print Print the first directories created for each path provided
-
- --manual Use manual implementation, even if native is available
-`
-
-const dirs = []
-const opts = {}
-let print = false
-let dashdash = false
-let manual = false
-for (const arg of process.argv.slice(2)) {
- if (dashdash)
- dirs.push(arg)
- else if (arg === '--')
- dashdash = true
- else if (arg === '--manual')
- manual = true
- else if (/^-h/.test(arg) || /^--help/.test(arg)) {
- console.log(usage())
- process.exit(0)
- } else if (arg === '-v' || arg === '--version') {
- console.log(require('../package.json').version)
- process.exit(0)
- } else if (arg === '-p' || arg === '--print') {
- print = true
- } else if (/^-m/.test(arg) || /^--mode=/.test(arg)) {
- const mode = parseInt(arg.replace(/^(-m|--mode=)/, ''), 8)
- if (isNaN(mode)) {
- console.error(`invalid mode argument: ${arg}\nMust be an octal number.`)
- process.exit(1)
- }
- opts.mode = mode
- } else
- dirs.push(arg)
-}
-
-const mkdirp = require('../')
-const impl = manual ? mkdirp.manual : mkdirp
-if (dirs.length === 0)
- console.error(usage())
-
-Promise.all(dirs.map(dir => impl(dir, opts)))
- .then(made => print ? made.forEach(m => m && console.log(m)) : null)
- .catch(er => {
- console.error(er.message)
- if (er.code)
- console.error(' code: ' + er.code)
- process.exit(1)
- })
diff --git a/node_modules/tar/node_modules/mkdirp/index.js b/node_modules/tar/node_modules/mkdirp/index.js
deleted file mode 100644
index ad7a16c..0000000
--- a/node_modules/tar/node_modules/mkdirp/index.js
+++ /dev/null
@@ -1,31 +0,0 @@
-const optsArg = require('./lib/opts-arg.js')
-const pathArg = require('./lib/path-arg.js')
-
-const {mkdirpNative, mkdirpNativeSync} = require('./lib/mkdirp-native.js')
-const {mkdirpManual, mkdirpManualSync} = require('./lib/mkdirp-manual.js')
-const {useNative, useNativeSync} = require('./lib/use-native.js')
-
-const mkdirp = (path, opts) => {
- path = pathArg(path)
- opts = optsArg(opts)
- return useNative(opts)
- ? mkdirpNative(path, opts)
- : mkdirpManual(path, opts)
-}
-
-const mkdirpSync = (path, opts) => {
- path = pathArg(path)
- opts = optsArg(opts)
- return useNativeSync(opts)
- ? mkdirpNativeSync(path, opts)
- : mkdirpManualSync(path, opts)
-}
-
-mkdirp.sync = mkdirpSync
-mkdirp.native = (path, opts) => mkdirpNative(pathArg(path), optsArg(opts))
-mkdirp.manual = (path, opts) => mkdirpManual(pathArg(path), optsArg(opts))
-mkdirp.nativeSync = (path, opts) => mkdirpNativeSync(pathArg(path), optsArg(opts))
-mkdirp.manualSync = (path, opts) => mkdirpManualSync(pathArg(path), optsArg(opts))
-
-module.exports = mkdirp
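A sketch of forcing each code path exported above, rather than letting `useNative` decide (paths illustrative):

```js
const mkdirp = require('mkdirp')

// All three return a Promise for the first directory created.
mkdirp('/tmp/a/b/c')          // picks native or manual automatically
mkdirp.native('/tmp/a/b/c')   // always fs.mkdir with { recursive: true }
mkdirp.manual('/tmp/a/b/c')   // always walks up, creating missing dirs
```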
diff --git a/node_modules/tar/node_modules/mkdirp/lib/find-made.js b/node_modules/tar/node_modules/mkdirp/lib/find-made.js
deleted file mode 100644
index 022e492..0000000
--- a/node_modules/tar/node_modules/mkdirp/lib/find-made.js
+++ /dev/null
@@ -1,29 +0,0 @@
-const {dirname} = require('path')
-
-const findMade = (opts, parent, path = undefined) => {
- // we never want the 'made' return value to be a root directory
- if (path === parent)
- return Promise.resolve()
-
- return opts.statAsync(parent).then(
- st => st.isDirectory() ? path : undefined, // will fail later
- er => er.code === 'ENOENT'
- ? findMade(opts, dirname(parent), parent)
- : undefined
- )
-}
-
-const findMadeSync = (opts, parent, path = undefined) => {
- if (path === parent)
- return undefined
-
- try {
- return opts.statSync(parent).isDirectory() ? path : undefined
- } catch (er) {
- return er.code === 'ENOENT'
- ? findMadeSync(opts, dirname(parent), parent)
- : undefined
- }
-}
-
-module.exports = {findMade, findMadeSync}
diff --git a/node_modules/tar/node_modules/mkdirp/lib/mkdirp-manual.js b/node_modules/tar/node_modules/mkdirp/lib/mkdirp-manual.js
deleted file mode 100644
index 2eb18cd..0000000
--- a/node_modules/tar/node_modules/mkdirp/lib/mkdirp-manual.js
+++ /dev/null
@@ -1,64 +0,0 @@
-const {dirname} = require('path')
-
-const mkdirpManual = (path, opts, made) => {
- opts.recursive = false
- const parent = dirname(path)
- if (parent === path) {
- return opts.mkdirAsync(path, opts).catch(er => {
- // swallowed by recursive implementation on posix systems
- // any other error is a failure
- if (er.code !== 'EISDIR')
- throw er
- })
- }
-
- return opts.mkdirAsync(path, opts).then(() => made || path, er => {
- if (er.code === 'ENOENT')
- return mkdirpManual(parent, opts)
- .then(made => mkdirpManual(path, opts, made))
- if (er.code !== 'EEXIST' && er.code !== 'EROFS')
- throw er
- return opts.statAsync(path).then(st => {
- if (st.isDirectory())
- return made
- else
- throw er
- }, () => { throw er })
- })
-}
-
-const mkdirpManualSync = (path, opts, made) => {
- const parent = dirname(path)
- opts.recursive = false
-
- if (parent === path) {
- try {
- return opts.mkdirSync(path, opts)
- } catch (er) {
- // swallowed by recursive implementation on posix systems
- // any other error is a failure
- if (er.code !== 'EISDIR')
- throw er
- else
- return
- }
- }
-
- try {
- opts.mkdirSync(path, opts)
- return made || path
- } catch (er) {
- if (er.code === 'ENOENT')
- return mkdirpManualSync(path, opts, mkdirpManualSync(parent, opts, made))
- if (er.code !== 'EEXIST' && er.code !== 'EROFS')
- throw er
- try {
- if (!opts.statSync(path).isDirectory())
- throw er
- } catch (_) {
- throw er
- }
- }
-}
-
-module.exports = {mkdirpManual, mkdirpManualSync}
diff --git a/node_modules/tar/node_modules/mkdirp/lib/mkdirp-native.js b/node_modules/tar/node_modules/mkdirp/lib/mkdirp-native.js
deleted file mode 100644
index c7a6b69..0000000
--- a/node_modules/tar/node_modules/mkdirp/lib/mkdirp-native.js
+++ /dev/null
@@ -1,39 +0,0 @@
-const {dirname} = require('path')
-const {findMade, findMadeSync} = require('./find-made.js')
-const {mkdirpManual, mkdirpManualSync} = require('./mkdirp-manual.js')
-
-const mkdirpNative = (path, opts) => {
- opts.recursive = true
- const parent = dirname(path)
- if (parent === path)
- return opts.mkdirAsync(path, opts)
-
- return findMade(opts, path).then(made =>
- opts.mkdirAsync(path, opts).then(() => made)
- .catch(er => {
- if (er.code === 'ENOENT')
- return mkdirpManual(path, opts)
- else
- throw er
- }))
-}
-
-const mkdirpNativeSync = (path, opts) => {
- opts.recursive = true
- const parent = dirname(path)
- if (parent === path)
- return opts.mkdirSync(path, opts)
-
- const made = findMadeSync(opts, path)
- try {
- opts.mkdirSync(path, opts)
- return made
- } catch (er) {
- if (er.code === 'ENOENT')
- return mkdirpManualSync(path, opts)
- else
- throw er
- }
-}
-
-module.exports = {mkdirpNative, mkdirpNativeSync}
diff --git a/node_modules/tar/node_modules/mkdirp/lib/opts-arg.js b/node_modules/tar/node_modules/mkdirp/lib/opts-arg.js
deleted file mode 100644
index 2fa4833..0000000
--- a/node_modules/tar/node_modules/mkdirp/lib/opts-arg.js
+++ /dev/null
@@ -1,23 +0,0 @@
-const { promisify } = require('util')
-const fs = require('fs')
-const optsArg = opts => {
- if (!opts)
- opts = { mode: 0o777, fs }
- else if (typeof opts === 'object')
- opts = { mode: 0o777, fs, ...opts }
- else if (typeof opts === 'number')
- opts = { mode: opts, fs }
- else if (typeof opts === 'string')
- opts = { mode: parseInt(opts, 8), fs }
- else
- throw new TypeError('invalid options argument')
-
- opts.mkdir = opts.mkdir || opts.fs.mkdir || fs.mkdir
- opts.mkdirAsync = promisify(opts.mkdir)
- opts.stat = opts.stat || opts.fs.stat || fs.stat
- opts.statAsync = promisify(opts.stat)
- opts.statSync = opts.statSync || opts.fs.statSync || fs.statSync
- opts.mkdirSync = opts.mkdirSync || opts.fs.mkdirSync || fs.mkdirSync
- return opts
-}
-module.exports = optsArg
diff --git a/node_modules/tar/node_modules/mkdirp/lib/path-arg.js b/node_modules/tar/node_modules/mkdirp/lib/path-arg.js
deleted file mode 100644
index cc07de5..0000000
--- a/node_modules/tar/node_modules/mkdirp/lib/path-arg.js
+++ /dev/null
@@ -1,29 +0,0 @@
-const platform = process.env.__TESTING_MKDIRP_PLATFORM__ || process.platform
-const { resolve, parse } = require('path')
-const pathArg = path => {
- if (/\0/.test(path)) {
- // simulate same failure that node raises
- throw Object.assign(
- new TypeError('path must be a string without null bytes'),
- {
- path,
- code: 'ERR_INVALID_ARG_VALUE',
- }
- )
- }
-
- path = resolve(path)
- if (platform === 'win32') {
- const badWinChars = /[*|"<>?:]/
- const {root} = parse(path)
- if (badWinChars.test(path.substr(root.length))) {
- throw Object.assign(new Error('Illegal characters in path.'), {
- path,
- code: 'EINVAL',
- })
- }
- }
-
- return path
-}
-module.exports = pathArg
diff --git a/node_modules/tar/node_modules/mkdirp/lib/use-native.js b/node_modules/tar/node_modules/mkdirp/lib/use-native.js
deleted file mode 100644
index 079361d..0000000
--- a/node_modules/tar/node_modules/mkdirp/lib/use-native.js
+++ /dev/null
@@ -1,10 +0,0 @@
-const fs = require('fs')
-
-const version = process.env.__TESTING_MKDIRP_NODE_VERSION__ || process.version
-const versArr = version.replace(/^v/, '').split('.')
-const hasNative = +versArr[0] > 10 || +versArr[0] === 10 && +versArr[1] >= 12
-
-const useNative = !hasNative ? () => false : opts => opts.mkdir === fs.mkdir
-const useNativeSync = !hasNative ? () => false : opts => opts.mkdirSync === fs.mkdirSync
-
-module.exports = {useNative, useNativeSync}
diff --git a/node_modules/tar/node_modules/mkdirp/package.json b/node_modules/tar/node_modules/mkdirp/package.json
deleted file mode 100644
index 7cc967e..0000000
--- a/node_modules/tar/node_modules/mkdirp/package.json
+++ /dev/null
@@ -1,75 +0,0 @@
-{
- "_from": "mkdirp@^1.0.3",
- "_id": "mkdirp@1.0.4",
- "_inBundle": false,
- "_integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==",
- "_location": "/tar/mkdirp",
- "_phantomChildren": {},
- "_requested": {
- "type": "range",
- "registry": true,
- "raw": "mkdirp@^1.0.3",
- "name": "mkdirp",
- "escapedName": "mkdirp",
- "rawSpec": "^1.0.3",
- "saveSpec": null,
- "fetchSpec": "^1.0.3"
- },
- "_requiredBy": [
- "/tar"
- ],
- "_resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz",
- "_shasum": "3eb5ed62622756d79a5f0e2a221dfebad75c2f7e",
- "_spec": "mkdirp@^1.0.3",
- "_where": "/home/pruss/Dev/3-minute-website/node_modules/tar",
- "bin": {
- "mkdirp": "bin/cmd.js"
- },
- "bugs": {
- "url": "https://github.com/isaacs/node-mkdirp/issues"
- },
- "bundleDependencies": false,
- "deprecated": false,
- "description": "Recursively mkdir, like `mkdir -p`",
- "devDependencies": {
- "require-inject": "^1.4.4",
- "tap": "^14.10.7"
- },
- "engines": {
- "node": ">=10"
- },
- "files": [
- "bin",
- "lib",
- "index.js"
- ],
- "homepage": "https://github.com/isaacs/node-mkdirp#readme",
- "keywords": [
- "mkdir",
- "directory",
- "make dir",
- "make",
- "dir",
- "recursive",
- "native"
- ],
- "license": "MIT",
- "main": "index.js",
- "name": "mkdirp",
- "repository": {
- "type": "git",
- "url": "git+https://github.com/isaacs/node-mkdirp.git"
- },
- "scripts": {
- "postpublish": "git push origin --follow-tags",
- "postversion": "npm publish",
- "preversion": "npm test",
- "snap": "tap",
- "test": "tap"
- },
- "tap": {
- "check-coverage": true,
- "coverage-map": "map.js"
- },
- "version": "1.0.4"
-}
diff --git a/node_modules/tar/node_modules/mkdirp/readme.markdown b/node_modules/tar/node_modules/mkdirp/readme.markdown
deleted file mode 100644
index 827de59..0000000
--- a/node_modules/tar/node_modules/mkdirp/readme.markdown
+++ /dev/null
@@ -1,266 +0,0 @@
-# mkdirp
-
-Like `mkdir -p`, but in Node.js!
-
-Now with a modern API and no\* bugs!
-
-<small>\* may contain some bugs</small>
-
-# example
-
-## pow.js
-
-```js
-const mkdirp = require('mkdirp')
-
-// return value is a Promise resolving to the first directory created
-mkdirp('/tmp/foo/bar/baz').then(made =>
- console.log(`made directories, starting with ${made}`))
-```
-
-Output (where `/tmp/foo` already exists)
-
-```
-made directories, starting with /tmp/foo/bar
-```
-
-Or, if you don't have time to wait around for promises:
-
-```js
-const mkdirp = require('mkdirp')
-
-// return value is the first directory created
-const made = mkdirp.sync('/tmp/foo/bar/baz')
-console.log(`made directories, starting with ${made}`)
-```
-
-And now /tmp/foo/bar/baz exists, huzzah!
-
-# methods
-
-```js
-const mkdirp = require('mkdirp')
-```
-
-## mkdirp(dir, [opts]) -> Promise<String | undefined>
-
-Create a new directory and any necessary subdirectories at `dir` with octal
-permission string `opts.mode`. If `opts` is a string or number, it will be
-treated as the `opts.mode`.
-
-If `opts.mode` isn't specified, it defaults to `0o777 &
-(~process.umask())`.
-
-Promise resolves to first directory `made` that had to be created, or
-`undefined` if everything already exists. Promise rejects if any errors
-are encountered. Note that, in the case of promise rejection, some
-directories _may_ have been created, as recursive directory creation is not
-an atomic operation.
-
-You can optionally pass in an alternate `fs` implementation by passing in
-`opts.fs`. Your implementation should have `opts.fs.mkdir(path, opts, cb)`
-and `opts.fs.stat(path, cb)`.
-
-You can also override just one or the other of `mkdir` and `stat` by
-passing in `opts.stat` or `opts.mkdir`, or providing an `fs` option that
-only overrides one of these.
-
-## mkdirp.sync(dir, [opts]) -> String | undefined
-
-Synchronously create a new directory and any necessary subdirectories at
-`dir` with octal permission string `opts.mode`. If `opts` is a string or
-number, it will be treated as the `opts.mode`.
-
-If `opts.mode` isn't specified, it defaults to `0o777 &
-(~process.umask())`.
-
-Returns the first directory that had to be created, or undefined if
-everything already exists.
-
-You can optionally pass in an alternate `fs` implementation by passing in
-`opts.fs`. Your implementation should have `opts.fs.mkdirSync(path, mode)`
-and `opts.fs.statSync(path)`.
-
-You can also override just one or the other of `mkdirSync` and `statSync`
-by passing in `opts.statSync` or `opts.mkdirSync`, or providing an `fs`
-option that only overrides one of these.
-
-## mkdirp.manual, mkdirp.manualSync
-
-Use the manual implementation (not the native one). This is the default
-when the native implementation is not available or the stat/mkdir
-implementation is overridden.
-
-## mkdirp.native, mkdirp.nativeSync
-
-Use the native implementation (not the manual one). This is the default
-when the native implementation is available and stat/mkdir are not
-overridden.
-
-# implementation
-
-On Node.js v10.12.0 and above, use the native `fs.mkdir(p,
-{recursive:true})` option, unless `fs.mkdir`/`fs.mkdirSync` has been
-overridden by an option.
-
-## native implementation
-
-- If the path is a root directory, then pass it to the underlying
- implementation and return the result/error. (In this case, it'll either
- succeed or fail, but we aren't actually creating any dirs.)
-- Walk up the path statting each directory, to find the first path that
- will be created, `made`.
-- Call `fs.mkdir(path, { recursive: true })` (or `fs.mkdirSync`)
-- If error, raise it to the caller.
-- Return `made`.
-
-## manual implementation
-
-- Call underlying `fs.mkdir` implementation, with `recursive: false`
-- If error:
- - If path is a root directory, raise to the caller and do not handle it
- - If ENOENT, mkdirp parent dir, store result as `made`
- - stat(path)
- - If error, raise original `mkdir` error
- - If directory, return `made`
- - Else, raise original `mkdir` error
-- else
- - return `undefined` if a root dir, or `made` if set, or `path`
-
-## windows vs unix caveat
-
-On Windows file systems, attempts to create a root directory (ie, a drive
-letter or root UNC path) will fail. If the root directory exists, then it
-will fail with `EPERM`. If the root directory does not exist, then it will
-fail with `ENOENT`.
-
-On posix file systems, attempts to create a root directory (in recursive
-mode) will succeed silently, as it is treated like just another directory
-that already exists. (In non-recursive mode, of course, it fails with
-`EEXIST`.)
-
-In order to preserve this system-specific behavior (and because it's not as
-if we can create the parent of a root directory anyway), attempts to create
-a root directory are passed directly to the `fs` implementation, and any
-errors encountered are not handled.
-
-## native error caveat
-
-The native implementation (as of at least Node.js v13.4.0) does not provide
-appropriate errors in some cases (see
-[nodejs/node#31481](https://github.com/nodejs/node/issues/31481) and
-[nodejs/node#28015](https://github.com/nodejs/node/issues/28015)).
-
-In order to work around this issue, the native implementation will fall
-back to the manual implementation if an `ENOENT` error is encountered.
-
-# choosing a recursive mkdir implementation
-
-There are a few to choose from! Use the one that suits your needs best :D
-
-## use `fs.mkdir(path, {recursive: true}, cb)` if:
-
-- You wish to optimize performance even at the expense of other factors.
-- You don't need to know the first dir created.
-- You are ok with getting `ENOENT` as the error when some other problem is
- the actual cause.
-- You can limit your platforms to Node.js v10.12 and above.
-- You're ok with using callbacks instead of promises.
-- You don't need/want a CLI.
-- You don't need to override the `fs` methods in use.
-
-## use this module (mkdirp 1.x) if:
-
-- You need to know the first directory that was created.
-- You wish to use the native implementation if available, but fall back
- when it's not.
-- You prefer promise-returning APIs to callback-taking APIs.
-- You want more useful error messages than the native recursive mkdir
- provides (at least as of Node.js v13.4), and are ok with re-trying on
- `ENOENT` to achieve this.
-- You need (or at least, are ok with) a CLI.
-- You need to override the `fs` methods in use.
-
-## use [`make-dir`](http://npm.im/make-dir) if:
-
-- You do not need to know the first dir created (and wish to save a few
- `stat` calls when using the native implementation for this reason).
-- You wish to use the native implementation if available, but fall back
- when it's not.
-- You prefer promise-returning APIs to callback-taking APIs.
-- You are ok with occasionally getting `ENOENT` errors for failures that
- are actually related to something other than a missing file system entry.
-- You don't need/want a CLI.
-- You need to override the `fs` methods in use.
-
-## use mkdirp 0.x if:
-
-- You need to know the first directory that was created.
-- You need (or at least, are ok with) a CLI.
-- You need to override the `fs` methods in use.
-- You're ok with using callbacks instead of promises.
-- You are not running on Windows, where the root-level ENOENT errors can
- lead to infinite regress.
-- You think vinyl just sounds warmer and richer for some weird reason.
-- You are supporting truly ancient Node.js versions, before even the advent
- of a `Promise` language primitive. (Please don't. You deserve better.)
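-
-For example, the 0.x callback style, where `made` is the first directory
-created:
-
-```js
-const mkdirp = require('mkdirp')
-
-mkdirp('/tmp/foo/bar/baz', (er, made) => {
-  if (er) throw er
-  console.log(`made directories, starting with ${made}`)
-})
-```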
-
-# cli
-
-This package also ships with a `mkdirp` command.
-
-```
-$ mkdirp -h
-
-usage: mkdirp [DIR1,DIR2..] {OPTIONS}
-
- Create each supplied directory including any necessary parent directories
- that don't yet exist.
-
- If the directory already exists, do nothing.
-
-OPTIONS are:
-
- -m<mode> If a directory needs to be created, set the mode as an octal
- --mode=<mode> permission string.
-
- -v --version Print the mkdirp version number
-
- -h --help Print this helpful banner
-
- -p --print Print the first directories created for each path provided
-
- --manual Use manual implementation, even if native is available
-```
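-
-For example, creating a nested tree and printing the first directory made
-(output here assumes none of the directories existed beforehand):
-
-```
-$ mkdirp -p -m 0755 one/two/three
-one
-```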
-
-# install
-
-With [npm](http://npmjs.org) do:
-
-```
-npm install mkdirp
-```
-
-to get the library locally, or
-
-```
-npm install -g mkdirp
-```
-
-to get the command everywhere, or
-
-```
-npx mkdirp ...
-```
-
-to run the command without installing it globally.
-
-# platform support
-
-This module works on node v8, but only v10 and above are officially
-supported, as node v8 reached its LTS end-of-life on 2020-01-01, which is
-in the past, as of this writing.
-
-# license
-
-MIT
diff --git a/node_modules/tar/package.json b/node_modules/tar/package.json
deleted file mode 100644
index 212d140..0000000
--- a/node_modules/tar/package.json
+++ /dev/null
@@ -1,81 +0,0 @@
-{
- "_from": "tar@^6.0.2",
- "_id": "tar@6.0.5",
- "_inBundle": false,
- "_integrity": "sha512-0b4HOimQHj9nXNEAA7zWwMM91Zhhba3pspja6sQbgTpynOJf+bkjBnfybNYzbpLbnwXnbyB4LOREvlyXLkCHSg==",
- "_location": "/tar",
- "_phantomChildren": {},
- "_requested": {
- "type": "range",
- "registry": true,
- "raw": "tar@^6.0.2",
- "name": "tar",
- "escapedName": "tar",
- "rawSpec": "^6.0.2",
- "saveSpec": null,
- "fetchSpec": "^6.0.2"
- },
- "_requiredBy": [
- "/cacache"
- ],
- "_resolved": "https://registry.npmjs.org/tar/-/tar-6.0.5.tgz",
- "_shasum": "bde815086e10b39f1dcd298e89d596e1535e200f",
- "_spec": "tar@^6.0.2",
- "_where": "/home/pruss/Dev/3-minute-website/node_modules/cacache",
- "author": {
- "name": "Isaac Z. Schlueter",
- "email": "i@izs.me",
- "url": "http://blog.izs.me/"
- },
- "bugs": {
- "url": "https://github.com/npm/node-tar/issues"
- },
- "bundleDependencies": false,
- "dependencies": {
- "chownr": "^2.0.0",
- "fs-minipass": "^2.0.0",
- "minipass": "^3.0.0",
- "minizlib": "^2.1.1",
- "mkdirp": "^1.0.3",
- "yallist": "^4.0.0"
- },
- "deprecated": false,
- "description": "tar for node",
- "devDependencies": {
- "chmodr": "^1.2.0",
- "end-of-stream": "^1.4.3",
- "events-to-array": "^1.1.2",
- "mutate-fs": "^2.1.1",
- "rimraf": "^2.7.1",
- "tap": "^14.9.2",
- "tar-fs": "^1.16.3",
- "tar-stream": "^1.6.2"
- },
- "engines": {
- "node": ">= 10"
- },
- "files": [
- "index.js",
- "lib/*.js"
- ],
- "homepage": "https://github.com/npm/node-tar#readme",
- "license": "ISC",
- "name": "tar",
- "repository": {
- "type": "git",
- "url": "git+https://github.com/npm/node-tar.git"
- },
- "scripts": {
- "bench": "for i in benchmarks/*/*.js; do echo $i; for j in {1..5}; do node $i || break; done; done",
- "genparse": "node scripts/generate-parse-fixtures.js",
- "postversion": "npm publish",
- "prepublishOnly": "git push origin --follow-tags",
- "preversion": "npm test",
- "test": "tap"
- },
- "tap": {
- "coverage-map": "map.js",
- "check-coverage": true
- },
- "version": "6.0.5"
-}