Update dashboard, kb, memory +4 more (+28 ~18 -1)
This commit is contained in:
21
node_modules/@exodus/bytes/LICENSE
generated
vendored
Normal file
21
node_modules/@exodus/bytes/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2024-2025 Exodus Movement
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
885
node_modules/@exodus/bytes/README.md
generated
vendored
Normal file
885
node_modules/@exodus/bytes/README.md
generated
vendored
Normal file
@@ -0,0 +1,885 @@
|
||||
# `@exodus/bytes`
|
||||
|
||||
[](https://npmjs.org/package/@exodus/bytes)
|
||||
[](https://github.com/ExodusOSS/bytes/releases)
|
||||
[](https://www.npmcharts.com/compare/@exodus/bytes?minimal=true)
|
||||
[](https://github.com/ExodusOSS/bytes/blob/HEAD/LICENSE)
|
||||
[](https://github.com/ExodusOSS/bytes/actions/workflows/test.yml?query=branch%3Amain)
|
||||
|
||||
`Uint8Array` conversion to and from `base64`, `base32`, `base58`, `hex`, `utf8`, `utf16`, `bech32` and `wif`
|
||||
|
||||
And a [`TextEncoder` / `TextDecoder` polyfill](#textencoder--textdecoder-polyfill)
|
||||
|
||||
See [documentation](https://exodusoss.github.io/bytes).
|
||||
|
||||
## Strict
|
||||
|
||||
Performs proper input validation, ensures no garbage-in-garbage-out
|
||||
|
||||
Tested on Node.js, Deno, Bun, browsers (including Servo), Hermes, QuickJS and barebone engines in CI [(how?)](https://github.com/ExodusMovement/test#exodustest)
|
||||
|
||||
## Fast
|
||||
|
||||
* `10-20x` faster than `Buffer` polyfill
|
||||
* `2-10x` faster than `iconv-lite`
|
||||
|
||||
The above was for the js fallback
|
||||
|
||||
It's up to `100x` when native impl is available \
|
||||
e.g. in `utf8fromString` on Hermes / React Native or `fromHex` in Chrome
|
||||
|
||||
Also:
|
||||
* `3-8x` faster than `bs58`
|
||||
* `10-30x` faster than `@scure/base` (or `>100x` on Node.js <25)
|
||||
* Faster in `utf8toString` / `utf8fromString` than `Buffer` or `TextDecoder` / `TextEncoder` on Node.js
|
||||
|
||||
See [Performance](./Performance.md) for more info
|
||||
|
||||
## TextEncoder / TextDecoder polyfill
|
||||
|
||||
```js
|
||||
import { TextDecoder, TextEncoder } from '@exodus/bytes/encoding.js'
|
||||
import { TextDecoderStream, TextEncoderStream } from '@exodus/bytes/encoding.js' // Requires Streams
|
||||
```
|
||||
|
||||
Less than half the bundle size of [text-encoding](https://npmjs.com/text-encoding), [whatwg-encoding](https://npmjs.com/whatwg-encoding) or [iconv-lite](https://npmjs.com/iconv-lite) (gzipped or not).\
|
||||
Also [much faster](#fast) than all of those.
|
||||
|
||||
> [!TIP]
|
||||
> See also the [lite version](#lite-version) to get this down to 10 KiB gzipped.
|
||||
|
||||
Spec compliant, passing WPT and covered with extra tests.\
|
||||
Moreover, tests for this library uncovered [bugs in all major implementations](https://docs.google.com/spreadsheets/d/1pdEefRG6r9fZy61WHGz0TKSt8cO4ISWqlpBN5KntIvQ/edit).\
|
||||
Including all three major browser engines being wrong at UTF-8.\
|
||||
See [WPT pull request](https://github.com/web-platform-tests/wpt/pull/56892).
|
||||
|
||||
It works correctly even in environments that have native implementations broken (that's all of them currently).\
|
||||
Runs (and passes WPT) on Node.js built without ICU.
|
||||
|
||||
> [!NOTE]
|
||||
> [Faster than Node.js native implementation on Node.js](https://github.com/nodejs/node/issues/61041#issuecomment-3649242024).
|
||||
>
|
||||
> The JS multi-byte version is as fast as native impl in Node.js and browsers, but (unlike them) returns correct results.
|
||||
>
|
||||
> For encodings where native version is known to be fast and correct, it is automatically used.\
|
||||
> Some single-byte encodings are faster than native in all three major browser engines.
|
||||
|
||||
See [analysis table](https://docs.google.com/spreadsheets/d/1pdEefRG6r9fZy61WHGz0TKSt8cO4ISWqlpBN5KntIvQ/edit) for more info.
|
||||
|
||||
### Caveat: `TextDecoder` / `TextEncoder` APIs are lossy by default per spec
|
||||
|
||||
_These are only provided as a compatibility layer, prefer hardened APIs instead in new code._
|
||||
|
||||
* `TextDecoder` can (and should) be used with `{ fatal: true }` option for all purposes demanding correctness / lossless transforms
|
||||
|
||||
* `TextEncoder` does not support a fatal mode per spec, it always performs replacement.
|
||||
|
||||
That is not suitable for hashing, cryptography or consensus applications.\
|
||||
Otherwise there would be non-equal strings with equal signatures and hashes — the collision is caused by the lossy transform of a JS string to bytes.
|
||||
Those also survive e.g. `JSON.stringify`/`JSON.parse` or being sent over network.
|
||||
|
||||
Use strict APIs in new applications, see `utf8fromString` / `utf16fromString` below.\
|
||||
Those throw on non-well-formed strings by default.
|
||||
|
||||
### Lite version
|
||||
|
||||
If you don't need support for legacy multi-byte encodings, you can use the lite import:
|
||||
```js
|
||||
import { TextDecoder, TextEncoder } from '@exodus/bytes/encoding-lite.js'
|
||||
import { TextDecoderStream, TextEncoderStream } from '@exodus/bytes/encoding-lite.js' // Requires Streams
|
||||
```
|
||||
|
||||
This reduces the bundle size 9x:\
|
||||
from 90 KiB gzipped for `@exodus/bytes/encoding.js` to 10 KiB gzipped for `@exodus/bytes/encoding-lite.js`.\
|
||||
(For comparison, `text-encoding` module is 190 KiB gzipped, and `iconv-lite` is 194 KiB gzipped):
|
||||
|
||||
It still supports `utf-8`, `utf-16le`, `utf-16be` and all single-byte encodings specified by the spec,
|
||||
the only difference is support for legacy multi-byte encodings.
|
||||
|
||||
See [the list of encodings](https://encoding.spec.whatwg.org/#names-and-labels).
|
||||
|
||||
## API
|
||||
|
||||
### `@exodus/bytes/utf8.js`
|
||||
|
||||
UTF-8 encoding/decoding
|
||||
|
||||
```js
|
||||
import { utf8fromString, utf8toString } from '@exodus/bytes/utf8.js'
|
||||
|
||||
// loose
|
||||
import { utf8fromStringLoose, utf8toStringLoose } from '@exodus/bytes/utf8.js'
|
||||
```
|
||||
|
||||
_These methods by design encode/decode BOM (codepoint `U+FEFF` Byte Order Mark) as-is._\
|
||||
_If you need BOM handling or detection, use `@exodus/bytes/encoding.js`_
|
||||
|
||||
#### `utf8fromString(string, format = 'uint8')`
|
||||
|
||||
Encode a string to UTF-8 bytes (strict mode)
|
||||
|
||||
Throws on invalid Unicode (unpaired surrogates)
|
||||
|
||||
This is similar to the following snippet (but works on all engines):
|
||||
```js
|
||||
// Strict encode, requiring Unicode codepoints to be valid
|
||||
if (typeof string !== 'string' || !string.isWellFormed()) throw new TypeError()
|
||||
return new TextEncoder().encode(string)
|
||||
```
|
||||
|
||||
#### `utf8fromStringLoose(string, format = 'uint8')`
|
||||
|
||||
Encode a string to UTF-8 bytes (loose mode)
|
||||
|
||||
Replaces invalid Unicode (unpaired surrogates) with replacement codepoints `U+FFFD`
|
||||
per [WHATWG Encoding](https://encoding.spec.whatwg.org/) specification.
|
||||
|
||||
_Such replacement is a non-injective function, is irreversible and causes collisions.\
|
||||
Prefer using strict throwing methods for cryptography applications._
|
||||
|
||||
This is similar to the following snippet (but works on all engines):
|
||||
```js
|
||||
// Loose encode, replacing invalid Unicode codepoints with U+FFFD
|
||||
if (typeof string !== 'string') throw new TypeError()
|
||||
return new TextEncoder().encode(string)
|
||||
```
|
||||
|
||||
#### `utf8toString(arr)`
|
||||
|
||||
Decode UTF-8 bytes to a string (strict mode)
|
||||
|
||||
Throws on invalid UTF-8 byte sequences
|
||||
|
||||
This is similar to `new TextDecoder('utf-8', { fatal: true, ignoreBOM: true }).decode(arr)`,
|
||||
but works on all engines.
|
||||
|
||||
#### `utf8toStringLoose(arr)`
|
||||
|
||||
Decode UTF-8 bytes to a string (loose mode)
|
||||
|
||||
Replaces invalid UTF-8 byte sequences with replacement codepoints `U+FFFD`
|
||||
per [WHATWG Encoding](https://encoding.spec.whatwg.org/) specification.
|
||||
|
||||
_Such replacement is a non-injective function, is irreversible and causes collisions.\
|
||||
Prefer using strict throwing methods for cryptography applications._
|
||||
|
||||
This is similar to `new TextDecoder('utf-8', { ignoreBOM: true }).decode(arr)`,
|
||||
but works on all engines.
|
||||
|
||||
### `@exodus/bytes/utf16.js`
|
||||
|
||||
UTF-16 encoding/decoding
|
||||
|
||||
```js
|
||||
import { utf16fromString, utf16toString } from '@exodus/bytes/utf16.js'
|
||||
|
||||
// loose
|
||||
import { utf16fromStringLoose, utf16toStringLoose } from '@exodus/bytes/utf16.js'
|
||||
```
|
||||
|
||||
_These methods by design encode/decode BOM (codepoint `U+FEFF` Byte Order Mark) as-is._\
|
||||
_If you need BOM handling or detection, use `@exodus/bytes/encoding.js`_
|
||||
|
||||
#### `utf16fromString(string, format = 'uint16')`
|
||||
|
||||
Encode a string to UTF-16 bytes (strict mode)
|
||||
|
||||
Throws on invalid Unicode (unpaired surrogates)
|
||||
|
||||
#### `utf16fromStringLoose(string, format = 'uint16')`
|
||||
|
||||
Encode a string to UTF-16 bytes (loose mode)
|
||||
|
||||
Replaces invalid Unicode (unpaired surrogates) with replacement codepoints `U+FFFD`
|
||||
per [WHATWG Encoding](https://encoding.spec.whatwg.org/) specification.
|
||||
|
||||
_Such replacement is a non-injective function, is irreversible and causes collisions.\
|
||||
Prefer using strict throwing methods for cryptography applications._
|
||||
|
||||
#### `utf16toString(arr, format = 'uint16')`
|
||||
|
||||
Decode UTF-16 bytes to a string (strict mode)
|
||||
|
||||
Throws on invalid UTF-16 byte sequences
|
||||
|
||||
Throws on non-even byte length.
|
||||
|
||||
#### `utf16toStringLoose(arr, format = 'uint16')`
|
||||
|
||||
Decode UTF-16 bytes to a string (loose mode)
|
||||
|
||||
Replaces invalid UTF-16 byte sequences with replacement codepoints `U+FFFD`
|
||||
per [WHATWG Encoding](https://encoding.spec.whatwg.org/) specification.
|
||||
|
||||
_Such replacement is a non-injective function, is irreversible and causes collisions.\
|
||||
Prefer using strict throwing methods for cryptography applications._
|
||||
|
||||
Throws on non-even byte length.
|
||||
|
||||
### `@exodus/bytes/single-byte.js`
|
||||
|
||||
Decode / encode the legacy single-byte encodings according to the
|
||||
[Encoding standard](https://encoding.spec.whatwg.org/)
|
||||
([§9](https://encoding.spec.whatwg.org/#legacy-single-byte-encodings),
|
||||
[§14.5](https://encoding.spec.whatwg.org/#x-user-defined)),
|
||||
and [unicode.org](https://unicode.org/Public/MAPPINGS/ISO8859) `iso-8859-*` mappings.
|
||||
|
||||
```js
|
||||
import { createSinglebyteDecoder, createSinglebyteEncoder } from '@exodus/bytes/single-byte.js'
|
||||
import { windows1252toString, windows1252fromString } from '@exodus/bytes/single-byte.js'
|
||||
import { latin1toString, latin1fromString } from '@exodus/bytes/single-byte.js'
|
||||
```
|
||||
|
||||
> [!WARNING]
|
||||
> This is a lower-level API for single-byte encodings.
|
||||
> It might not match what you expect, as it supports both WHATWG and unicode.org encodings under
|
||||
> different names, with the main intended use case for the latter being either non-web or legacy contexts.
|
||||
>
|
||||
> For a safe WHATWG Encoding-compatible API, see `@exodus/bytes/encoding.js` import (and variants of it).
|
||||
>
|
||||
> Be sure to know what you are doing and check documentation when directly using encodings from this file.
|
||||
|
||||
Supports all single-byte encodings listed in the WHATWG Encoding standard:
|
||||
`ibm866`, `iso-8859-2`, `iso-8859-3`, `iso-8859-4`, `iso-8859-5`, `iso-8859-6`, `iso-8859-7`, `iso-8859-8`,
|
||||
`iso-8859-8-i`, `iso-8859-10`, `iso-8859-13`, `iso-8859-14`, `iso-8859-15`, `iso-8859-16`, `koi8-r`, `koi8-u`,
|
||||
`macintosh`, `windows-874`, `windows-1250`, `windows-1251`, `windows-1252`, `windows-1253`, `windows-1254`,
|
||||
`windows-1255`, `windows-1256`, `windows-1257`, `windows-1258`, `x-mac-cyrillic` and `x-user-defined`.
|
||||
|
||||
Also supports `iso-8859-1`, `iso-8859-9`, `iso-8859-11` as defined at
|
||||
[unicode.org](https://unicode.org/Public/MAPPINGS/ISO8859)
|
||||
(and all other `iso-8859-*` encodings there as they match WHATWG).
|
||||
|
||||
> [!NOTE]
|
||||
> While all `iso-8859-*` encodings supported by the [WHATWG Encoding standard](https://encoding.spec.whatwg.org/) match
|
||||
> [unicode.org](https://unicode.org/Public/MAPPINGS/ISO8859), the WHATWG Encoding spec doesn't support
|
||||
> `iso-8859-1`, `iso-8859-9`, `iso-8859-11`, and instead maps them as labels to `windows-1252`, `windows-1254`, `windows-874`.\
|
||||
> `createSinglebyteDecoder()` (unlike `TextDecoder` or `legacyHookDecode()`) does not do such mapping,
|
||||
> so its results will differ from `TextDecoder` for those encoding names.
|
||||
|
||||
```js
|
||||
> new TextDecoder('iso-8859-1').encoding
|
||||
'windows-1252'
|
||||
> new TextDecoder('iso-8859-9').encoding
|
||||
'windows-1254'
|
||||
> new TextDecoder('iso-8859-11').encoding
|
||||
'windows-874'
|
||||
> new TextDecoder('iso-8859-9').decode(Uint8Array.of(0x80, 0x81, 0xd0))
|
||||
'€\x81Ğ' // this is actually decoded according to windows-1254 per TextDecoder spec
|
||||
> createSinglebyteDecoder('iso-8859-9')(Uint8Array.of(0x80, 0x81, 0xd0))
|
||||
'\x80\x81Ğ' // this is iso-8859-9 as defined at https://unicode.org/Public/MAPPINGS/ISO8859/8859-9.txt
|
||||
```
|
||||
|
||||
All WHATWG Encoding spec [`windows-*` encodings](https://encoding.spec.whatwg.org/#windows-874) are supersets of
|
||||
corresponding [unicode.org encodings](https://unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/), meaning that
|
||||
they encode/decode all the old valid (non-replacement) strings / byte sequences identically, but can also support
|
||||
a wider range of inputs.
|
||||
|
||||
#### `createSinglebyteDecoder(encoding, loose = false)`
|
||||
|
||||
Create a decoder for a supported one-byte `encoding`, given its lowercased name `encoding`.
|
||||
|
||||
Returns a function `decode(arr)` that decodes bytes to a string.
|
||||
|
||||
#### `createSinglebyteEncoder(encoding, { mode = 'fatal' })`
|
||||
|
||||
Create an encoder for a supported one-byte `encoding`, given its lowercased name `encoding`.
|
||||
|
||||
Returns a function `encode(string)` that encodes a string to bytes.
|
||||
|
||||
In `'fatal'` mode (default), will throw on non well-formed strings or any codepoints which could
|
||||
not be encoded in the target encoding.
|
||||
|
||||
#### `latin1toString(arr)`
|
||||
|
||||
Decode `iso-8859-1` bytes to a string.
|
||||
|
||||
There is no loose variant for this encoding, all bytes can be decoded.
|
||||
|
||||
Same as:
|
||||
```js
|
||||
const latin1toString = createSinglebyteDecoder('iso-8859-1')
|
||||
```
|
||||
|
||||
> [!NOTE]
|
||||
> This is different from `new TextDecoder('iso-8859-1')` and `new TextDecoder('latin1')`, as those
|
||||
> alias to `new TextDecoder('windows-1252')`.
|
||||
|
||||
#### `latin1fromString(string)`
|
||||
|
||||
Encode a string to `iso-8859-1` bytes.
|
||||
|
||||
Throws on non well-formed strings or any codepoints which could not be encoded in `iso-8859-1`.
|
||||
|
||||
Same as:
|
||||
```js
|
||||
const latin1fromString = createSinglebyteEncoder('iso-8859-1', { mode: 'fatal' })
|
||||
```
|
||||
|
||||
#### `windows1252toString(arr)`
|
||||
|
||||
Decode `windows-1252` bytes to a string.
|
||||
|
||||
There is no loose variant for this encoding, all bytes can be decoded.
|
||||
|
||||
Same as:
|
||||
```js
|
||||
const windows1252toString = createSinglebyteDecoder('windows-1252')
|
||||
```
|
||||
|
||||
#### `windows1252fromString(string)`
|
||||
|
||||
Encode a string to `windows-1252` bytes.
|
||||
|
||||
Throws on non well-formed strings or any codepoints which could not be encoded in `windows-1252`.
|
||||
|
||||
Same as:
|
||||
```js
|
||||
const windows1252fromString = createSinglebyteEncoder('windows-1252', { mode: 'fatal' })
|
||||
```
|
||||
|
||||
### `@exodus/bytes/multi-byte.js`
|
||||
|
||||
Decode / encode the legacy multi-byte encodings according to the
|
||||
[Encoding standard](https://encoding.spec.whatwg.org/)
|
||||
([§10](https://encoding.spec.whatwg.org/#legacy-multi-byte-chinese-(simplified)-encodings),
|
||||
[§11](https://encoding.spec.whatwg.org/#legacy-multi-byte-chinese-(traditional)-encodings),
|
||||
[§12](https://encoding.spec.whatwg.org/#legacy-multi-byte-japanese-encodings),
|
||||
[§13](https://encoding.spec.whatwg.org/#legacy-multi-byte-korean-encodings)).
|
||||
|
||||
```js
|
||||
import { createMultibyteDecoder, createMultibyteEncoder } from '@exodus/bytes/multi-byte.js'
|
||||
```
|
||||
|
||||
> [!WARNING]
|
||||
> This is a lower-level API for legacy multi-byte encodings.
|
||||
>
|
||||
> For a safe WHATWG Encoding-compatible API, see `@exodus/bytes/encoding.js` import (and variants of it).
|
||||
>
|
||||
> Be sure to know what you are doing and check documentation when directly using encodings from this file.
|
||||
|
||||
Supports all legacy multi-byte encodings listed in the WHATWG Encoding standard:
|
||||
`gbk`, `gb18030`, `big5`, `euc-jp`, `iso-2022-jp`, `shift_jis`, `euc-kr`.
|
||||
|
||||
#### `createMultibyteDecoder(encoding, loose = false)`
|
||||
|
||||
Create a decoder for a supported legacy multi-byte `encoding`, given its lowercased name `encoding`.
|
||||
|
||||
Returns a function `decode(arr, stream = false)` that decodes bytes to a string.
|
||||
|
||||
The returned function will maintain internal state while `stream = true` is used, allowing it to
|
||||
handle incomplete multi-byte sequences across multiple calls.
|
||||
State is reset when `stream = false` or when the function is called without the `stream` parameter.
|
||||
|
||||
#### `createMultibyteEncoder(encoding, { mode = 'fatal' })`
|
||||
|
||||
Create an encoder for a supported legacy multi-byte `encoding`, given its lowercased name `encoding`.
|
||||
|
||||
Returns a function `encode(string)` that encodes a string to bytes.
|
||||
|
||||
In `'fatal'` mode (default), will throw on non well-formed strings or any codepoints which could
|
||||
not be encoded in the target encoding.
|
||||
|
||||
### `@exodus/bytes/bigint.js`
|
||||
|
||||
Convert between BigInt and Uint8Array
|
||||
|
||||
```js
|
||||
import { fromBigInt, toBigInt } from '@exodus/bytes/bigint.js'
|
||||
```
|
||||
|
||||
#### `fromBigInt(bigint, { length, format = 'uint8' })`
|
||||
|
||||
Convert a BigInt to a Uint8Array or Buffer
|
||||
|
||||
The output bytes are in big-endian format.
|
||||
|
||||
Throws if the BigInt is negative or cannot fit into the specified length.
|
||||
|
||||
#### `toBigInt(arr)`
|
||||
|
||||
Convert a Uint8Array or Buffer to a BigInt
|
||||
|
||||
The bytes are interpreted as a big-endian unsigned integer.
|
||||
|
||||
### `@exodus/bytes/hex.js`
|
||||
|
||||
Implements Base16 from [RFC4648](https://datatracker.ietf.org/doc/html/rfc4648)
|
||||
(no differences from [RFC3548](https://datatracker.ietf.org/doc/html/rfc3548)).
|
||||
|
||||
```js
|
||||
import { fromHex, toHex } from '@exodus/bytes/hex.js'
|
||||
```
|
||||
|
||||
#### `fromHex(string, format = 'uint8')`
|
||||
|
||||
Decode a hex string to bytes
|
||||
|
||||
Unlike `Buffer.from()`, throws on invalid input
|
||||
|
||||
#### `toHex(arr)`
|
||||
|
||||
Encode a `Uint8Array` to a lowercase hex string
|
||||
|
||||
### `@exodus/bytes/base64.js`
|
||||
|
||||
Implements base64 and base64url from [RFC4648](https://datatracker.ietf.org/doc/html/rfc4648)
|
||||
(no differences from [RFC3548](https://datatracker.ietf.org/doc/html/rfc3548)).
|
||||
|
||||
```js
|
||||
import { fromBase64, toBase64 } from '@exodus/bytes/base64.js'
|
||||
import { fromBase64url, toBase64url } from '@exodus/bytes/base64.js'
|
||||
import { fromBase64any } from '@exodus/bytes/base64.js'
|
||||
```
|
||||
|
||||
#### `fromBase64(string, { format = 'uint8', padding = 'both' })`
|
||||
|
||||
Decode a base64 string to bytes
|
||||
|
||||
Operates in strict mode for last chunk, does not allow whitespace
|
||||
|
||||
#### `fromBase64url(string, { format = 'uint8', padding = false })`
|
||||
|
||||
Decode a base64url string to bytes
|
||||
|
||||
Operates in strict mode for last chunk, does not allow whitespace
|
||||
|
||||
#### `fromBase64any(string, { format = 'uint8', padding = 'both' })`
|
||||
|
||||
Decode either base64 or base64url string to bytes
|
||||
|
||||
Automatically detects the variant based on characters present
|
||||
|
||||
#### `toBase64(arr, { padding = true })`
|
||||
|
||||
Encode a `Uint8Array` to a base64 string (RFC 4648)
|
||||
|
||||
#### `toBase64url(arr, { padding = false })`
|
||||
|
||||
Encode a `Uint8Array` to a base64url string (RFC 4648)
|
||||
|
||||
### `@exodus/bytes/base32.js`
|
||||
|
||||
Implements base32 and base32hex from [RFC4648](https://datatracker.ietf.org/doc/html/rfc4648)
|
||||
(no differences from [RFC3548](https://datatracker.ietf.org/doc/html/rfc3548)).
|
||||
|
||||
```js
|
||||
import { fromBase32, toBase32 } from '@exodus/bytes/base32.js'
|
||||
import { fromBase32hex, toBase32hex } from '@exodus/bytes/base32.js'
|
||||
```
|
||||
|
||||
#### `fromBase32(string, { format = 'uint8', padding = 'both' })`
|
||||
|
||||
Decode a base32 string to bytes
|
||||
|
||||
Operates in strict mode for last chunk, does not allow whitespace
|
||||
|
||||
#### `fromBase32hex(string, { format = 'uint8', padding = 'both' })`
|
||||
|
||||
Decode a base32hex string to bytes
|
||||
|
||||
Operates in strict mode for last chunk, does not allow whitespace
|
||||
|
||||
#### `toBase32(arr, { padding = false })`
|
||||
|
||||
Encode a `Uint8Array` to a base32 string (RFC 4648)
|
||||
|
||||
#### `toBase32hex(arr, { padding = false })`
|
||||
|
||||
Encode a `Uint8Array` to a base32hex string (RFC 4648)
|
||||
|
||||
### `@exodus/bytes/bech32.js`
|
||||
|
||||
Implements bech32 and bech32m from
|
||||
[BIP-0173](https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki#specification)
|
||||
and [BIP-0350](https://github.com/bitcoin/bips/blob/master/bip-0350.mediawiki#specification).
|
||||
|
||||
```js
|
||||
import { fromBech32, toBech32 } from '@exodus/bytes/bech32.js'
|
||||
import { fromBech32m, toBech32m } from '@exodus/bytes/bech32.js'
|
||||
import { getPrefix } from '@exodus/bytes/bech32.js'
|
||||
```
|
||||
|
||||
#### `getPrefix(string, limit = 90)`
|
||||
|
||||
Extract the prefix from a bech32 or bech32m string without full validation
|
||||
|
||||
This is a quick check that skips most validation.
|
||||
|
||||
#### `fromBech32(string, limit = 90)`
|
||||
|
||||
Decode a bech32 string to bytes
|
||||
|
||||
#### `toBech32(prefix, bytes, limit = 90)`
|
||||
|
||||
Encode bytes to a bech32 string
|
||||
|
||||
#### `fromBech32m(string, limit = 90)`
|
||||
|
||||
Decode a bech32m string to bytes
|
||||
|
||||
#### `toBech32m(prefix, bytes, limit = 90)`
|
||||
|
||||
Encode bytes to a bech32m string
|
||||
|
||||
### `@exodus/bytes/base58.js`
|
||||
|
||||
Implements [base58](https://www.ietf.org/archive/id/draft-msporny-base58-03.txt) encoding.
|
||||
|
||||
Supports both standard base58 and XRP variant alphabets.
|
||||
|
||||
```js
|
||||
import { fromBase58, toBase58 } from '@exodus/bytes/base58.js'
|
||||
import { fromBase58xrp, toBase58xrp } from '@exodus/bytes/base58.js'
|
||||
```
|
||||
|
||||
#### `fromBase58(string, format = 'uint8')`
|
||||
|
||||
Decode a base58 string to bytes
|
||||
|
||||
Uses the standard Bitcoin base58 alphabet
|
||||
|
||||
#### `toBase58(arr)`
|
||||
|
||||
Encode a `Uint8Array` to a base58 string
|
||||
|
||||
Uses the standard Bitcoin base58 alphabet
|
||||
|
||||
#### `fromBase58xrp(string, format = 'uint8')`
|
||||
|
||||
Decode a base58 string to bytes using XRP alphabet
|
||||
|
||||
Uses the XRP variant base58 alphabet
|
||||
|
||||
#### `toBase58xrp(arr)`
|
||||
|
||||
Encode a `Uint8Array` to a base58 string using XRP alphabet
|
||||
|
||||
Uses the XRP variant base58 alphabet
|
||||
|
||||
### `@exodus/bytes/base58check.js`
|
||||
|
||||
Implements [base58check](https://en.bitcoin.it/wiki/Base58Check_encoding) encoding.
|
||||
|
||||
```js
|
||||
import { fromBase58check, toBase58check } from '@exodus/bytes/base58check.js'
|
||||
import { fromBase58checkSync, toBase58checkSync } from '@exodus/bytes/base58check.js'
|
||||
import { makeBase58check } from '@exodus/bytes/base58check.js'
|
||||
```
|
||||
|
||||
On non-Node.js, requires peer dependency [@noble/hashes](https://www.npmjs.com/package/@noble/hashes) to be installed.
|
||||
|
||||
#### `async fromBase58check(string, format = 'uint8')`
|
||||
|
||||
Decode a base58check string to bytes asynchronously
|
||||
|
||||
Validates the checksum using double SHA-256
|
||||
|
||||
#### `async toBase58check(arr)`
|
||||
|
||||
Encode bytes to base58check string asynchronously
|
||||
|
||||
Uses double SHA-256 for checksum calculation
|
||||
|
||||
#### `fromBase58checkSync(string, format = 'uint8')`
|
||||
|
||||
Decode a base58check string to bytes synchronously
|
||||
|
||||
Validates the checksum using double SHA-256
|
||||
|
||||
#### `toBase58checkSync(arr)`
|
||||
|
||||
Encode bytes to base58check string synchronously
|
||||
|
||||
Uses double SHA-256 for checksum calculation
|
||||
|
||||
#### `makeBase58check(hashAlgo, hashAlgoSync)`
|
||||
|
||||
Create a base58check encoder/decoder with custom hash functions
|
||||
|
||||
### `@exodus/bytes/wif.js`
|
||||
|
||||
Wallet Import Format (WIF) encoding and decoding.
|
||||
|
||||
```js
|
||||
import { fromWifString, toWifString } from '@exodus/bytes/wif.js'
|
||||
import { fromWifStringSync, toWifStringSync } from '@exodus/bytes/wif.js'
|
||||
```
|
||||
|
||||
On non-Node.js, requires peer dependency [@noble/hashes](https://www.npmjs.com/package/@noble/hashes) to be installed.
|
||||
|
||||
#### `async fromWifString(string[, version])`
|
||||
|
||||
Decode a WIF string to WIF data
|
||||
|
||||
Returns a promise that resolves to an object with `{ version, privateKey, compressed }`.
|
||||
|
||||
The optional `version` parameter validates the version byte.
|
||||
|
||||
Throws if the WIF string is invalid or version doesn't match.
|
||||
|
||||
#### `fromWifStringSync(string[, version])`
|
||||
|
||||
Decode a WIF string to WIF data (synchronous)
|
||||
|
||||
Returns an object with `{ version, privateKey, compressed }`.
|
||||
|
||||
The optional `version` parameter validates the version byte.
|
||||
|
||||
Throws if the WIF string is invalid or version doesn't match.
|
||||
|
||||
#### `async toWifString({ version, privateKey, compressed })`
|
||||
|
||||
Encode WIF data to a WIF string
|
||||
|
||||
#### `toWifStringSync({ version, privateKey, compressed })`
|
||||
|
||||
Encode WIF data to a WIF string (synchronous)
|
||||
|
||||
### `@exodus/bytes/array.js`
|
||||
|
||||
TypedArray utils and conversions.
|
||||
|
||||
```js
|
||||
import { typedView } from '@exodus/bytes/array.js'
|
||||
```
|
||||
|
||||
#### `typedView(arr, format = 'uint8')`
|
||||
|
||||
Create a view of a TypedArray in the specified format (`'uint8'` or `'buffer'`)
|
||||
|
||||
> [!IMPORTANT]
|
||||
> Does not copy data, returns a view on the same underlying buffer
|
||||
|
||||
### `@exodus/bytes/encoding.js`
|
||||
|
||||
Implements the [Encoding standard](https://encoding.spec.whatwg.org/):
|
||||
[TextDecoder](https://encoding.spec.whatwg.org/#interface-textdecoder),
|
||||
[TextEncoder](https://encoding.spec.whatwg.org/#interface-textencoder),
|
||||
[TextDecoderStream](https://encoding.spec.whatwg.org/#interface-textdecoderstream),
|
||||
[TextEncoderStream](https://encoding.spec.whatwg.org/#interface-textencoderstream),
|
||||
some [hooks](https://encoding.spec.whatwg.org/#specification-hooks).
|
||||
|
||||
```js
|
||||
import { TextDecoder, TextEncoder } from '@exodus/bytes/encoding.js'
|
||||
import { TextDecoderStream, TextEncoderStream } from '@exodus/bytes/encoding.js' // Requires Streams
|
||||
|
||||
// Hooks for standards
|
||||
import { getBOMEncoding, legacyHookDecode, labelToName, normalizeEncoding } from '@exodus/bytes/encoding.js'
|
||||
```
|
||||
|
||||
#### `new TextDecoder(label = 'utf-8', { fatal = false, ignoreBOM = false })`
|
||||
|
||||
[TextDecoder](https://encoding.spec.whatwg.org/#interface-textdecoder) implementation/polyfill.
|
||||
|
||||
Decode bytes to strings according to [WHATWG Encoding](https://encoding.spec.whatwg.org) specification.
|
||||
|
||||
#### `new TextEncoder()`
|
||||
|
||||
[TextEncoder](https://encoding.spec.whatwg.org/#interface-textencoder) implementation/polyfill.
|
||||
|
||||
Encode strings to UTF-8 bytes according to [WHATWG Encoding](https://encoding.spec.whatwg.org) specification.
|
||||
|
||||
#### `new TextDecoderStream(label = 'utf-8', { fatal = false, ignoreBOM = false })`
|
||||
|
||||
[TextDecoderStream](https://encoding.spec.whatwg.org/#interface-textdecoderstream) implementation/polyfill.
|
||||
|
||||
A [Streams](https://streams.spec.whatwg.org/) wrapper for `TextDecoder`.
|
||||
|
||||
Requires [Streams](https://streams.spec.whatwg.org/) to be either supported by the platform or
|
||||
[polyfilled](https://npmjs.com/package/web-streams-polyfill).
|
||||
|
||||
#### `new TextEncoderStream()`
|
||||
|
||||
[TextEncoderStream](https://encoding.spec.whatwg.org/#interface-textencoderstream) implementation/polyfill.
|
||||
|
||||
A [Streams](https://streams.spec.whatwg.org/) wrapper for `TextEncoder`.
|
||||
|
||||
Requires [Streams](https://streams.spec.whatwg.org/) to be either supported by the platform or
|
||||
[polyfilled](https://npmjs.com/package/web-streams-polyfill).
|
||||
|
||||
#### `labelToName(label)`
|
||||
|
||||
Implements [get an encoding from a string `label`](https://encoding.spec.whatwg.org/#concept-encoding-get).
|
||||
|
||||
Convert an encoding [label](https://encoding.spec.whatwg.org/#names-and-labels) to its name,
|
||||
as a case-sensitive string.
|
||||
|
||||
If an encoding with that label does not exist, returns `null`.
|
||||
|
||||
All encoding names are also valid labels for corresponding encodings.
|
||||
|
||||
#### `normalizeEncoding(label)`
|
||||
|
||||
Convert an encoding [label](https://encoding.spec.whatwg.org/#names-and-labels) to its name,
|
||||
as an ASCII-lowercased string.
|
||||
|
||||
If an encoding with that label does not exist, returns `null`.
|
||||
|
||||
This is the same as [`decoder.encoding` getter](https://encoding.spec.whatwg.org/#dom-textdecoder-encoding),
|
||||
except that it:
|
||||
1. Supports [`replacement` encoding](https://encoding.spec.whatwg.org/#replacement) and its
|
||||
[labels](https://encoding.spec.whatwg.org/#ref-for-replacement%E2%91%A1)
|
||||
2. Does not throw for invalid labels and instead returns `null`
|
||||
|
||||
It is identical to:
|
||||
```js
|
||||
labelToName(label)?.toLowerCase() ?? null
|
||||
```
|
||||
|
||||
All encoding names are also valid labels for corresponding encodings.
|
||||
|
||||
#### `getBOMEncoding(input)`
|
||||
|
||||
Implements [BOM sniff](https://encoding.spec.whatwg.org/#bom-sniff) legacy hook.
|
||||
|
||||
Given a `TypedArray` or an `ArrayBuffer` instance `input`, returns either of:
|
||||
- `'utf-8'`, if `input` starts with UTF-8 byte order mark.
|
||||
- `'utf-16le'`, if `input` starts with UTF-16LE byte order mark.
|
||||
- `'utf-16be'`, if `input` starts with UTF-16BE byte order mark.
|
||||
- `null` otherwise.
|
||||
|
||||
#### `legacyHookDecode(input, fallbackEncoding = 'utf-8')`
|
||||
|
||||
Implements [decode](https://encoding.spec.whatwg.org/#decode) legacy hook.
|
||||
|
||||
Given a `TypedArray` or an `ArrayBuffer` instance `input` and an optional `fallbackEncoding`
|
||||
encoding [label](https://encoding.spec.whatwg.org/#names-and-labels),
|
||||
sniffs encoding from BOM with `fallbackEncoding` fallback and then
|
||||
decodes the `input` using that encoding, skipping BOM if it was present.
|
||||
|
||||
Notes:
|
||||
|
||||
- BOM-sniffed encoding takes precedence over `fallbackEncoding` option per spec.
|
||||
Use with care.
|
||||
- Always operates in non-fatal [mode](https://encoding.spec.whatwg.org/#textdecoder-error-mode),
|
||||
aka replacement. It can convert different byte sequences to equal strings.
|
||||
|
||||
This method is similar to the following code, except that it doesn't support encoding labels and
|
||||
only expects lowercased encoding name:
|
||||
|
||||
```js
|
||||
new TextDecoder(getBOMEncoding(input) ?? fallbackEncoding).decode(input)
|
||||
```
|
||||
|
||||
### `@exodus/bytes/encoding-lite.js`
|
||||
|
||||
The exact same exports as `@exodus/bytes/encoding.js` are also exported as
|
||||
`@exodus/bytes/encoding-lite.js`, with the difference that the lite version does not load
|
||||
multi-byte `TextDecoder` encodings by default to reduce bundle size 10x.
|
||||
|
||||
```js
|
||||
import { TextDecoder, TextEncoder } from '@exodus/bytes/encoding-lite.js'
|
||||
import { TextDecoderStream, TextEncoderStream } from '@exodus/bytes/encoding-lite.js' // Requires Streams
|
||||
|
||||
// Hooks for standards
|
||||
import { getBOMEncoding, legacyHookDecode, labelToName, normalizeEncoding } from '@exodus/bytes/encoding-lite.js'
|
||||
```
|
||||
|
||||
The only affected encodings are: `gbk`, `gb18030`, `big5`, `euc-jp`, `iso-2022-jp`, `shift_jis`
|
||||
and their [labels](https://encoding.spec.whatwg.org/#names-and-labels) when used with `TextDecoder`.
|
||||
|
||||
Legacy single-byte encodings are loaded by default in both cases.
|
||||
|
||||
`TextEncoder` and hooks for standards (including `labelToName` / `normalizeEncoding`) do not have any behavior
|
||||
differences in the lite version and support the full range of inputs.
|
||||
|
||||
To avoid inconsistencies, the exported classes and methods are exactly the same objects.
|
||||
|
||||
```console
|
||||
> lite = require('@exodus/bytes/encoding-lite.js')
|
||||
[Module: null prototype] {
|
||||
TextDecoder: [class TextDecoder],
|
||||
TextDecoderStream: [class TextDecoderStream],
|
||||
TextEncoder: [class TextEncoder],
|
||||
TextEncoderStream: [class TextEncoderStream],
|
||||
getBOMEncoding: [Function: getBOMEncoding],
|
||||
labelToName: [Function: labelToName],
|
||||
legacyHookDecode: [Function: legacyHookDecode],
|
||||
normalizeEncoding: [Function: normalizeEncoding]
|
||||
}
|
||||
> new lite.TextDecoder('big5').decode(Uint8Array.of(0x25))
|
||||
Uncaught:
|
||||
Error: Legacy multi-byte encodings are disabled in /encoding-lite.js, use /encoding.js for full encodings range support
|
||||
|
||||
> full = require('@exodus/bytes/encoding.js')
|
||||
[Module: null prototype] {
|
||||
TextDecoder: [class TextDecoder],
|
||||
TextDecoderStream: [class TextDecoderStream],
|
||||
TextEncoder: [class TextEncoder],
|
||||
TextEncoderStream: [class TextEncoderStream],
|
||||
getBOMEncoding: [Function: getBOMEncoding],
|
||||
labelToName: [Function: labelToName],
|
||||
legacyHookDecode: [Function: legacyHookDecode],
|
||||
normalizeEncoding: [Function: normalizeEncoding]
|
||||
}
|
||||
> full.TextDecoder === lite.TextDecoder
|
||||
true
|
||||
> new full.TextDecoder('big5').decode(Uint8Array.of(0x25))
|
||||
'%'
|
||||
> new lite.TextDecoder('big5').decode(Uint8Array.of(0x25))
|
||||
'%'
|
||||
```
|
||||
|
||||
### `@exodus/bytes/encoding-browser.js`
|
||||
|
||||
Same as `@exodus/bytes/encoding.js`, but in browsers instead of polyfilling just uses whatever the
|
||||
browser provides, drastically reducing the bundle size (to less than 2 KiB gzipped).
|
||||
|
||||
```js
|
||||
import { TextDecoder, TextEncoder } from '@exodus/bytes/encoding-browser.js'
|
||||
import { TextDecoderStream, TextEncoderStream } from '@exodus/bytes/encoding-browser.js' // Requires Streams
|
||||
|
||||
// Hooks for standards
|
||||
import { getBOMEncoding, legacyHookDecode, labelToName, normalizeEncoding } from '@exodus/bytes/encoding-browser.js'
|
||||
```
|
||||
|
||||
Under non-browser engines (Node.js, React Native, etc.) a full polyfill is used as those platforms
|
||||
do not provide sufficiently complete / non-buggy `TextDecoder` APIs.
|
||||
|
||||
> [!NOTE]
|
||||
> Implementations in browsers [have bugs](https://docs.google.com/spreadsheets/d/1pdEefRG6r9fZy61WHGz0TKSt8cO4ISWqlpBN5KntIvQ/edit),
|
||||
> but they are fixing them and the expected update window is short.\
|
||||
> If you want to circumvent browser bugs, use full `@exodus/bytes/encoding.js` import.
|
||||
|
||||
### `@exodus/bytes/whatwg.js`
|
||||
|
||||
WHATWG helpers
|
||||
|
||||
```js
|
||||
import '@exodus/bytes/encoding.js' // For full legacy multi-byte encodings support
|
||||
import { percentEncodeAfterEncoding } from '@exodus/bytes/whatwg.js'
|
||||
```
|
||||
|
||||
#### `percentEncodeAfterEncoding(encoding, input, percentEncodeSet, spaceAsPlus = false)`
|
||||
|
||||
Implements [percent-encode after encoding](https://url.spec.whatwg.org/#string-percent-encode-after-encoding)
|
||||
per WHATWG URL specification.
|
||||
|
||||
> [!IMPORTANT]
|
||||
> You must import `@exodus/bytes/encoding.js` for this API to accept legacy multi-byte encodings.
|
||||
|
||||
Encodings `utf-16le`, `utf-16be`, and `replacement` are not accepted.
|
||||
|
||||
[C0 control percent-encode set](https://url.spec.whatwg.org/#c0-control-percent-encode-set) is
|
||||
always percent-encoded.
|
||||
|
||||
`percentEncodeSet` is an addition to that, and must be a string of unique increasing codepoints
|
||||
in range 0x20 - 0x7e, e.g. `' "#<>'`.
|
||||
|
||||
This method accepts [DOMStrings](https://webidl.spec.whatwg.org/#idl-DOMString) and converts them
|
||||
to [USVStrings](https://webidl.spec.whatwg.org/#idl-USVString).
|
||||
This is different from e.g. `encodeURI` and `encodeURIComponent` which throw on surrogates:
|
||||
```js
|
||||
> percentEncodeAfterEncoding('utf8', '\ud800', ' "#$%&+,/:;<=>?@[\\]^`{|}') // component
|
||||
'%EF%BF%BD'
|
||||
> encodeURIComponent('\ud800')
|
||||
Uncaught URIError: URI malformed
|
||||
```
|
||||
|
||||
## Changelog
|
||||
|
||||
See [GitHub Releases](https://github.com/ExodusOSS/bytes/releases) tab
|
||||
|
||||
## License
|
||||
|
||||
[MIT](./LICENSE)
|
||||
62
node_modules/@exodus/bytes/array.d.ts
generated
vendored
Normal file
62
node_modules/@exodus/bytes/array.d.ts
generated
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
/**
|
||||
* TypedArray utils and conversions.
|
||||
*
|
||||
* ```js
|
||||
* import { typedView } from '@exodus/bytes/array.js'
|
||||
* ```
|
||||
*
|
||||
* @module @exodus/bytes/array.js
|
||||
*/
|
||||
|
||||
/// <reference types="node" />
|
||||
|
||||
// >= TypeScript 5.9 made Uint8Array templated with <> and defaulted to ArrayBufferLike
|
||||
// which would incorrectly accept SharedArrayBuffer instances.
|
||||
// < TypeScript 5.7 doesn't support templates for Uint8Array.
|
||||
// So this type is defined as a workaround to evaluate to Uint8Array<ArrayBuffer> on all versions of TypeScript.
|
||||
|
||||
/**
 * This is `Uint8Array<ArrayBuffer>`
 * (as opposed to `Uint8Array<SharedArrayBuffer>` and `Uint8Array<ArrayBufferLike>`)
 * on TypeScript versions that support that distinction.
 *
 * On TypeScript < 5.7, this is just `Uint8Array`, as it's not a template there.
 */
export type Uint8ArrayBuffer = ReturnType<typeof Uint8Array.from>;

/**
 * This is `Uint16Array<ArrayBuffer>`
 * (as opposed to `Uint16Array<SharedArrayBuffer>` and `Uint16Array<ArrayBufferLike>`)
 * on TypeScript versions that support that distinction.
 *
 * On TypeScript < 5.7, this is just `Uint16Array`, as it's not a template there.
 */
export type Uint16ArrayBuffer = ReturnType<typeof Uint16Array.from>;

/**
 * This is `Uint32Array<ArrayBuffer>`
 * (as opposed to `Uint32Array<SharedArrayBuffer>` and `Uint32Array<ArrayBufferLike>`)
 * on TypeScript versions that support that distinction.
 *
 * On TypeScript < 5.7, this is just `Uint32Array`, as it's not a template there.
 */
export type Uint32ArrayBuffer = ReturnType<typeof Uint32Array.from>;

/**
 * Output format for typed array conversions
 * - `'uint8'`: a plain `Uint8Array` view
 * - `'buffer'`: a Node.js `Buffer` view (relies on a global `Buffer` at runtime — verify availability on non-Node platforms)
 */
export type OutputFormat = 'uint8' | 'buffer';

/**
 * Create a view of a TypedArray in the specified format (`'uint8'` or `'buffer'`)
 *
 * > [!IMPORTANT]
 * > Does not copy data, returns a view on the same underlying buffer
 *
 * Overloads are ordered most-specific first so literal `format` arguments
 * resolve to the precise return type.
 *
 * @param arr - The input TypedArray
 * @param format - The desired output format (`'uint8'` or `'buffer'`)
 * @returns A view on the same underlying buffer
 */
export function typedView(arr: ArrayBufferView, format: 'uint8'): Uint8Array;
export function typedView(arr: ArrayBufferView, format: 'buffer'): Buffer;
export function typedView(arr: ArrayBufferView, format: OutputFormat): Uint8Array | Buffer;
|
||||
17
node_modules/@exodus/bytes/array.js
generated
vendored
Normal file
17
node_modules/@exodus/bytes/array.js
generated
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
import { assertTypedArray } from './assert.js'
|
||||
|
||||
const { Buffer } = globalThis // Buffer is optional — absent outside Node.js-like environments

/**
 * Create a zero-copy view of a TypedArray in the requested format.
 *
 * @param {ArrayBufferView} arr - any TypedArray instance
 * @param {'uint8'|'buffer'} format - desired view type
 * @returns {Uint8Array|Buffer} a view over the same underlying buffer (no data is copied)
 * @throws {TypeError} on non-TypedArray input or an unknown format
 */
export function typedView(arr, format) {
  assertTypedArray(arr)

  if (format === 'uint8') {
    // Already exactly a Uint8Array (not a subclass like Buffer) — return as-is
    if (arr.constructor === Uint8Array) return arr
    return new Uint8Array(arr.buffer, arr.byteOffset, arr.byteLength)
  }

  if (format === 'buffer') {
    // Genuine Buffer instances pass through untouched
    if (arr.constructor === Buffer && Buffer.isBuffer(arr)) return arr
    // Throws if the optional global Buffer is unavailable, same as before
    return Buffer.from(arr.buffer, arr.byteOffset, arr.byteLength)
  }

  throw new TypeError('Unexpected format')
}
|
||||
26
node_modules/@exodus/bytes/assert.js
generated
vendored
Normal file
26
node_modules/@exodus/bytes/assert.js
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
/** Throws if an options "rest" object still contains unrecognized keys. */
export function assertEmptyRest(rest) {
  const extraneous = Object.keys(rest)
  if (extraneous.length !== 0) throw new TypeError('Unexpected extra options')
}

// Builds the assertUint8 error text: 'Expected [<name> to be] an Uint8Array[<extra>]'
function formatUint8Error(name, extra) {
  if (name) return `Expected ${name} to be an Uint8Array${extra}`
  return `Expected an Uint8Array${extra}`
}

// The abstract %TypedArray% intrinsic — the common parent class of all typed array constructors
const TypedArray = Object.getPrototypeOf(Uint8Array)

/** Throws unless `arr` is some TypedArray (Uint8Array, Float32Array, ...). */
export function assertTypedArray(arr) {
  if (!(arr instanceof TypedArray)) throw new TypeError('Expected a TypedArray instance')
}

/**
 * Throws unless `arr` is an Uint8Array, optionally of an exact length.
 *
 * @param arr - value to check
 * @param options - optional { name, length }: `name` is included in the error
 *   message, `length` requires an exact byte length; any other key throws
 */
export function assertUint8(arr, options) {
  if (!options) {
    // fast path — nothing to destructure or validate
    if (!(arr instanceof Uint8Array)) throw new TypeError('Expected an Uint8Array')
    return
  }

  const { name, length, ...rest } = options
  assertEmptyRest(rest)
  // instanceof is checked first so `arr.length` is never read off a non-array value
  const isUint8 = arr instanceof Uint8Array
  if (isUint8 && (length === undefined || arr.length === length)) return
  throw new TypeError(formatUint8Error(name, length === undefined ? '' : ` of size ${Number(length)}`))
}
|
||||
83
node_modules/@exodus/bytes/base32.d.ts
generated
vendored
Normal file
83
node_modules/@exodus/bytes/base32.d.ts
generated
vendored
Normal file
@@ -0,0 +1,83 @@
|
||||
/**
|
||||
* Implements base32 and base32hex from [RFC4648](https://datatracker.ietf.org/doc/html/rfc4648)
|
||||
* (no differences from [RFC3548](https://datatracker.ietf.org/doc/html/rfc4648)).
|
||||
*
|
||||
* ```js
|
||||
* import { fromBase32, toBase32 } from '@exodus/bytes/base32.js'
|
||||
* import { fromBase32hex, toBase32hex } from '@exodus/bytes/base32.js'
|
||||
* ```
|
||||
*
|
||||
* @module @exodus/bytes/base32.js
|
||||
*/
|
||||
|
||||
/// <reference types="node" />
|
||||
|
||||
import type { OutputFormat, Uint8ArrayBuffer } from './array.js';
|
||||
|
||||
/**
|
||||
* Options for base32 encoding
|
||||
*/
|
||||
export interface ToBase32Options {
|
||||
/** Whether to include padding characters (default: false) */
|
||||
padding?: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Padding mode for base32 decoding
|
||||
* - `true`: padding is required
|
||||
* - `false`: padding is not allowed
|
||||
* - `'both'`: padding is optional (default)
|
||||
*/
|
||||
export type PaddingMode = boolean | 'both';
|
||||
|
||||
/**
|
||||
* Options for base32 decoding
|
||||
*/
|
||||
export interface FromBase32Options {
|
||||
/** Output format (default: 'uint8') */
|
||||
format?: OutputFormat;
|
||||
/** Padding mode */
|
||||
padding?: PaddingMode;
|
||||
}
|
||||
|
||||
/**
|
||||
* Encode a `Uint8Array` to a base32 string (RFC 4648)
|
||||
*
|
||||
* @param arr - The input bytes
|
||||
* @param options - Encoding options
|
||||
* @returns The base32 encoded string
|
||||
*/
|
||||
export function toBase32(arr: Uint8Array, options?: ToBase32Options): string;
|
||||
|
||||
/**
|
||||
* Encode a `Uint8Array` to a base32hex string (RFC 4648)
|
||||
*
|
||||
* @param arr - The input bytes
|
||||
* @param options - Encoding options (padding defaults to false)
|
||||
* @returns The base32hex encoded string
|
||||
*/
|
||||
export function toBase32hex(arr: Uint8Array, options?: ToBase32Options): string;
|
||||
|
||||
/**
|
||||
* Decode a base32 string to bytes
|
||||
*
|
||||
* Operates in strict mode for last chunk, does not allow whitespace
|
||||
*
|
||||
* @param string - The base32 encoded string
|
||||
* @param options - Decoding options
|
||||
* @returns The decoded bytes
|
||||
*/
|
||||
export function fromBase32(string: string, options?: FromBase32Options): Uint8ArrayBuffer;
|
||||
export function fromBase32(string: string, options: FromBase32Options & { format: 'buffer' }): Buffer;
|
||||
|
||||
/**
|
||||
* Decode a base32hex string to bytes
|
||||
*
|
||||
* Operates in strict mode for last chunk, does not allow whitespace
|
||||
*
|
||||
* @param string - The base32hex encoded string
|
||||
* @param options - Decoding options
|
||||
* @returns The decoded bytes
|
||||
*/
|
||||
export function fromBase32hex(string: string, options?: FromBase32Options): Uint8ArrayBuffer;
|
||||
export function fromBase32hex(string: string, options: FromBase32Options & { format: 'buffer' }): Buffer;
|
||||
41
node_modules/@exodus/bytes/base32.js
generated
vendored
Normal file
41
node_modules/@exodus/bytes/base32.js
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
import { assertEmptyRest } from './assert.js'
|
||||
import { typedView } from './array.js'
|
||||
import { E_STRING } from './fallback/_utils.js'
|
||||
import * as js from './fallback/base32.js'
|
||||
|
||||
// See https://datatracker.ietf.org/doc/html/rfc4648
|
||||
|
||||
// 8 chars per 5 bytes
|
||||
|
||||
const { E_PADDING } = js

// Encoders are thin wrappers over the fallback implementation;
// the boolean flag selects the base32hex alphabet
export const toBase32 = (arr, { padding = false } = {}) => js.toBase32(arr, false, padding)
export const toBase32hex = (arr, { padding = false } = {}) => js.toBase32(arr, true, padding)

// Shared option parsing for both decoders.
// By default, valid padding is accepted but not required ('both').
function parseAndDecode(str, isBase32Hex, options) {
  if (!options) return fromBase32common(str, isBase32Hex, 'both', 'uint8', null)
  const { format = 'uint8', padding = 'both', ...rest } = options
  return fromBase32common(str, isBase32Hex, padding, format, rest)
}

/** Decode a base32 string; see fromBase32common for padding semantics. */
export function fromBase32(str, options) {
  return parseAndDecode(str, false, options)
}

/** Decode a base32hex string; see fromBase32common for padding semantics. */
export function fromBase32hex(str, options) {
  return parseAndDecode(str, true, options)
}

// Validates input type + padding policy, then delegates to the fallback decoder.
// `rest === null` signals the no-options fast path (skip extra-key validation).
function fromBase32common(str, isBase32Hex, padding, format, rest) {
  if (typeof str !== 'string') throw new TypeError(E_STRING)
  if (rest !== null) assertEmptyRest(rest)

  switch (padding) {
    case true:
      // padded base32 is always a multiple of 8 chars
      if (str.length % 8 !== 0) throw new SyntaxError(E_PADDING)
      break
    case false:
      if (str.endsWith('=')) throw new SyntaxError('Did not expect padding in base32 input')
      break
    case 'both':
      break // either form accepted
    default:
      throw new TypeError('Invalid padding option')
  }

  return typedView(js.fromBase32(str, isBase32Hex), format)
}
|
||||
62
node_modules/@exodus/bytes/base58.d.ts
generated
vendored
Normal file
62
node_modules/@exodus/bytes/base58.d.ts
generated
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
/**
|
||||
* Implements [base58](https://www.ietf.org/archive/id/draft-msporny-base58-03.txt) encoding.
|
||||
*
|
||||
* Supports both standard base58 and XRP variant alphabets.
|
||||
*
|
||||
* ```js
|
||||
* import { fromBase58, toBase58 } from '@exodus/bytes/base58.js'
|
||||
* import { fromBase58xrp, toBase58xrp } from '@exodus/bytes/base58.js'
|
||||
* ```
|
||||
*
|
||||
* @module @exodus/bytes/base58.js
|
||||
*/
|
||||
|
||||
/// <reference types="node" />
|
||||
|
||||
import type { OutputFormat, Uint8ArrayBuffer } from './array.js';
|
||||
|
||||
/**
 * Encode a `Uint8Array` to a base58 string
 *
 * Uses the standard Bitcoin base58 alphabet
 *
 * @param arr - The input bytes
 * @returns The base58 encoded string
 */
export function toBase58(arr: Uint8Array): string;

/**
 * Decode a base58 string to bytes
 *
 * Uses the standard Bitcoin base58 alphabet
 *
 * @param string - The base58 encoded string
 * @param format - Output format (default: 'uint8')
 * @returns The decoded bytes
 * @throws {SyntaxError} on characters outside the base58 alphabet
 */
// Overloads are ordered most-specific first so a literal `format` argument
// resolves to the precise return type.
export function fromBase58(string: string, format?: 'uint8'): Uint8ArrayBuffer;
export function fromBase58(string: string, format: 'buffer'): Buffer;
export function fromBase58(string: string, format?: OutputFormat): Uint8ArrayBuffer | Buffer;

/**
 * Encode a `Uint8Array` to a base58 string using XRP alphabet
 *
 * Uses the XRP variant base58 alphabet
 *
 * @param arr - The input bytes
 * @returns The base58 encoded string
 */
export function toBase58xrp(arr: Uint8Array): string;

/**
 * Decode a base58 string to bytes using XRP alphabet
 *
 * Uses the XRP variant base58 alphabet
 *
 * @param string - The base58 encoded string
 * @param format - Output format (default: 'uint8')
 * @returns The decoded bytes
 * @throws {SyntaxError} on characters outside the XRP base58 alphabet
 */
export function fromBase58xrp(string: string, format?: 'uint8'): Uint8ArrayBuffer;
export function fromBase58xrp(string: string, format: 'buffer'): Buffer;
export function fromBase58xrp(string: string, format?: OutputFormat): Uint8ArrayBuffer | Buffer;
|
||||
220
node_modules/@exodus/bytes/base58.js
generated
vendored
Normal file
220
node_modules/@exodus/bytes/base58.js
generated
vendored
Normal file
@@ -0,0 +1,220 @@
|
||||
import { typedView } from './array.js'
|
||||
import { assertUint8 } from './assert.js'
|
||||
import { nativeDecoder, nativeEncoder, isHermes, E_STRING } from './fallback/_utils.js'
|
||||
import { encodeAscii, decodeAscii } from './fallback/latin1.js'
|
||||
|
||||
// Base58 alphabets as arrays of single-char strings:
// the standard (Bitcoin) alphabet and the XRP Ledger permutation of the same characters
const alphabet58 = [...'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz']
const alphabetXRP = [...'rpshnaf39wBUDNEGHJKLM4PQRST7VWXYZ2bcdeCg65jkm8oFqi1tuvAxyz']
// Char-code lookup tables for the alphabets, used by the native-decoder output path
const codes58 = new Uint8Array(alphabet58.map((x) => x.charCodeAt(0)))
const codesXRP = new Uint8Array(alphabetXRP.map((x) => x.charCodeAt(0)))

// Small BigInt constants used by the converters, built via constructor calls
const _0n = BigInt(0)
const _1n = BigInt(1)
const _8n = BigInt(8)
const _32n = BigInt(32)
const _58n = BigInt(58)
const _0xffffffffn = BigInt(0xff_ff_ff_ff)

// Lazily-built powers of 2^32 expressed as base-58 digits (built in toBase58core)
let table // 15 * 82, diagonal, <1kb
// Per-alphabet charCode -> base58-digit lookup tables (built in fromBase58core)
const fromMaps = new Map()

const E_CHAR = 'Invalid character in base58 input'

const shouldUseBigIntFrom = isHermes // faster only on Hermes, numbers path beats it on normal engines
|
||||
|
||||
/**
 * Shared base58 encoder for both alphabets.
 *
 * @param arr - input bytes (asserted to be an Uint8Array)
 * @param alphabet - 58 single-char strings
 * @param codes - char codes of `alphabet`, for the native-decoder output path
 * @returns the base58-encoded string
 */
function toBase58core(arr, alphabet, codes) {
  assertUint8(arr)
  const length = arr.length
  if (length === 0) return ''

  // Leading zero bytes contribute nothing numerically — they become leading
  // "zero" characters (the first alphabet char) prepended to the result
  const ZERO = alphabet[0]
  let zeros = 0
  while (zeros < length && arr[zeros] === 0) zeros++

  if (length > 60) {
    // Slow path. Can be optimized ~10%, but the main factor is /58n division anyway, so doesn't matter much
    // Fold all bytes into a single BigInt, then peel off base-58 digits via division
    let x = _0n
    for (let i = 0; i < arr.length; i++) x = (x << _8n) | BigInt(arr[i])

    let out = ''
    while (x) {
      const d = x / _58n
      out = alphabet[Number(x - _58n * d)] + out // x - 58*d === x % 58, avoids a second bigint op
      x = d
    }

    return ZERO.repeat(zeros) + out
  }

  // We run fast mode operations only on short (<=60 bytes) inputs, via precomputation table
  // table[j] = 2^(32*j) as little-endian base-58 digits, j in 0..14 (covers 60 bytes)
  if (!table) {
    table = []
    let x = _1n
    for (let i = 0; i < 15; i++) {
      // Convert x to base 58 digits
      const in58 = []
      let y = x
      while (y) {
        const d = y / _58n
        in58.push(Number(y - _58n * d))
        y = d
      }

      table.push(new Uint8Array(in58))
      x <<= _32n
    }
  }

  // res accumulates little-endian "loose" base-58 digits (entries may exceed 57 until
  // normalized below). Sums stay far below Number.MAX_SAFE_INTEGER:
  // c < 2^32, digit < 58, at most 15 chunks
  const res = []
  {
    let j = 0
    // We group each 4 bytes into 32-bit chunks
    // Not using u32arr to not deal with remainder + BE/LE differences
    for (let i = length - 1; i >= 0; i -= 4) {
      let c
      if (i > 2) {
        c = (arr[i] | (arr[i - 1] << 8) | (arr[i - 2] << 16) | (arr[i - 3] << 24)) >>> 0
      } else if (i > 1) {
        c = arr[i] | (arr[i - 1] << 8) | (arr[i - 2] << 16)
      } else {
        c = i === 1 ? arr[i] | (arr[i - 1] << 8) : arr[i]
      }

      const row = table[j++]
      if (c === 0) continue // zero chunk contributes nothing
      const olen = res.length
      const nlen = row.length
      let k = 0
      for (; k < olen; k++) res[k] += c * row[k]
      while (k < nlen) res.push(c * row[k++])
    }
  }

  // We can now do a single scan over regular numbers under MAX_SAFE_INTEGER
  // Note: can't use int32 operations on them, as they are outside of 2**32 range
  // This is faster though
  {
    let carry = 0
    let i = 0
    while (i < res.length) {
      const c = res[i] + carry
      carry = Math.floor(c / 58)
      res[i++] = c - carry * 58
    }

    while (carry) {
      const c = carry
      carry = Math.floor(c / 58)
      res.push(c - carry * 58)
    }
  }

  // Digits in res are little-endian — emit them in reverse order
  if (nativeDecoder) {
    const oa = new Uint8Array(res.length)
    let j = 0
    for (let i = res.length - 1; i >= 0; i--) oa[j++] = codes[res[i]]
    return ZERO.repeat(zeros) + decodeAscii(oa)
  }

  let out = ''
  for (let i = res.length - 1; i >= 0; i--) out += alphabet[res[i]]
  return ZERO.repeat(zeros) + out
}
|
||||
|
||||
/**
 * Shared base58 decoder for both alphabets.
 *
 * @param str - base58-encoded input
 * @param alphabet - 58 single-char strings
 * @param codes - char codes of `alphabet`
 * @param format - output format, 'uint8' (default) or 'buffer'
 * @returns decoded bytes in the requested format
 * @throws {SyntaxError} on characters outside the alphabet
 */
function fromBase58core(str, alphabet, codes, format = 'uint8') {
  if (typeof str !== 'string') throw new TypeError(E_STRING)
  const length = str.length
  if (length === 0) return typedView(new Uint8Array(), format)

  // Leading "zero" characters map 1:1 to leading zero bytes in the output
  const zeroC = codes[0]
  let zeros = 0
  while (zeros < length && str.charCodeAt(zeros) === zeroC) zeros++

  // Lazily build (and cache per alphabet) a charCode -> digit table, -1 = invalid
  let fromMap = fromMaps.get(alphabet)
  if (!fromMap) {
    fromMap = new Int8Array(256).fill(-1)
    for (let i = 0; i < 58; i++) fromMap[alphabet[i].charCodeAt(0)] = i
    fromMaps.set(alphabet, fromMap)
  }

  const size = zeros + (((length - zeros + 1) * 3) >> 2) // 3/4 rounded up, larger than ~0.73 coef to fit everything
  const res = new Uint8Array(size)
  let at = size // where is the first significant byte written

  if (shouldUseBigIntFrom) {
    let x = _0n

    // nativeEncoder gives a benefit here
    if (nativeEncoder) {
      const ascii = encodeAscii(str, E_CHAR) // renamed from `codes` — was shadowing the parameter
      for (let i = zeros; i < length; i++) {
        const c = fromMap[ascii[i]]
        if (c < 0) throw new SyntaxError(E_CHAR)
        x = x * _58n + BigInt(c)
      }
    } else {
      for (let i = zeros; i < length; i++) {
        const charCode = str.charCodeAt(i)
        const c = fromMap[charCode]
        if (charCode > 255 || c < 0) throw new SyntaxError(E_CHAR)
        x = x * _58n + BigInt(c)
      }
    }

    // Serialize the BigInt into bytes, 32 bits at a time, writing backwards from the end
    while (x) {
      let y = Number(x & _0xffffffffn)
      x >>= _32n // consistency fix: was a bare `32n` literal — the only one in this file, which otherwise uses BigInt() constants exclusively
      res[--at] = y & 0xff
      y >>>= 8
      if (!x && !y) break
      res[--at] = y & 0xff
      y >>>= 8
      if (!x && !y) break
      res[--at] = y & 0xff
      y >>>= 8
      if (!x && !y) break
      res[--at] = y & 0xff
    }
  } else {
    // Schoolbook path: res is a big-endian byte accumulator; for each input digit,
    // res = res * 58 + digit with manual carry propagation (unrolled 4x)
    for (let i = zeros; i < length; i++) {
      const charCode = str.charCodeAt(i)
      let c = fromMap[charCode]
      if (charCode > 255 || c < 0) throw new SyntaxError(E_CHAR)

      let k = size - 1
      for (;;) {
        if (c === 0 && k < at) break
        c += 58 * res[k]
        res[k] = c & 0xff
        c >>>= 8
        k--
        // unroll a bit
        if (c === 0 && k < at) break
        c += 58 * res[k]
        res[k] = c & 0xff
        c >>>= 8
        k--
        if (c === 0 && k < at) break
        c += 58 * res[k]
        res[k] = c & 0xff
        c >>>= 8
        k--
        if (c === 0 && k < at) break
        c += 58 * res[k]
        res[k] = c & 0xff
        c >>>= 8
        k--
      }

      at = k + 1
      if (c !== 0 || at < zeros) /* c8 ignore next */ throw new Error('Unexpected') // unreachable
    }
  }

  return typedView(res.slice(at - zeros), format) // slice is faster for small sizes than subarray
}
|
||||
|
||||
/** Encode bytes to base58 using the standard Bitcoin alphabet. */
export function toBase58(arr) {
  return toBase58core(arr, alphabet58, codes58)
}

/** Decode a base58 string (standard Bitcoin alphabet) to bytes. */
export function fromBase58(str, format) {
  return fromBase58core(str, alphabet58, codes58, format)
}

/** Encode bytes to base58 using the XRP Ledger alphabet. */
export function toBase58xrp(arr) {
  return toBase58core(arr, alphabetXRP, codesXRP)
}

/** Decode a base58 string (XRP Ledger alphabet) to bytes. */
export function fromBase58xrp(str, format) {
  return fromBase58core(str, alphabetXRP, codesXRP, format)
}
|
||||
131
node_modules/@exodus/bytes/base58check.d.ts
generated
vendored
Normal file
131
node_modules/@exodus/bytes/base58check.d.ts
generated
vendored
Normal file
@@ -0,0 +1,131 @@
|
||||
/**
|
||||
* Implements [base58check](https://en.bitcoin.it/wiki/Base58Check_encoding) encoding.
|
||||
*
|
||||
* ```js
|
||||
* import { fromBase58check, toBase58check } from '@exodus/bytes/base58check.js'
|
||||
* import { fromBase58checkSync, toBase58checkSync } from '@exodus/bytes/base58check.js'
|
||||
* import { makeBase58check } from '@exodus/bytes/base58check.js'
|
||||
* ```
|
||||
*
|
||||
* On non-Node.js, requires peer dependency [@noble/hashes](https://www.npmjs.com/package/@noble/hashes) to be installed.
|
||||
*
|
||||
* @module @exodus/bytes/base58check.js
|
||||
*/
|
||||
|
||||
/// <reference types="node" />
|
||||
|
||||
import type { OutputFormat, Uint8ArrayBuffer } from './array.js';
|
||||
|
||||
/**
 * Hash function type that takes Uint8Array and returns a Promise of Uint8Array
 */
export type HashFunction = (data: Uint8Array) => Promise<Uint8Array>;

/**
 * Synchronous hash function type that takes Uint8Array and returns Uint8Array
 */
export type HashFunctionSync = (data: Uint8Array) => Uint8Array;

/**
 * Base58Check encoder/decoder instance with async methods
 */
export interface Base58CheckAsync {
  /**
   * Encode bytes to base58check string asynchronously
   *
   * @param arr - The input bytes to encode
   * @returns A Promise that resolves to the base58check encoded string
   */
  encode(arr: Uint8Array): Promise<string>;

  /**
   * Decode a base58check string to bytes asynchronously
   *
   * Overloads are ordered most-specific first so a literal `format` argument
   * resolves to the precise return type.
   *
   * @param string - The base58check encoded string
   * @param format - Output format (default: 'uint8')
   * @returns A Promise that resolves to the decoded bytes
   */
  decode(string: string, format?: 'uint8'): Promise<Uint8ArrayBuffer>;
  decode(string: string, format: 'buffer'): Promise<Buffer>;
  decode(string: string, format?: OutputFormat): Promise<Uint8ArrayBuffer | Buffer>;
}

/**
 * Base58Check encoder/decoder instance with both async and sync methods
 *
 * Returned by `makeBase58check` when a synchronous hash function is supplied.
 */
export interface Base58CheckSync extends Base58CheckAsync {
  /**
   * Encode bytes to base58check string synchronously
   *
   * @param arr - The input bytes to encode
   * @returns The base58check encoded string
   */
  encodeSync(arr: Uint8Array): string;

  /**
   * Decode a base58check string to bytes synchronously
   *
   * Overloads are ordered most-specific first so a literal `format` argument
   * resolves to the precise return type.
   *
   * @param string - The base58check encoded string
   * @param format - Output format (default: 'uint8')
   * @returns The decoded bytes
   */
  decodeSync(string: string, format?: 'uint8'): Uint8ArrayBuffer;
  decodeSync(string: string, format: 'buffer'): Buffer;
  decodeSync(string: string, format?: OutputFormat): Uint8ArrayBuffer | Buffer;
}
|
||||
|
||||
/**
|
||||
* Create a base58check encoder/decoder with custom hash functions
|
||||
*
|
||||
* @param hashAlgo - Async hash function (typically double SHA-256)
|
||||
* @param hashAlgoSync - Optional sync hash function
|
||||
* @returns Base58Check encoder/decoder instance
|
||||
*/
|
||||
export function makeBase58check(hashAlgo: HashFunction, hashAlgoSync?: HashFunctionSync): Base58CheckSync;
|
||||
export function makeBase58check(hashAlgo: HashFunction): Base58CheckAsync;
|
||||
|
||||
/**
 * Encode bytes to base58check string asynchronously
 *
 * Uses double SHA-256 for checksum calculation
 *
 * @param arr - The input bytes to encode
 * @returns A Promise that resolves to the base58check encoded string
 */
export function toBase58check(arr: Uint8Array): Promise<string>;

/**
 * Decode a base58check string to bytes asynchronously
 *
 * Validates the checksum using double SHA-256
 *
 * Overloads are ordered most-specific first so a literal `format` argument
 * resolves to the precise return type.
 *
 * @param string - The base58check encoded string
 * @param format - Output format (default: 'uint8')
 * @returns A Promise that resolves to the decoded bytes
 */
export function fromBase58check(string: string, format?: 'uint8'): Promise<Uint8ArrayBuffer>;
export function fromBase58check(string: string, format: 'buffer'): Promise<Buffer>;
export function fromBase58check(string: string, format?: OutputFormat): Promise<Uint8ArrayBuffer | Buffer>;

/**
 * Encode bytes to base58check string synchronously
 *
 * Uses double SHA-256 for checksum calculation
 *
 * @param arr - The input bytes to encode
 * @returns The base58check encoded string
 */
export function toBase58checkSync(arr: Uint8Array): string;

/**
 * Decode a base58check string to bytes synchronously
 *
 * Validates the checksum using double SHA-256
 *
 * Overloads are ordered most-specific first so a literal `format` argument
 * resolves to the precise return type.
 *
 * @param string - The base58check encoded string
 * @param format - Output format (default: 'uint8')
 * @returns The decoded bytes
 */
export function fromBase58checkSync(string: string, format?: 'uint8'): Uint8ArrayBuffer;
export function fromBase58checkSync(string: string, format: 'buffer'): Buffer;
export function fromBase58checkSync(string: string, format?: OutputFormat): Uint8ArrayBuffer | Buffer;
|
||||
19
node_modules/@exodus/bytes/base58check.js
generated
vendored
Normal file
19
node_modules/@exodus/bytes/base58check.js
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
import { sha256 } from '@noble/hashes/sha2.js'
|
||||
import { makeBase58check } from './fallback/base58check.js'
|
||||
|
||||
// Note: while API is async, we use hashSync for now until we improve webcrypto perf for hash256
|
||||
// Inputs to base58 are typically very small, and that makes a difference
|
||||
|
||||
// Note: using native WebCrypto will have to have account for SharedArrayBuffer
|
||||
|
||||
// Double SHA-256 — the checksum function used by base58check
function hash256sync(x) {
  return sha256(sha256(x))
}

// The async slot is intentionally backed by the same sync implementation — see note at the top
const hash256 = hash256sync

const codec = makeBase58check(hash256, hash256sync)
const toBase58check = codec.encode
const fromBase58check = codec.decode
const toBase58checkSync = codec.encodeSync
const fromBase58checkSync = codec.decodeSync

export { makeBase58check } from './fallback/base58check.js'
export { toBase58check, fromBase58check, toBase58checkSync, fromBase58checkSync }
|
||||
14
node_modules/@exodus/bytes/base58check.node.js
generated
vendored
Normal file
14
node_modules/@exodus/bytes/base58check.node.js
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
import { hash } from 'node:crypto'
|
||||
import { makeBase58check } from './fallback/base58check.js'
|
||||
|
||||
// Single SHA-256 via the node:crypto one-shot hash(), producing a Buffer
function sha256(x) {
  return hash('sha256', x, 'buffer')
}

// Double SHA-256 — the checksum function used by base58check
// (synchronous; also passed as the async implementation below)
function hash256(x) {
  return sha256(sha256(x))
}

const codec = makeBase58check(hash256, hash256)
const toBase58check = codec.encode
const fromBase58check = codec.decode
const toBase58checkSync = codec.encodeSync
const fromBase58checkSync = codec.decodeSync

export { makeBase58check } from './fallback/base58check.js'
export { toBase58check, fromBase58check, toBase58checkSync, fromBase58checkSync }
|
||||
96
node_modules/@exodus/bytes/base64.d.ts
generated
vendored
Normal file
96
node_modules/@exodus/bytes/base64.d.ts
generated
vendored
Normal file
@@ -0,0 +1,96 @@
|
||||
/**
|
||||
* Implements base64 and base64url from [RFC4648](https://datatracker.ietf.org/doc/html/rfc4648)
|
||||
* (no differences from [RFC3548](https://datatracker.ietf.org/doc/html/rfc4648)).
|
||||
*
|
||||
* ```js
|
||||
* import { fromBase64, toBase64 } from '@exodus/bytes/base64.js'
|
||||
* import { fromBase64url, toBase64url } from '@exodus/bytes/base64.js'
|
||||
* import { fromBase64any } from '@exodus/bytes/base64.js'
|
||||
* ```
|
||||
*
|
||||
* @module @exodus/bytes/base64.js
|
||||
*/
|
||||
|
||||
/// <reference types="node" />
|
||||
|
||||
import type { OutputFormat, Uint8ArrayBuffer } from './array.js';
|
||||
|
||||
/**
|
||||
* Options for base64 encoding
|
||||
*/
|
||||
export interface ToBase64Options {
|
||||
/** Whether to include padding characters (default: true for base64, false for base64url) */
|
||||
padding?: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Padding mode for base64 decoding
|
||||
* - `true`: padding is required
|
||||
* - `false`: padding is not allowed (default for base64url)
|
||||
* - `'both'`: padding is optional (default for base64)
|
||||
*/
|
||||
export type PaddingMode = boolean | 'both';
|
||||
|
||||
/**
|
||||
* Options for base64 decoding
|
||||
*/
|
||||
export interface FromBase64Options {
|
||||
/** Output format (default: 'uint8') */
|
||||
format?: OutputFormat;
|
||||
/** Padding mode */
|
||||
padding?: PaddingMode;
|
||||
}
|
||||
|
||||
/**
|
||||
* Encode a `Uint8Array` to a base64 string (RFC 4648)
|
||||
*
|
||||
* @param arr - The input bytes
|
||||
* @param options - Encoding options
|
||||
* @returns The base64 encoded string
|
||||
*/
|
||||
export function toBase64(arr: Uint8Array, options?: ToBase64Options): string;
|
||||
|
||||
/**
|
||||
* Encode a `Uint8Array` to a base64url string (RFC 4648)
|
||||
*
|
||||
* @param arr - The input bytes
|
||||
* @param options - Encoding options (padding defaults to false)
|
||||
* @returns The base64url encoded string
|
||||
*/
|
||||
export function toBase64url(arr: Uint8Array, options?: ToBase64Options): string;
|
||||
|
||||
/**
|
||||
* Decode a base64 string to bytes
|
||||
*
|
||||
* Operates in strict mode for last chunk, does not allow whitespace
|
||||
*
|
||||
* @param string - The base64 encoded string
|
||||
* @param options - Decoding options
|
||||
* @returns The decoded bytes
|
||||
*/
|
||||
export function fromBase64(string: string, options?: FromBase64Options): Uint8ArrayBuffer;
|
||||
export function fromBase64(string: string, options: FromBase64Options & { format: 'buffer' }): Buffer;
|
||||
|
||||
/**
|
||||
* Decode a base64url string to bytes
|
||||
*
|
||||
* Operates in strict mode for last chunk, does not allow whitespace
|
||||
*
|
||||
* @param string - The base64url encoded string
|
||||
* @param options - Decoding options (padding defaults to false)
|
||||
* @returns The decoded bytes
|
||||
*/
|
||||
export function fromBase64url(string: string, options?: FromBase64Options): Uint8ArrayBuffer;
|
||||
export function fromBase64url(string: string, options: FromBase64Options & { format: 'buffer' }): Buffer;
|
||||
|
||||
/**
|
||||
* Decode either base64 or base64url string to bytes
|
||||
*
|
||||
* Automatically detects the variant based on characters present
|
||||
*
|
||||
* @param string - The base64 or base64url encoded string
|
||||
* @param options - Decoding options
|
||||
* @returns The decoded bytes
|
||||
*/
|
||||
export function fromBase64any(string: string, options?: FromBase64Options): Uint8ArrayBuffer;
|
||||
export function fromBase64any(string: string, options: FromBase64Options & { format: 'buffer' }): Buffer;
|
||||
177
node_modules/@exodus/bytes/base64.js
generated
vendored
Normal file
177
node_modules/@exodus/bytes/base64.js
generated
vendored
Normal file
@@ -0,0 +1,177 @@
|
||||
import { assertUint8, assertEmptyRest } from './assert.js'
|
||||
import { typedView } from './array.js'
|
||||
import { isHermes, skipWeb, E_STRING } from './fallback/_utils.js'
|
||||
import { decodeLatin1, encodeLatin1 } from './fallback/latin1.js'
|
||||
import * as js from './fallback/base64.js'
|
||||
|
||||
// See https://datatracker.ietf.org/doc/html/rfc4648
|
||||
|
||||
// base64: A-Za-z0-9+/ and = if padding not disabled
|
||||
// base64url: A-Za-z0-9_- and = if padding enabled
|
||||
|
||||
const { Buffer, atob, btoa } = globalThis // Buffer is optional, only used when native
|
||||
const haveNativeBuffer = Buffer && !Buffer.TYPED_ARRAY_SUPPORT
|
||||
const { toBase64: web64 } = Uint8Array.prototype // Modern engines have this
|
||||
|
||||
const { E_CHAR, E_PADDING, E_LENGTH, E_LAST } = js
|
||||
|
||||
// faster only on Hermes (and a little in old Chrome), js path beats it on normal engines
|
||||
const shouldUseBtoa = btoa && isHermes
|
||||
const shouldUseAtob = atob && isHermes
|
||||
|
||||
// For native Buffer codepaths only
|
||||
const isBuffer = (x) => x.constructor === Buffer && Buffer.isBuffer(x)
|
||||
const toBuffer = (x) => (isBuffer(x) ? x : Buffer.from(x.buffer, x.byteOffset, x.byteLength))
|
||||
|
||||
function maybeUnpad(res, padding) {
|
||||
if (padding) return res
|
||||
const at = res.indexOf('=', res.length - 3)
|
||||
return at === -1 ? res : res.slice(0, at)
|
||||
}
|
||||
|
||||
function maybePad(res, padding) {
|
||||
return padding && res.length % 4 !== 0 ? res + '='.repeat(4 - (res.length % 4)) : res
|
||||
}
|
||||
|
||||
const toUrl = (x) => x.replaceAll('+', '-').replaceAll('/', '_')
|
||||
const haveWeb = (x) => !skipWeb && web64 && x.toBase64 === web64
|
||||
|
||||
export function toBase64(x, { padding = true } = {}) {
|
||||
assertUint8(x)
|
||||
if (haveWeb(x)) return padding ? x.toBase64() : x.toBase64({ omitPadding: !padding }) // Modern, optionless is slightly faster
|
||||
if (haveNativeBuffer) return maybeUnpad(toBuffer(x).base64Slice(0, x.byteLength), padding) // Older Node.js
|
||||
if (shouldUseBtoa) return maybeUnpad(btoa(decodeLatin1(x)), padding)
|
||||
return js.toBase64(x, false, padding) // Fallback
|
||||
}
|
||||
|
||||
// NOTE: base64url omits padding by default
|
||||
export function toBase64url(x, { padding = false } = {}) {
|
||||
assertUint8(x)
|
||||
if (haveWeb(x)) return x.toBase64({ alphabet: 'base64url', omitPadding: !padding }) // Modern
|
||||
if (haveNativeBuffer) return maybePad(toBuffer(x).base64urlSlice(0, x.byteLength), padding) // Older Node.js
|
||||
if (shouldUseBtoa) return maybeUnpad(toUrl(btoa(decodeLatin1(x))), padding)
|
||||
return js.toBase64(x, true, padding) // Fallback
|
||||
}
|
||||
|
||||
// Unlike Buffer.from(), throws on invalid input (non-base64 symbols and incomplete chunks)
|
||||
// Unlike Buffer.from() and Uint8Array.fromBase64(), does not allow spaces
|
||||
// NOTE: Always operates in strict mode for last chunk
|
||||
|
||||
// By default accepts both padded and non-padded variants, only strict base64
|
||||
export function fromBase64(str, options) {
|
||||
if (typeof options === 'string') options = { format: options } // Compat due to usage, TODO: remove
|
||||
if (!options) return fromBase64common(str, false, 'both', 'uint8', null)
|
||||
const { format = 'uint8', padding = 'both', ...rest } = options
|
||||
return fromBase64common(str, false, padding, format, rest)
|
||||
}
|
||||
|
||||
// By default accepts only non-padded strict base64url
|
||||
export function fromBase64url(str, options) {
|
||||
if (!options) return fromBase64common(str, true, false, 'uint8', null)
|
||||
const { format = 'uint8', padding = false, ...rest } = options
|
||||
return fromBase64common(str, true, padding, format, rest)
|
||||
}
|
||||
|
||||
// By default accepts both padded and non-padded variants, base64 or base64url
|
||||
export function fromBase64any(str, { format = 'uint8', padding = 'both', ...rest } = {}) {
|
||||
const isBase64url = !str.includes('+') && !str.includes('/') // likely to fail fast, as most input is non-url, also double scan is faster than regex
|
||||
return fromBase64common(str, isBase64url, padding, format, rest)
|
||||
}
|
||||
|
||||
function fromBase64common(str, isBase64url, padding, format, rest) {
|
||||
if (typeof str !== 'string') throw new TypeError(E_STRING)
|
||||
if (rest !== null) assertEmptyRest(rest)
|
||||
const auto = padding === 'both' ? str.endsWith('=') : undefined
|
||||
// Older JSC supporting Uint8Array.fromBase64 lacks proper checks
|
||||
if (padding === true || auto === true) {
|
||||
if (str.length % 4 !== 0) throw new SyntaxError(E_PADDING) // JSC misses this
|
||||
if (str[str.length - 3] === '=') throw new SyntaxError(E_PADDING) // no more than two = at the end
|
||||
} else if (padding === false || auto === false) {
|
||||
if (str.length % 4 === 1) throw new SyntaxError(E_LENGTH) // JSC misses this in fromBase64
|
||||
if (padding === false && str.endsWith('=')) {
|
||||
throw new SyntaxError('Did not expect padding in base64 input') // inclusion is checked separately
|
||||
}
|
||||
} else {
|
||||
throw new TypeError('Invalid padding option')
|
||||
}
|
||||
|
||||
return typedView(fromBase64impl(str, isBase64url, padding), format)
|
||||
}
|
||||
|
||||
// ASCII whitespace is U+0009 TAB, U+000A LF, U+000C FF, U+000D CR, or U+0020 SPACE
|
||||
const ASCII_WHITESPACE = /[\t\n\f\r ]/ // non-u for JSC perf
|
||||
|
||||
function noWhitespaceSeen(str, arr) {
|
||||
const at = str.indexOf('=', str.length - 3)
|
||||
const paddingLength = at >= 0 ? str.length - at : 0
|
||||
const chars = str.length - paddingLength
|
||||
const e = chars % 4 // extra chars past blocks of 4
|
||||
const b = arr.length - ((chars - e) / 4) * 3 // remaining bytes not covered by full blocks of chars
|
||||
return (e === 0 && b === 0) || (e === 2 && b === 1) || (e === 3 && b === 2)
|
||||
}
|
||||
|
||||
let fromBase64impl
|
||||
if (!skipWeb && Uint8Array.fromBase64) {
|
||||
// NOTICE: this is actually slower than our JS impl in older JavaScriptCore and (slightly) in SpiderMonkey, but faster on V8 and new JavaScriptCore
|
||||
fromBase64impl = (str, isBase64url, padding) => {
|
||||
const alphabet = isBase64url ? 'base64url' : 'base64'
|
||||
|
||||
let arr
|
||||
if (padding === true) {
|
||||
// Padding is required from user, and we already checked that string length is divisible by 4
|
||||
// Padding might still be wrong due to whitespace, but in that case native impl throws expected error
|
||||
arr = Uint8Array.fromBase64(str, { alphabet, lastChunkHandling: 'strict' })
|
||||
} else {
|
||||
try {
|
||||
const padded = str.length % 4 > 0 ? `${str}${'='.repeat(4 - (str.length % 4))}` : str
|
||||
arr = Uint8Array.fromBase64(padded, { alphabet, lastChunkHandling: 'strict' })
|
||||
} catch (err) {
|
||||
// Normalize error: whitespace in input could have caused added padding to be invalid
|
||||
// But reporting that as a padding error would be confusing
|
||||
throw ASCII_WHITESPACE.test(str) ? new SyntaxError(E_CHAR) : err
|
||||
}
|
||||
}
|
||||
|
||||
// We don't allow whitespace in input, but that can be rechecked based on output length
|
||||
// All other chars are checked natively
|
||||
if (!noWhitespaceSeen(str, arr)) throw new SyntaxError(E_CHAR)
|
||||
return arr
|
||||
}
|
||||
} else if (haveNativeBuffer) {
|
||||
fromBase64impl = (str, isBase64url, padding) => {
|
||||
const arr = Buffer.from(str, 'base64')
|
||||
// Rechecking by re-encoding is cheaper than regexes on Node.js
|
||||
const got = isBase64url ? maybeUnpad(str, padding === false) : maybePad(str, padding !== true)
|
||||
const valid = isBase64url ? arr.base64urlSlice(0, arr.length) : arr.base64Slice(0, arr.length)
|
||||
if (got !== valid) throw new SyntaxError(E_PADDING)
|
||||
return arr // fully checked
|
||||
}
|
||||
} else if (shouldUseAtob) {
|
||||
// atob is faster than manual parsing on Hermes
|
||||
fromBase64impl = (str, isBase64url, padding) => {
|
||||
let arr
|
||||
if (isBase64url) {
|
||||
if (/[\t\n\f\r +/]/.test(str)) throw new SyntaxError(E_CHAR) // atob verifies other invalid input
|
||||
str = str.replaceAll('-', '+').replaceAll('_', '/') // from url to normal
|
||||
}
|
||||
|
||||
try {
|
||||
arr = encodeLatin1(atob(str))
|
||||
} catch {
|
||||
throw new SyntaxError(E_CHAR) // convert atob errors
|
||||
}
|
||||
|
||||
if (!isBase64url && !noWhitespaceSeen(str, arr)) throw new SyntaxError(E_CHAR) // base64url checks input above
|
||||
|
||||
if (arr.length % 3 !== 0) {
|
||||
// Check last chunk to be strict if it was incomplete
|
||||
const expected = toBase64(arr.subarray(-(arr.length % 3))) // str is normalized to non-url already
|
||||
const end = str.length % 4 === 0 ? str.slice(-4) : str.slice(-(str.length % 4)).padEnd(4, '=')
|
||||
if (expected !== end) throw new SyntaxError(E_LAST)
|
||||
}
|
||||
|
||||
return arr
|
||||
}
|
||||
} else {
|
||||
fromBase64impl = (str, isBase64url, padding) => js.fromBase64(str, isBase64url) // validated in js
|
||||
}
|
||||
76
node_modules/@exodus/bytes/bech32.d.ts
generated
vendored
Normal file
76
node_modules/@exodus/bytes/bech32.d.ts
generated
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
/**
|
||||
* Implements bech32 and bech32m from
|
||||
* [BIP-0173](https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki#specification)
|
||||
* and [BIP-0350](https://github.com/bitcoin/bips/blob/master/bip-0350.mediawiki#specification).
|
||||
*
|
||||
* ```js
|
||||
* import { fromBech32, toBech32 } from '@exodus/bytes/bech32.js'
|
||||
* import { fromBech32m, toBech32m } from '@exodus/bytes/bech32.js'
|
||||
* import { getPrefix } from '@exodus/bytes/bech32.js'
|
||||
* ```
|
||||
*
|
||||
* @module @exodus/bytes/bech32.js
|
||||
*/
|
||||
|
||||
/// <reference types="node" />
|
||||
|
||||
import type { Uint8ArrayBuffer } from './array.js';
|
||||
|
||||
/**
|
||||
* Result of decoding a bech32 or bech32m string
|
||||
*/
|
||||
export interface Bech32DecodeResult {
|
||||
/** The human-readable prefix */
|
||||
prefix: string;
|
||||
/** The decoded bytes */
|
||||
bytes: Uint8ArrayBuffer;
|
||||
}
|
||||
|
||||
/**
|
||||
* Encode bytes to a bech32 string
|
||||
*
|
||||
* @param prefix - The human-readable prefix (e.g., 'bc' for Bitcoin)
|
||||
* @param bytes - The input bytes to encode
|
||||
* @param limit - Maximum length of the encoded string (default: 90)
|
||||
* @returns The bech32 encoded string
|
||||
*/
|
||||
export function toBech32(prefix: string, bytes: Uint8Array, limit?: number): string;
|
||||
|
||||
/**
|
||||
* Decode a bech32 string to bytes
|
||||
*
|
||||
* @param string - The bech32 encoded string
|
||||
* @param limit - Maximum length of the input string (default: 90)
|
||||
* @returns The decoded prefix and bytes
|
||||
*/
|
||||
export function fromBech32(string: string, limit?: number): Bech32DecodeResult;
|
||||
|
||||
/**
|
||||
* Encode bytes to a bech32m string
|
||||
*
|
||||
* @param prefix - The human-readable prefix (e.g., 'bc' for Bitcoin)
|
||||
* @param bytes - The input bytes to encode
|
||||
* @param limit - Maximum length of the encoded string (default: 90)
|
||||
* @returns The bech32m encoded string
|
||||
*/
|
||||
export function toBech32m(prefix: string, bytes: Uint8Array, limit?: number): string;
|
||||
|
||||
/**
|
||||
* Decode a bech32m string to bytes
|
||||
*
|
||||
* @param string - The bech32m encoded string
|
||||
* @param limit - Maximum length of the input string (default: 90)
|
||||
* @returns The decoded prefix and bytes
|
||||
*/
|
||||
export function fromBech32m(string: string, limit?: number): Bech32DecodeResult;
|
||||
|
||||
/**
|
||||
* Extract the prefix from a bech32 or bech32m string without full validation
|
||||
*
|
||||
* This is a quick check that skips most validation.
|
||||
*
|
||||
* @param string - The bech32/bech32m encoded string
|
||||
* @param limit - Maximum length of the input string (default: 90)
|
||||
* @returns The lowercase prefix
|
||||
*/
|
||||
export function getPrefix(string: string, limit?: number): string;
|
||||
257
node_modules/@exodus/bytes/bech32.js
generated
vendored
Normal file
257
node_modules/@exodus/bytes/bech32.js
generated
vendored
Normal file
@@ -0,0 +1,257 @@
|
||||
import { assertUint8 } from './assert.js'
|
||||
import { nativeEncoder, E_STRING } from './fallback/_utils.js'
|
||||
import { decodeAscii, encodeAscii, encodeLatin1 } from './fallback/latin1.js'
|
||||
|
||||
const alphabet = [...'qpzry9x8gf2tvdw0s3jn54khce6mua7l']
|
||||
const BECH32 = 1
|
||||
const BECH32M = 0x2b_c8_30_a3
|
||||
|
||||
const E_SIZE = 'Input length is out of range'
|
||||
const E_PREFIX = 'Missing or invalid prefix'
|
||||
const E_MIXED = 'Mixed-case string'
|
||||
const E_PADDING = 'Padding is invalid'
|
||||
const E_CHECKSUM = 'Invalid checksum'
|
||||
const E_CHARACTER = 'Non-bech32 character'
|
||||
|
||||
// nativeEncoder path uses encodeAscii which asserts ascii, otherwise we have 0-255 bytes from encodeLatin1
|
||||
const c2x = new Int8Array(nativeEncoder ? 128 : 256).fill(-1)
|
||||
const x2c = new Uint8Array(32)
|
||||
for (let i = 0; i < alphabet.length; i++) {
|
||||
const c = alphabet[i].charCodeAt(0)
|
||||
c2x[c] = i
|
||||
x2c[i] = c
|
||||
}
|
||||
|
||||
// checksum size is 30 bits, 0x3f_ff_ff_ff
|
||||
// The good thing about the checksum is that it's linear over every bit
|
||||
const poly0 = new Uint32Array(32) // just precache all possible ones, it's only 1 KiB
|
||||
const p = (x) => ((x & 0x1_ff_ff_ff) << 5) ^ poly0[x >> 25]
|
||||
for (let i = 0; i < 32; i++) {
|
||||
poly0[i] =
|
||||
(i & 0b0_0001 ? 0x3b_6a_57_b2 : 0) ^
|
||||
(i & 0b0_0010 ? 0x26_50_8e_6d : 0) ^
|
||||
(i & 0b0_0100 ? 0x1e_a1_19_fa : 0) ^
|
||||
(i & 0b0_1000 ? 0x3d_42_33_dd : 0) ^
|
||||
(i & 0b1_0000 ? 0x2a_14_62_b3 : 0)
|
||||
}
|
||||
|
||||
// 7 KiB more for faster p6/p8
|
||||
const poly1 = new Uint32Array(32)
|
||||
const poly2 = new Uint32Array(32)
|
||||
const poly3 = new Uint32Array(32)
|
||||
const poly4 = new Uint32Array(32)
|
||||
const poly5 = new Uint32Array(32)
|
||||
const poly6 = new Uint32Array(32)
|
||||
const poly7 = new Uint32Array(32)
|
||||
for (let i = 0; i < 32; i++) {
|
||||
// poly0[i] === p(p(p(p(p(p(i))))))
|
||||
poly1[i] = p(poly0[i]) // aka p(p(p(p(p(p(i << 5))))))
|
||||
poly2[i] = p(poly1[i]) // aka p(p(p(p(p(p(i << 10))))))
|
||||
poly3[i] = p(poly2[i]) // aka p(p(p(p(p(p(i << 15))))))
|
||||
poly4[i] = p(poly3[i]) // aka p(p(p(p(p(p(i << 20))))))
|
||||
poly5[i] = p(poly4[i]) // aka p(p(p(p(p(p(i << 25))))))
|
||||
poly6[i] = p(poly5[i])
|
||||
poly7[i] = p(poly6[i])
|
||||
}
|
||||
|
||||
function p6(x) {
|
||||
// Same as: return p(p(p(p(p(p(x))))))
|
||||
const x0 = x & 0x1f
|
||||
const x1 = (x >> 5) & 0x1f
|
||||
const x2 = (x >> 10) & 0x1f
|
||||
const x3 = (x >> 15) & 0x1f
|
||||
const x4 = (x >> 20) & 0x1f
|
||||
const x5 = (x >> 25) & 0x1f
|
||||
return poly0[x0] ^ poly1[x1] ^ poly2[x2] ^ poly3[x3] ^ poly4[x4] ^ poly5[x5]
|
||||
}
|
||||
|
||||
function p8(x) {
|
||||
// Same as: return p(p(p(p(p(p(p(p(x))))))))
|
||||
const x0 = x & 0x1f
|
||||
const x1 = (x >> 5) & 0x1f
|
||||
const x2 = (x >> 10) & 0x1f
|
||||
const x3 = (x >> 15) & 0x1f
|
||||
const x4 = (x >> 20) & 0x1f
|
||||
const x5 = (x >> 25) & 0x1f
|
||||
return poly2[x0] ^ poly3[x1] ^ poly4[x2] ^ poly5[x3] ^ poly6[x4] ^ poly7[x5]
|
||||
}
|
||||
|
||||
// p(p(p(p(p(p(chk) ^ x0) ^ x1) ^ x2) ^ x3) ^ x4) ^ x5 === p6(chk) ^ merge(x0, x1, x2, x3, x4, x5)
|
||||
const merge = (a, b, c, d, e, f) => f ^ (e << 5) ^ (d << 10) ^ (c << 15) ^ (b << 20) ^ (a << 25)
|
||||
|
||||
const prefixCache = new Map() // Cache 10 of them
|
||||
|
||||
function pPrefix(prefix) {
|
||||
if (prefix === 'bc') return 0x2_31_80_43 // perf
|
||||
const cached = prefixCache.get(prefix)
|
||||
if (cached !== undefined) return cached
|
||||
|
||||
// bech32_hrp_expand(s): [ord(x) >> 5 for x in s] + [0] + [ord(x) & 31 for x in s]
|
||||
// We can do this in a single scan due to linearity, but it's not very beneficial
|
||||
let chk = 1 // it starts with one (see def bech32_polymod in BIP_0173)
|
||||
const length = prefix.length
|
||||
for (let i = 0; i < length; i++) {
|
||||
const c = prefix.charCodeAt(i)
|
||||
if (c < 33 || c > 126) throw new Error(E_PREFIX) // each character having a value in the range [33-126]
|
||||
chk = p(chk) ^ (c >> 5)
|
||||
}
|
||||
|
||||
chk = p(chk) // <= for + [0]
|
||||
for (let i = 0; i < length; i++) {
|
||||
const c = prefix.charCodeAt(i)
|
||||
chk = p(chk) ^ (c & 0x1f)
|
||||
}
|
||||
|
||||
if (prefixCache.size < 10) prefixCache.set(prefix, chk)
|
||||
return chk
|
||||
}
|
||||
|
||||
function toBech32enc(prefix, bytes, limit, encoding) {
|
||||
if (typeof prefix !== 'string' || !prefix) throw new TypeError(E_PREFIX)
|
||||
if (typeof limit !== 'number') throw new TypeError(E_SIZE)
|
||||
assertUint8(bytes)
|
||||
const bytesLength = bytes.length
|
||||
const wordsLength = Math.ceil((bytesLength * 8) / 5)
|
||||
if (!(prefix.length + 7 + wordsLength <= limit)) throw new TypeError(E_SIZE)
|
||||
prefix = prefix.toLowerCase()
|
||||
const out = new Uint8Array(wordsLength + 6)
|
||||
|
||||
let chk = pPrefix(prefix)
|
||||
let i = 0, j = 0 // prettier-ignore
|
||||
|
||||
// This loop is just an optimization of the next one
|
||||
for (const length4 = bytesLength - 4; i < length4; i += 5, j += 8) {
|
||||
const b0 = bytes[i], b1 = bytes[i + 1], b2 = bytes[i + 2], b3 = bytes[i + 3], b4 = bytes[i + 4] // prettier-ignore
|
||||
const x0 = b0 >> 3
|
||||
const x1 = ((b0 << 2) & 0x1f) | (b1 >> 6)
|
||||
const x2 = (b1 >> 1) & 0x1f
|
||||
const x3 = ((b1 << 4) & 0x1f) | (b2 >> 4)
|
||||
const x4 = ((b2 << 1) & 0x1f) | (b3 >> 7)
|
||||
const x5 = (b3 >> 2) & 0x1f
|
||||
const x6 = ((b3 << 3) & 0x1f) | (b4 >> 5)
|
||||
const x7 = b4 & 0x1f
|
||||
chk = merge(x2, x3, x4, x5, x6, x7) ^ poly0[x1] ^ poly1[x0] ^ p8(chk)
|
||||
out[j] = x2c[x0]
|
||||
out[j + 1] = x2c[x1]
|
||||
out[j + 2] = x2c[x2]
|
||||
out[j + 3] = x2c[x3]
|
||||
out[j + 4] = x2c[x4]
|
||||
out[j + 5] = x2c[x5]
|
||||
out[j + 6] = x2c[x6]
|
||||
out[j + 7] = x2c[x7]
|
||||
}
|
||||
|
||||
let value = 0, bits = 0 // prettier-ignore
|
||||
for (; i < bytesLength; i++) {
|
||||
value = ((value & 0xf) << 8) | bytes[i]
|
||||
bits += 3
|
||||
const x = (value >> bits) & 0x1f
|
||||
chk = p(chk) ^ x
|
||||
out[j++] = x2c[x]
|
||||
if (bits >= 5) {
|
||||
bits -= 5
|
||||
const x = (value >> bits) & 0x1f
|
||||
chk = p(chk) ^ x
|
||||
out[j++] = x2c[x]
|
||||
}
|
||||
}
|
||||
|
||||
if (bits > 0) {
|
||||
const x = (value << (5 - bits)) & 0x1f
|
||||
chk = p(chk) ^ x
|
||||
out[j++] = x2c[x]
|
||||
}
|
||||
|
||||
chk = encoding ^ p6(chk)
|
||||
out[j++] = x2c[(chk >> 25) & 0x1f]
|
||||
out[j++] = x2c[(chk >> 20) & 0x1f]
|
||||
out[j++] = x2c[(chk >> 15) & 0x1f]
|
||||
out[j++] = x2c[(chk >> 10) & 0x1f]
|
||||
out[j++] = x2c[(chk >> 5) & 0x1f]
|
||||
out[j++] = x2c[(chk >> 0) & 0x1f]
|
||||
|
||||
return prefix + '1' + decodeAscii(out) // suboptimal in barebones, but actually ok in Hermes for not to care atm
|
||||
}
|
||||
|
||||
function assertDecodeArgs(str, limit) {
|
||||
if (typeof str !== 'string') throw new TypeError(E_STRING)
|
||||
if (typeof limit !== 'number' || str.length < 8 || !(str.length <= limit)) throw new Error(E_SIZE)
|
||||
}
|
||||
|
||||
// this is instant on 8-bit strings
|
||||
const NON_LATIN = /[^\x00-\xFF]/ // eslint-disable-line no-control-regex
|
||||
|
||||
function fromBech32enc(str, limit, encoding) {
|
||||
assertDecodeArgs(str, limit)
|
||||
const lower = str.toLowerCase()
|
||||
if (str !== lower) {
|
||||
if (str !== str.toUpperCase()) throw new Error(E_MIXED)
|
||||
str = lower
|
||||
}
|
||||
|
||||
const split = str.lastIndexOf('1')
|
||||
if (split <= 0) throw new Error(E_PREFIX)
|
||||
const prefix = str.slice(0, split)
|
||||
const charsLength = str.length - split - 1
|
||||
const wordsLength = charsLength - 6
|
||||
if (wordsLength < 0) throw new Error(E_SIZE)
|
||||
const bytesLength = (wordsLength * 5) >> 3
|
||||
const slice = str.slice(split + 1)
|
||||
if (!nativeEncoder && NON_LATIN.test(slice)) throw new SyntaxError(E_CHARACTER) // otherwise can't use encodeLatin1
|
||||
const c = nativeEncoder ? encodeAscii(slice, E_CHARACTER) : encodeLatin1(slice) // suboptimal, but only affects non-Hermes barebones
|
||||
const bytes = new Uint8Array(bytesLength)
|
||||
|
||||
let chk = pPrefix(prefix)
|
||||
let i = 0, j = 0 // prettier-ignore
|
||||
|
||||
// This loop is just an optimization of the next one
|
||||
for (const length7 = wordsLength - 7; i < length7; i += 8, j += 5) {
|
||||
const c0 = c[i], c1 = c[i + 1], c2 = c[i + 2], c3 = c[i + 3], c4 = c[i + 4], c5 = c[i + 5], c6 = c[i + 6], c7 = c[i + 7] // prettier-ignore
|
||||
const x0 = c2x[c0], x1 = c2x[c1], x2 = c2x[c2], x3 = c2x[c3], x4 = c2x[c4], x5 = c2x[c5], x6 = c2x[c6], x7 = c2x[c7] // prettier-ignore
|
||||
if (x0 < 0 || x1 < 0 || x2 < 0 || x3 < 0 || x4 < 0 || x5 < 0 || x6 < 0 || x7 < 0) throw new SyntaxError(E_CHARACTER) // prettier-ignore
|
||||
chk = merge(x2, x3, x4, x5, x6, x7) ^ poly0[x1] ^ poly1[x0] ^ p8(chk)
|
||||
bytes[j] = (x0 << 3) | (x1 >> 2)
|
||||
bytes[j + 1] = (((x1 << 6) | (x2 << 1)) & 0xff) | (x3 >> 4)
|
||||
bytes[j + 2] = ((x3 << 4) & 0xff) | (x4 >> 1)
|
||||
bytes[j + 3] = ((((x4 << 5) | x5) << 2) & 0xff) | (x6 >> 3)
|
||||
bytes[j + 4] = ((x6 << 5) & 0xff) | x7
|
||||
}
|
||||
|
||||
let value = 0, bits = 0 // prettier-ignore
|
||||
for (; i < wordsLength; i++) {
|
||||
const x = c2x[c[i]]
|
||||
if (x < 0) throw new SyntaxError(E_CHARACTER)
|
||||
chk = p(chk) ^ x
|
||||
value = (value << 5) | x
|
||||
bits += 5
|
||||
if (bits >= 8) {
|
||||
bits -= 8
|
||||
bytes[j++] = (value >> bits) & 0xff
|
||||
}
|
||||
}
|
||||
|
||||
if (bits >= 5 || (value << (8 - bits)) & 0xff) throw new Error(E_PADDING)
|
||||
|
||||
// Checksum
|
||||
{
|
||||
const c0 = c[i], c1 = c[i + 1], c2 = c[i + 2], c3 = c[i + 3], c4 = c[i + 4], c5 = c[i + 5] // prettier-ignore
|
||||
const x0 = c2x[c0], x1 = c2x[c1], x2 = c2x[c2], x3 = c2x[c3], x4 = c2x[c4], x5 = c2x[c5] // prettier-ignore
|
||||
if (x0 < 0 || x1 < 0 || x2 < 0 || x3 < 0 || x4 < 0 || x5 < 0) throw new SyntaxError(E_CHARACTER)
|
||||
if ((merge(x0, x1, x2, x3, x4, x5) ^ p6(chk)) !== encoding) throw new Error(E_CHECKSUM)
|
||||
}
|
||||
|
||||
return { prefix, bytes }
|
||||
}
|
||||
|
||||
// This is designed to be a very quick check, skipping all other validation
|
||||
export function getPrefix(str, limit = 90) {
|
||||
assertDecodeArgs(str, limit)
|
||||
const split = str.lastIndexOf('1')
|
||||
if (split <= 0) throw new Error(E_PREFIX)
|
||||
return str.slice(0, split).toLowerCase()
|
||||
}
|
||||
|
||||
export const toBech32 = (prefix, bytes, limit = 90) => toBech32enc(prefix, bytes, limit, BECH32)
|
||||
export const fromBech32 = (str, limit = 90) => fromBech32enc(str, limit, BECH32)
|
||||
export const toBech32m = (prefix, bytes, limit = 90) => toBech32enc(prefix, bytes, limit, BECH32M)
|
||||
export const fromBech32m = (str, limit = 90) => fromBech32enc(str, limit, BECH32M)
|
||||
48
node_modules/@exodus/bytes/bigint.d.ts
generated
vendored
Normal file
48
node_modules/@exodus/bytes/bigint.d.ts
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
/**
|
||||
* Convert between BigInt and Uint8Array
|
||||
*
|
||||
* ```js
|
||||
* import { fromBigInt, toBigInt } from '@exodus/bytes/bigint.js'
|
||||
* ```
|
||||
*
|
||||
* @module @exodus/bytes/bigint.js
|
||||
*/
|
||||
|
||||
/// <reference types="node" />
|
||||
|
||||
import type { OutputFormat, Uint8ArrayBuffer } from './array.js';
|
||||
|
||||
/**
|
||||
* Options for converting BigInt to bytes
|
||||
*/
|
||||
export interface FromBigIntOptions {
|
||||
/** The length in bytes of the output array */
|
||||
length: number;
|
||||
/** Output format (default: 'uint8') */
|
||||
format?: OutputFormat;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert a BigInt to a Uint8Array or Buffer
|
||||
*
|
||||
* The output bytes are in big-endian format.
|
||||
*
|
||||
* Throws if the BigInt is negative or cannot fit into the specified length.
|
||||
*
|
||||
* @param bigint - The BigInt to convert (must be non-negative)
|
||||
* @param options - Conversion options
|
||||
* @returns The converted bytes in big-endian format
|
||||
*/
|
||||
export function fromBigInt(bigint: bigint, options: { length: number; format?: 'uint8' }): Uint8ArrayBuffer;
|
||||
export function fromBigInt(bigint: bigint, options: { length: number; format: 'buffer' }): Buffer;
|
||||
export function fromBigInt(bigint: bigint, options: FromBigIntOptions): Uint8ArrayBuffer | Buffer;
|
||||
|
||||
/**
|
||||
* Convert a Uint8Array or Buffer to a BigInt
|
||||
*
|
||||
* The bytes are interpreted as a big-endian unsigned integer.
|
||||
*
|
||||
* @param arr - The bytes to convert
|
||||
* @returns The BigInt representation
|
||||
*/
|
||||
export function toBigInt(arr: Uint8Array): bigint;
|
||||
14
node_modules/@exodus/bytes/bigint.js
generated
vendored
Normal file
14
node_modules/@exodus/bytes/bigint.js
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
import { toHex, fromHex } from '@exodus/bytes/hex.js'
|
||||
import { assert } from './fallback/_utils.js'
|
||||
|
||||
const _0n = BigInt(0)
|
||||
|
||||
export function fromBigInt(x, { length, format } = {}) {
|
||||
assert(Number.isSafeInteger(length) && length > 0, 'Expected length arg to be a positive integer')
|
||||
assert(typeof x === 'bigint' && x >= _0n, 'Expected a non-negative bigint')
|
||||
const hex = x.toString(16)
|
||||
assert(length * 2 >= hex.length, `Can not fit supplied number into ${length} bytes`)
|
||||
return fromHex(hex.padStart(length * 2, '0'), format)
|
||||
}
|
||||
|
||||
export const toBigInt = (a) => BigInt('0x' + (toHex(a) || '0'))
|
||||
29
node_modules/@exodus/bytes/encoding-browser.browser.js
generated
vendored
Normal file
29
node_modules/@exodus/bytes/encoding-browser.browser.js
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
import {
|
||||
fromSource,
|
||||
getBOMEncoding,
|
||||
normalizeEncoding,
|
||||
E_ENCODING,
|
||||
} from './fallback/encoding.api.js'
|
||||
import labels from './fallback/encoding.labels.js'
|
||||
|
||||
// Lite-weight version which re-exports existing implementations on browsers,
|
||||
// while still being aliased to the full impl in RN and Node.js
|
||||
|
||||
// WARNING: Note that browsers have bugs (which hopefully will get fixed soon)
|
||||
|
||||
const { TextDecoder, TextEncoder, TextDecoderStream, TextEncoderStream } = globalThis
|
||||
|
||||
export { normalizeEncoding, getBOMEncoding, labelToName } from './fallback/encoding.api.js'
|
||||
export { TextDecoder, TextEncoder, TextDecoderStream, TextEncoderStream }
|
||||
|
||||
// https://encoding.spec.whatwg.org/#decode
|
||||
export function legacyHookDecode(input, fallbackEncoding = 'utf-8') {
|
||||
let u8 = fromSource(input)
|
||||
const bomEncoding = getBOMEncoding(u8)
|
||||
if (bomEncoding) u8 = u8.subarray(bomEncoding === 'utf-8' ? 3 : 2)
|
||||
const enc = bomEncoding ?? normalizeEncoding(fallbackEncoding) // "the byte order mark is more authoritative than anything else"
|
||||
if (enc === 'utf-8') return new TextDecoder('utf-8', { ignoreBOM: true }).decode(u8) // fast path
|
||||
if (enc === 'replacement') return u8.byteLength > 0 ? '\uFFFD' : ''
|
||||
if (!Object.hasOwn(labels, enc)) throw new RangeError(E_ENCODING)
|
||||
return new TextDecoder(enc, { ignoreBOM: true }).decode(u8)
|
||||
}
|
||||
24
node_modules/@exodus/bytes/encoding-browser.d.ts
generated
vendored
Normal file
24
node_modules/@exodus/bytes/encoding-browser.d.ts
generated
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
/**
|
||||
* Same as `@exodus/bytes/encoding.js`, but in browsers instead of polyfilling just uses whatever the
|
||||
* browser provides, drastically reducing the bundle size (to less than 2 KiB gzipped).
|
||||
*
|
||||
* ```js
|
||||
* import { TextDecoder, TextEncoder } from '@exodus/bytes/encoding-browser.js'
|
||||
* import { TextDecoderStream, TextEncoderStream } from '@exodus/bytes/encoding-browser.js' // Requires Streams
|
||||
*
|
||||
* // Hooks for standards
|
||||
* import { getBOMEncoding, legacyHookDecode, labelToName, normalizeEncoding } from '@exodus/bytes/encoding-browser.js'
|
||||
* ```
|
||||
*
|
||||
* Under non-browser engines (Node.js, React Native, etc.) a full polyfill is used as those platforms
|
||||
* do not provide sufficiently complete / non-buggy `TextDecoder` APIs.
|
||||
*
|
||||
* > [!NOTE]
|
||||
* > Implementations in browsers [have bugs](https://docs.google.com/spreadsheets/d/1pdEefRG6r9fZy61WHGz0TKSt8cO4ISWqlpBN5KntIvQ/edit),
|
||||
* > but they are fixing them and the expected update window is short.\
|
||||
* > If you want to circumvent browser bugs, use full `@exodus/bytes/encoding.js` import.
|
||||
*
|
||||
* @module @exodus/bytes/encoding-browser.js
|
||||
*/
|
||||
|
||||
export * from './encoding.js'
|
||||
1
node_modules/@exodus/bytes/encoding-browser.js
generated
vendored
Normal file
1
node_modules/@exodus/bytes/encoding-browser.js
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
export * from './encoding.js'
|
||||
1
node_modules/@exodus/bytes/encoding-browser.native.js
generated
vendored
Normal file
1
node_modules/@exodus/bytes/encoding-browser.native.js
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
export * from './encoding.js'
|
||||
62
node_modules/@exodus/bytes/encoding-lite.d.ts
generated
vendored
Normal file
62
node_modules/@exodus/bytes/encoding-lite.d.ts
generated
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
/**
|
||||
* The exact same exports as `@exodus/bytes/encoding.js` are also exported as
|
||||
* `@exodus/bytes/encoding-lite.js`, with the difference that the lite version does not load
|
||||
* multi-byte `TextDecoder` encodings by default to reduce bundle size 10x.
|
||||
*
|
||||
* ```js
|
||||
* import { TextDecoder, TextEncoder } from '@exodus/bytes/encoding-lite.js'
|
||||
* import { TextDecoderStream, TextEncoderStream } from '@exodus/bytes/encoding-lite.js' // Requires Streams
|
||||
*
|
||||
* // Hooks for standards
|
||||
* import { getBOMEncoding, legacyHookDecode, labelToName, normalizeEncoding } from '@exodus/bytes/encoding-lite.js'
|
||||
* ```
|
||||
*
|
||||
* The only affected encodings are: `gbk`, `gb18030`, `big5`, `euc-jp`, `iso-2022-jp`, `shift_jis`
|
||||
* and their [labels](https://encoding.spec.whatwg.org/#names-and-labels) when used with `TextDecoder`.
|
||||
*
|
||||
* Legacy single-byte encodingds are loaded by default in both cases.
|
||||
*
|
||||
* `TextEncoder` and hooks for standards (including `labelToName` / `normalizeEncoding`) do not have any behavior
|
||||
* differences in the lite version and support full range if inputs.
|
||||
*
|
||||
* To avoid inconsistencies, the exported classes and methods are exactly the same objects.
|
||||
*
|
||||
* ```console
|
||||
* > lite = require('@exodus/bytes/encoding-lite.js')
|
||||
* [Module: null prototype] {
|
||||
* TextDecoder: [class TextDecoder],
|
||||
* TextDecoderStream: [class TextDecoderStream],
|
||||
* TextEncoder: [class TextEncoder],
|
||||
* TextEncoderStream: [class TextEncoderStream],
|
||||
* getBOMEncoding: [Function: getBOMEncoding],
|
||||
* labelToName: [Function: labelToName],
|
||||
* legacyHookDecode: [Function: legacyHookDecode],
|
||||
* normalizeEncoding: [Function: normalizeEncoding]
|
||||
* }
|
||||
* > new lite.TextDecoder('big5').decode(Uint8Array.of(0x25))
|
||||
* Uncaught:
|
||||
* Error: Legacy multi-byte encodings are disabled in /encoding-lite.js, use /encoding.js for full encodings range support
|
||||
*
|
||||
* > full = require('@exodus/bytes/encoding.js')
|
||||
* [Module: null prototype] {
|
||||
* TextDecoder: [class TextDecoder],
|
||||
* TextDecoderStream: [class TextDecoderStream],
|
||||
* TextEncoder: [class TextEncoder],
|
||||
* TextEncoderStream: [class TextEncoderStream],
|
||||
* getBOMEncoding: [Function: getBOMEncoding],
|
||||
* labelToName: [Function: labelToName],
|
||||
* legacyHookDecode: [Function: legacyHookDecode],
|
||||
* normalizeEncoding: [Function: normalizeEncoding]
|
||||
* }
|
||||
* > full.TextDecoder === lite.TextDecoder
|
||||
* true
|
||||
* > new full.TextDecoder('big5').decode(Uint8Array.of(0x25))
|
||||
* '%'
|
||||
* > new lite.TextDecoder('big5').decode(Uint8Array.of(0x25))
|
||||
* '%'
|
||||
* ```
|
||||
*
|
||||
* @module @exodus/bytes/encoding-lite.js
|
||||
*/
|
||||
|
||||
export * from './encoding.js'
|
||||
10
node_modules/@exodus/bytes/encoding-lite.js
generated
vendored
Normal file
10
node_modules/@exodus/bytes/encoding-lite.js
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
export {
|
||||
TextDecoder,
|
||||
TextEncoder,
|
||||
TextDecoderStream,
|
||||
TextEncoderStream,
|
||||
normalizeEncoding,
|
||||
getBOMEncoding,
|
||||
labelToName,
|
||||
legacyHookDecode,
|
||||
} from './fallback/encoding.js'
|
||||
140
node_modules/@exodus/bytes/encoding.d.ts
generated
vendored
Normal file
140
node_modules/@exodus/bytes/encoding.d.ts
generated
vendored
Normal file
@@ -0,0 +1,140 @@
|
||||
/**
|
||||
* Implements the [Encoding standard](https://encoding.spec.whatwg.org/):
|
||||
* [TextDecoder](https://encoding.spec.whatwg.org/#interface-textdecoder),
|
||||
* [TextEncoder](https://encoding.spec.whatwg.org/#interface-textencoder),
|
||||
* [TextDecoderStream](https://encoding.spec.whatwg.org/#interface-textdecoderstream),
|
||||
* [TextEncoderStream](https://encoding.spec.whatwg.org/#interface-textencoderstream),
|
||||
* some [hooks](https://encoding.spec.whatwg.org/#specification-hooks).
|
||||
*
|
||||
* ```js
|
||||
* import { TextDecoder, TextEncoder } from '@exodus/bytes/encoding.js'
|
||||
* import { TextDecoderStream, TextEncoderStream } from '@exodus/bytes/encoding.js' // Requires Streams
|
||||
*
|
||||
* // Hooks for standards
|
||||
* import { getBOMEncoding, legacyHookDecode, labelToName, normalizeEncoding } from '@exodus/bytes/encoding.js'
|
||||
* ```
|
||||
*
|
||||
* @module @exodus/bytes/encoding.js
|
||||
*/
|
||||
|
||||
/// <reference types="node" />
|
||||
|
||||
/**
|
||||
* Convert an encoding [label](https://encoding.spec.whatwg.org/#names-and-labels) to its name,
|
||||
* as an ASCII-lowercased string.
|
||||
*
|
||||
* If an encoding with that label does not exist, returns `null`.
|
||||
*
|
||||
* This is the same as [`decoder.encoding` getter](https://encoding.spec.whatwg.org/#dom-textdecoder-encoding),
|
||||
* except that it:
|
||||
* 1. Supports [`replacement` encoding](https://encoding.spec.whatwg.org/#replacement) and its
|
||||
* [labels](https://encoding.spec.whatwg.org/#ref-for-replacement%E2%91%A1)
|
||||
* 2. Does not throw for invalid labels and instead returns `null`
|
||||
*
|
||||
* It is identical to:
|
||||
* ```js
|
||||
* labelToName(label)?.toLowerCase() ?? null
|
||||
* ```
|
||||
*
|
||||
* All encoding names are also valid labels for corresponding encodings.
|
||||
*
|
||||
* @param label - The encoding label to normalize
|
||||
* @returns The normalized encoding name, or null if invalid
|
||||
*/
|
||||
export function normalizeEncoding(label: string): string | null;
|
||||
|
||||
/**
|
||||
* Implements [BOM sniff](https://encoding.spec.whatwg.org/#bom-sniff) legacy hook.
|
||||
*
|
||||
* Given a `TypedArray` or an `ArrayBuffer` instance `input`, returns either of:
|
||||
* - `'utf-8'`, if `input` starts with UTF-8 byte order mark.
|
||||
* - `'utf-16le'`, if `input` starts with UTF-16LE byte order mark.
|
||||
* - `'utf-16be'`, if `input` starts with UTF-16BE byte order mark.
|
||||
* - `null` otherwise.
|
||||
*
|
||||
* @param input - The bytes to check for BOM
|
||||
* @returns The encoding ('utf-8', 'utf-16le', 'utf-16be'), or null if no BOM found
|
||||
*/
|
||||
export function getBOMEncoding(
|
||||
input: ArrayBufferLike | ArrayBufferView
|
||||
): 'utf-8' | 'utf-16le' | 'utf-16be' | null;
|
||||
|
||||
/**
|
||||
* Implements [decode](https://encoding.spec.whatwg.org/#decode) legacy hook.
|
||||
*
|
||||
* Given a `TypedArray` or an `ArrayBuffer` instance `input` and an optional `fallbackEncoding`
|
||||
* encoding [label](https://encoding.spec.whatwg.org/#names-and-labels),
|
||||
* sniffs encoding from BOM with `fallbackEncoding` fallback and then
|
||||
* decodes the `input` using that encoding, skipping BOM if it was present.
|
||||
*
|
||||
* Notes:
|
||||
*
|
||||
* - BOM-sniffed encoding takes precedence over `fallbackEncoding` option per spec.
|
||||
* Use with care.
|
||||
* - Always operates in non-fatal [mode](https://encoding.spec.whatwg.org/#textdecoder-error-mode),
|
||||
* aka replacement. It can convert different byte sequences to equal strings.
|
||||
*
|
||||
* This method is similar to the following code, except that it doesn't support encoding labels and
|
||||
* only expects lowercased encoding name:
|
||||
*
|
||||
* ```js
|
||||
* new TextDecoder(getBOMEncoding(input) ?? fallbackEncoding).decode(input)
|
||||
* ```
|
||||
*
|
||||
* @param input - The bytes to decode
|
||||
* @param fallbackEncoding - The encoding to use if no BOM detected (default: 'utf-8')
|
||||
* @returns The decoded string
|
||||
*/
|
||||
export function legacyHookDecode(
|
||||
input: ArrayBufferLike | ArrayBufferView,
|
||||
fallbackEncoding?: string
|
||||
): string;
|
||||
|
||||
/**
|
||||
* Implements [get an encoding from a string `label`](https://encoding.spec.whatwg.org/#concept-encoding-get).
|
||||
*
|
||||
* Convert an encoding [label](https://encoding.spec.whatwg.org/#names-and-labels) to its name,
|
||||
* as a case-sensitive string.
|
||||
*
|
||||
* If an encoding with that label does not exist, returns `null`.
|
||||
*
|
||||
* All encoding names are also valid labels for corresponding encodings.
|
||||
*
|
||||
* @param label - The encoding label
|
||||
* @returns The proper case encoding name, or null if invalid
|
||||
*/
|
||||
export function labelToName(label: string): string | null;
|
||||
|
||||
/**
|
||||
* [TextDecoder](https://encoding.spec.whatwg.org/#interface-textdecoder) implementation/polyfill.
|
||||
*
|
||||
* Decode bytes to strings according to [WHATWG Encoding](https://encoding.spec.whatwg.org) specification.
|
||||
*/
|
||||
export const TextDecoder: typeof globalThis.TextDecoder;
|
||||
|
||||
/**
|
||||
* [TextEncoder](https://encoding.spec.whatwg.org/#interface-textencoder) implementation/polyfill.
|
||||
*
|
||||
* Encode strings to UTF-8 bytes according to [WHATWG Encoding](https://encoding.spec.whatwg.org) specification.
|
||||
*/
|
||||
export const TextEncoder: typeof globalThis.TextEncoder;
|
||||
|
||||
/**
|
||||
* [TextDecoderStream](https://encoding.spec.whatwg.org/#interface-textdecoderstream) implementation/polyfill.
|
||||
*
|
||||
* A [Streams](https://streams.spec.whatwg.org/) wrapper for `TextDecoder`.
|
||||
*
|
||||
* Requires [Streams](https://streams.spec.whatwg.org/) to be either supported by the platform or
|
||||
* [polyfilled](https://npmjs.com/package/web-streams-polyfill).
|
||||
*/
|
||||
export const TextDecoderStream: typeof globalThis.TextDecoderStream;
|
||||
|
||||
/**
|
||||
* [TextEncoderStream](https://encoding.spec.whatwg.org/#interface-textencoderstream) implementation/polyfill.
|
||||
*
|
||||
* A [Streams](https://streams.spec.whatwg.org/) wrapper for `TextEncoder`.
|
||||
*
|
||||
* Requires [Streams](https://streams.spec.whatwg.org/) to be either supported by the platform or
|
||||
* [polyfilled](https://npmjs.com/package/web-streams-polyfill).
|
||||
*/
|
||||
export const TextEncoderStream: typeof globalThis.TextEncoderStream;
|
||||
16
node_modules/@exodus/bytes/encoding.js
generated
vendored
Normal file
16
node_modules/@exodus/bytes/encoding.js
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
import { createMultibyteDecoder } from '@exodus/bytes/multi-byte.js'
|
||||
import { multibyteEncoder } from './fallback/multi-byte.js'
|
||||
import { setMultibyte } from './fallback/encoding.js'
|
||||
|
||||
setMultibyte(createMultibyteDecoder, multibyteEncoder)
|
||||
|
||||
export {
|
||||
TextDecoder,
|
||||
TextEncoder,
|
||||
TextDecoderStream,
|
||||
TextEncoderStream,
|
||||
normalizeEncoding,
|
||||
getBOMEncoding,
|
||||
labelToName,
|
||||
legacyHookDecode,
|
||||
} from './fallback/encoding.js'
|
||||
136
node_modules/@exodus/bytes/fallback/_utils.js
generated
vendored
Normal file
136
node_modules/@exodus/bytes/fallback/_utils.js
generated
vendored
Normal file
@@ -0,0 +1,136 @@
|
||||
const { Buffer, TextEncoder, TextDecoder } = globalThis
|
||||
const haveNativeBuffer = Buffer && !Buffer.TYPED_ARRAY_SUPPORT
|
||||
export const nativeBuffer = haveNativeBuffer ? Buffer : null
|
||||
export const isHermes = !!globalThis.HermesInternal
|
||||
export const isDeno = !!globalThis.Deno
|
||||
export const isLE = /* @__PURE__ */ (() => new Uint8Array(Uint16Array.of(258).buffer)[0] === 2)()
|
||||
|
||||
// We consider Node.js TextDecoder/TextEncoder native
|
||||
let isNative = (x) => x && (haveNativeBuffer || `${x}`.includes('[native code]'))
|
||||
if (!haveNativeBuffer && isNative(() => {})) isNative = () => false // e.g. XS, we don't want false positives
|
||||
|
||||
export const nativeEncoder = isNative(TextEncoder) ? new TextEncoder() : null
|
||||
export const nativeDecoder = isNative(TextDecoder)
|
||||
? new TextDecoder('utf-8', { ignoreBOM: true })
|
||||
: null
|
||||
|
||||
// Actually windows-1252, compatible with ascii and latin1 decoding
|
||||
// Beware that on non-latin1, i.e. on windows-1252, this is broken in ~all Node.js versions released
|
||||
// in 2025 due to a regression, so we call it Latin1 as it's usable only for that
|
||||
const getNativeLatin1 = () => {
|
||||
// Not all barebone engines with TextDecoder support something except utf-8, detect
|
||||
if (nativeDecoder) {
|
||||
try {
|
||||
return new TextDecoder('latin1', { ignoreBOM: true })
|
||||
} catch {}
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
export const nativeDecoderLatin1 = /* @__PURE__ */ getNativeLatin1()
|
||||
export const canDecoders = !!nativeDecoderLatin1
|
||||
|
||||
// Block Firefox < 146 specifically from using native hex/base64, as it's very slow there
|
||||
// Refs: https://bugzilla.mozilla.org/show_bug.cgi?id=1994067 (and linked issues), fixed in 146
|
||||
// Before that, all versions of Firefox >= 133 are slow
|
||||
// TODO: this could be removed when < 146 usage diminishes (note ESR)
|
||||
// We do not worry about false-negatives here but worry about false-positives!
|
||||
function shouldSkipBuiltins() {
|
||||
const g = globalThis
|
||||
// First, attempt to exclude as many things as we can using trivial checks, just in case, and to not hit ua
|
||||
if (haveNativeBuffer || isHermes || !g.window || g.chrome || !g.navigator) return false
|
||||
try {
|
||||
// This was fixed specifically in Firefox 146. Other engines except Hermes (already returned) get this right
|
||||
new WeakSet().add(Symbol()) // eslint-disable-line symbol-description
|
||||
return false
|
||||
} catch {
|
||||
// In catch and not after in case if something too smart optimizes out code in try. False-negative is acceptable in that case
|
||||
if (!('onmozfullscreenerror' in g)) return false // Firefox has it (might remove in the future, but we don't care)
|
||||
return /firefox/i.test(g.navigator.userAgent || '') // as simple as we can
|
||||
}
|
||||
|
||||
/* c8 ignore next */
|
||||
return false // eslint-disable-line no-unreachable
|
||||
}
|
||||
|
||||
export const skipWeb = /* @__PURE__ */ shouldSkipBuiltins()
|
||||
|
||||
function decodePartAddition(a, start, end, m) {
|
||||
let o = ''
|
||||
let i = start
|
||||
for (const last3 = end - 3; i < last3; i += 4) {
|
||||
const x0 = a[i]
|
||||
const x1 = a[i + 1]
|
||||
const x2 = a[i + 2]
|
||||
const x3 = a[i + 3]
|
||||
o += m[x0]
|
||||
o += m[x1]
|
||||
o += m[x2]
|
||||
o += m[x3]
|
||||
}
|
||||
|
||||
while (i < end) o += m[a[i++]]
|
||||
return o
|
||||
}
|
||||
|
||||
// Decoding with templates is faster on Hermes
|
||||
function decodePartTemplates(a, start, end, m) {
|
||||
let o = ''
|
||||
let i = start
|
||||
for (const last15 = end - 15; i < last15; i += 16) {
|
||||
const x0 = a[i]
|
||||
const x1 = a[i + 1]
|
||||
const x2 = a[i + 2]
|
||||
const x3 = a[i + 3]
|
||||
const x4 = a[i + 4]
|
||||
const x5 = a[i + 5]
|
||||
const x6 = a[i + 6]
|
||||
const x7 = a[i + 7]
|
||||
const x8 = a[i + 8]
|
||||
const x9 = a[i + 9]
|
||||
const x10 = a[i + 10]
|
||||
const x11 = a[i + 11]
|
||||
const x12 = a[i + 12]
|
||||
const x13 = a[i + 13]
|
||||
const x14 = a[i + 14]
|
||||
const x15 = a[i + 15]
|
||||
o += `${m[x0]}${m[x1]}${m[x2]}${m[x3]}${m[x4]}${m[x5]}${m[x6]}${m[x7]}${m[x8]}${m[x9]}${m[x10]}${m[x11]}${m[x12]}${m[x13]}${m[x14]}${m[x15]}`
|
||||
}
|
||||
|
||||
while (i < end) o += m[a[i++]]
|
||||
return o
|
||||
}
|
||||
|
||||
const decodePart = isHermes ? decodePartTemplates : decodePartAddition
|
||||
export function decode2string(arr, start, end, m) {
|
||||
if (end - start > 30_000) {
|
||||
// Limit concatenation to avoid excessive GC
|
||||
// Thresholds checked on Hermes for toHex
|
||||
const concat = []
|
||||
for (let i = start; i < end; ) {
|
||||
const step = i + 500
|
||||
const iNext = step > end ? end : step
|
||||
concat.push(decodePart(arr, i, iNext, m))
|
||||
i = iNext
|
||||
}
|
||||
|
||||
const res = concat.join('')
|
||||
concat.length = 0
|
||||
return res
|
||||
}
|
||||
|
||||
return decodePart(arr, start, end, m)
|
||||
}
|
||||
|
||||
export function assert(condition, msg) {
|
||||
if (!condition) throw new Error(msg)
|
||||
}
|
||||
|
||||
// On arrays in heap (<= 64) it's cheaper to copy into a pooled buffer than lazy-create the ArrayBuffer storage
|
||||
export const toBuf = (x) =>
|
||||
x.byteLength <= 64 && x.BYTES_PER_ELEMENT === 1
|
||||
? Buffer.from(x)
|
||||
: Buffer.from(x.buffer, x.byteOffset, x.byteLength)
|
||||
|
||||
export const E_STRING = 'Input is not a string'
|
||||
233
node_modules/@exodus/bytes/fallback/base32.js
generated
vendored
Normal file
233
node_modules/@exodus/bytes/fallback/base32.js
generated
vendored
Normal file
@@ -0,0 +1,233 @@
|
||||
import { assertUint8 } from '../assert.js'
|
||||
import { nativeEncoder, nativeDecoder, isHermes } from './_utils.js'
|
||||
import { encodeAscii, decodeAscii } from './latin1.js'
|
||||
|
||||
// See https://datatracker.ietf.org/doc/html/rfc4648
|
||||
|
||||
const BASE32 = [...'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'] // RFC 4648, #6
|
||||
const BASE32HEX = [...'0123456789ABCDEFGHIJKLMNOPQRSTUV'] // RFC 4648, #7
|
||||
const BASE32_HELPERS = {}
|
||||
const BASE32HEX_HELPERS = {}
|
||||
|
||||
export const E_CHAR = 'Invalid character in base32 input'
|
||||
export const E_PADDING = 'Invalid base32 padding'
|
||||
export const E_LENGTH = 'Invalid base32 length'
|
||||
export const E_LAST = 'Invalid last chunk'
|
||||
|
||||
const useTemplates = isHermes // Faster on Hermes and JSC, but we use it only on Hermes
|
||||
|
||||
// We construct output by concatenating chars, this seems to be fine enough on modern JS engines
|
||||
export function toBase32(arr, isBase32Hex, padding) {
|
||||
assertUint8(arr)
|
||||
const fullChunks = Math.floor(arr.length / 5)
|
||||
const fullChunksBytes = fullChunks * 5
|
||||
let o = ''
|
||||
let i = 0
|
||||
|
||||
const alphabet = isBase32Hex ? BASE32HEX : BASE32
|
||||
const helpers = isBase32Hex ? BASE32HEX_HELPERS : BASE32_HELPERS
|
||||
if (!helpers.pairs) {
|
||||
helpers.pairs = []
|
||||
if (nativeDecoder) {
|
||||
// Lazy to save memory in case if this is not needed
|
||||
helpers.codepairs = new Uint16Array(32 * 32)
|
||||
const u16 = helpers.codepairs
|
||||
const u8 = new Uint8Array(u16.buffer, u16.byteOffset, u16.byteLength) // write as 1-byte to ignore BE/LE difference
|
||||
for (let i = 0; i < 32; i++) {
|
||||
const ic = alphabet[i].charCodeAt(0)
|
||||
for (let j = 0; j < 32; j++) u8[(i << 6) | (j << 1)] = u8[(j << 6) | ((i << 1) + 1)] = ic
|
||||
}
|
||||
} else {
|
||||
const p = helpers.pairs
|
||||
for (let i = 0; i < 32; i++) {
|
||||
for (let j = 0; j < 32; j++) p.push(`${alphabet[i]}${alphabet[j]}`)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const { pairs, codepairs } = helpers
|
||||
|
||||
// Fast path for complete blocks
|
||||
// This whole loop can be commented out, the algorithm won't change, it's just an optimization of the next loop
|
||||
if (nativeDecoder) {
|
||||
const oa = new Uint16Array(fullChunks * 4)
|
||||
for (let j = 0; i < fullChunksBytes; i += 5) {
|
||||
const a = arr[i]
|
||||
const b = arr[i + 1]
|
||||
const c = arr[i + 2]
|
||||
const d = arr[i + 3]
|
||||
const e = arr[i + 4]
|
||||
const x0 = (a << 2) | (b >> 6) // 8 + 8 - 5 - 5 = 6 left
|
||||
const x1 = ((b & 0x3f) << 4) | (c >> 4) // 6 + 8 - 5 - 5 = 4 left
|
||||
const x2 = ((c & 0xf) << 6) | (d >> 2) // 4 + 8 - 5 - 5 = 2 left
|
||||
const x3 = ((d & 0x3) << 8) | e // 2 + 8 - 5 - 5 = 0 left
|
||||
oa[j] = codepairs[x0]
|
||||
oa[j + 1] = codepairs[x1]
|
||||
oa[j + 2] = codepairs[x2]
|
||||
oa[j + 3] = codepairs[x3]
|
||||
j += 4
|
||||
}
|
||||
|
||||
o = decodeAscii(oa)
|
||||
} else if (useTemplates) {
|
||||
// Templates are faster only on Hermes and JSC. Browsers have TextDecoder anyway
|
||||
for (; i < fullChunksBytes; i += 5) {
|
||||
const a = arr[i]
|
||||
const b = arr[i + 1]
|
||||
const c = arr[i + 2]
|
||||
const d = arr[i + 3]
|
||||
const e = arr[i + 4]
|
||||
const x0 = (a << 2) | (b >> 6) // 8 + 8 - 5 - 5 = 6 left
|
||||
const x1 = ((b & 0x3f) << 4) | (c >> 4) // 6 + 8 - 5 - 5 = 4 left
|
||||
const x2 = ((c & 0xf) << 6) | (d >> 2) // 4 + 8 - 5 - 5 = 2 left
|
||||
const x3 = ((d & 0x3) << 8) | e // 2 + 8 - 5 - 5 = 0 left
|
||||
o += `${pairs[x0]}${pairs[x1]}${pairs[x2]}${pairs[x3]}`
|
||||
}
|
||||
} else {
|
||||
for (; i < fullChunksBytes; i += 5) {
|
||||
const a = arr[i]
|
||||
const b = arr[i + 1]
|
||||
const c = arr[i + 2]
|
||||
const d = arr[i + 3]
|
||||
const e = arr[i + 4]
|
||||
const x0 = (a << 2) | (b >> 6) // 8 + 8 - 5 - 5 = 6 left
|
||||
const x1 = ((b & 0x3f) << 4) | (c >> 4) // 6 + 8 - 5 - 5 = 4 left
|
||||
const x2 = ((c & 0xf) << 6) | (d >> 2) // 4 + 8 - 5 - 5 = 2 left
|
||||
const x3 = ((d & 0x3) << 8) | e // 2 + 8 - 5 - 5 = 0 left
|
||||
o += pairs[x0]
|
||||
o += pairs[x1]
|
||||
o += pairs[x2]
|
||||
o += pairs[x3]
|
||||
}
|
||||
}
|
||||
|
||||
// If we have something left, process it with a full algo
|
||||
let carry = 0
|
||||
let shift = 3 // First byte needs to be shifted by 3 to get 5 bits
|
||||
for (; i < arr.length; i++) {
|
||||
const x = arr[i]
|
||||
o += alphabet[carry | (x >> shift)] // shift >= 3, so this fits
|
||||
if (shift >= 5) {
|
||||
shift -= 5
|
||||
o += alphabet[(x >> shift) & 0x1f]
|
||||
}
|
||||
|
||||
carry = (x << (5 - shift)) & 0x1f
|
||||
shift += 3 // Each byte prints 5 bits and leaves 3 bits
|
||||
}
|
||||
|
||||
if (shift !== 3) o += alphabet[carry] // shift 3 means we have no carry left
|
||||
if (padding) o += ['', '======', '====', '===', '='][arr.length - fullChunksBytes]
|
||||
|
||||
return o
|
||||
}
|
||||
|
||||
// TODO: can this be optimized? This only affects non-Hermes barebone engines though
|
||||
const mapSize = nativeEncoder ? 128 : 65_536 // we have to store 64 KiB map or recheck everything if we can't decode to byte array
|
||||
|
||||
export function fromBase32(str, isBase32Hex) {
|
||||
let inputLength = str.length
|
||||
while (str[inputLength - 1] === '=') inputLength--
|
||||
const paddingLength = str.length - inputLength
|
||||
const tailLength = inputLength % 8
|
||||
const mainLength = inputLength - tailLength // multiples of 8
|
||||
if (![0, 2, 4, 5, 7].includes(tailLength)) throw new SyntaxError(E_LENGTH) // fast verification
|
||||
if (paddingLength > 7 || (paddingLength !== 0 && str.length % 8 !== 0)) {
|
||||
throw new SyntaxError(E_PADDING)
|
||||
}
|
||||
|
||||
const alphabet = isBase32Hex ? BASE32HEX : BASE32
|
||||
const helpers = isBase32Hex ? BASE32HEX_HELPERS : BASE32_HELPERS
|
||||
|
||||
if (!helpers.fromMap) {
|
||||
helpers.fromMap = new Int8Array(mapSize).fill(-1) // no regex input validation here, so we map all other bytes to -1 and recheck sign
|
||||
alphabet.forEach((c, i) => {
|
||||
helpers.fromMap[c.charCodeAt(0)] = helpers.fromMap[c.toLowerCase().charCodeAt(0)] = i
|
||||
})
|
||||
}
|
||||
|
||||
const m = helpers.fromMap
|
||||
|
||||
const arr = new Uint8Array(Math.floor((inputLength * 5) / 8))
|
||||
let at = 0
|
||||
let i = 0
|
||||
|
||||
if (nativeEncoder) {
|
||||
const codes = encodeAscii(str, E_CHAR)
|
||||
for (; i < mainLength; i += 8) {
|
||||
// each 5 bits, grouped 5 * 4 = 20
|
||||
const x0 = codes[i]
|
||||
const x1 = codes[i + 1]
|
||||
const x2 = codes[i + 2]
|
||||
const x3 = codes[i + 3]
|
||||
const x4 = codes[i + 4]
|
||||
const x5 = codes[i + 5]
|
||||
const x6 = codes[i + 6]
|
||||
const x7 = codes[i + 7]
|
||||
const a = (m[x0] << 15) | (m[x1] << 10) | (m[x2] << 5) | m[x3]
|
||||
const b = (m[x4] << 15) | (m[x5] << 10) | (m[x6] << 5) | m[x7]
|
||||
if (a < 0 || b < 0) throw new SyntaxError(E_CHAR)
|
||||
arr[at] = a >> 12
|
||||
arr[at + 1] = (a >> 4) & 0xff
|
||||
arr[at + 2] = ((a << 4) & 0xff) | (b >> 16)
|
||||
arr[at + 3] = (b >> 8) & 0xff
|
||||
arr[at + 4] = b & 0xff
|
||||
at += 5
|
||||
}
|
||||
} else {
|
||||
for (; i < mainLength; i += 8) {
|
||||
// each 5 bits, grouped 5 * 4 = 20
|
||||
const x0 = str.charCodeAt(i)
|
||||
const x1 = str.charCodeAt(i + 1)
|
||||
const x2 = str.charCodeAt(i + 2)
|
||||
const x3 = str.charCodeAt(i + 3)
|
||||
const x4 = str.charCodeAt(i + 4)
|
||||
const x5 = str.charCodeAt(i + 5)
|
||||
const x6 = str.charCodeAt(i + 6)
|
||||
const x7 = str.charCodeAt(i + 7)
|
||||
const a = (m[x0] << 15) | (m[x1] << 10) | (m[x2] << 5) | m[x3]
|
||||
const b = (m[x4] << 15) | (m[x5] << 10) | (m[x6] << 5) | m[x7]
|
||||
if (a < 0 || b < 0) throw new SyntaxError(E_CHAR)
|
||||
arr[at] = a >> 12
|
||||
arr[at + 1] = (a >> 4) & 0xff
|
||||
arr[at + 2] = ((a << 4) & 0xff) | (b >> 16)
|
||||
arr[at + 3] = (b >> 8) & 0xff
|
||||
arr[at + 4] = b & 0xff
|
||||
at += 5
|
||||
}
|
||||
}
|
||||
|
||||
// Last block, valid tailLength: 0 2 4 5 7, checked already
|
||||
// We check last chunk to be strict
|
||||
if (tailLength < 2) return arr
|
||||
const ab = (m[str.charCodeAt(i++)] << 5) | m[str.charCodeAt(i++)]
|
||||
if (ab < 0) throw new SyntaxError(E_CHAR)
|
||||
arr[at++] = ab >> 2
|
||||
if (tailLength < 4) {
|
||||
if (ab & 0x3) throw new SyntaxError(E_LAST)
|
||||
return arr
|
||||
}
|
||||
|
||||
const cd = (m[str.charCodeAt(i++)] << 5) | m[str.charCodeAt(i++)]
|
||||
if (cd < 0) throw new SyntaxError(E_CHAR)
|
||||
arr[at++] = ((ab << 6) & 0xff) | (cd >> 4)
|
||||
if (tailLength < 5) {
|
||||
if (cd & 0xf) throw new SyntaxError(E_LAST)
|
||||
return arr
|
||||
}
|
||||
|
||||
const e = m[str.charCodeAt(i++)]
|
||||
if (e < 0) throw new SyntaxError(E_CHAR)
|
||||
arr[at++] = ((cd << 4) & 0xff) | (e >> 1) // 4 + 4
|
||||
if (tailLength < 7) {
|
||||
if (e & 0x1) throw new SyntaxError(E_LAST)
|
||||
return arr
|
||||
}
|
||||
|
||||
const fg = (m[str.charCodeAt(i++)] << 5) | m[str.charCodeAt(i++)]
|
||||
if (fg < 0) throw new SyntaxError(E_CHAR)
|
||||
arr[at++] = ((e << 7) & 0xff) | (fg >> 3) // 1 + 5 + 2
|
||||
// Can't be 8, so no h
|
||||
if (fg & 0x7) throw new SyntaxError(E_LAST)
|
||||
return arr
|
||||
}
|
||||
53
node_modules/@exodus/bytes/fallback/base58check.js
generated
vendored
Normal file
53
node_modules/@exodus/bytes/fallback/base58check.js
generated
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
import { typedView } from '@exodus/bytes/array.js'
|
||||
import { toBase58, fromBase58 } from '@exodus/bytes/base58.js'
|
||||
import { assertUint8 } from '../assert.js'
|
||||
|
||||
const E_CHECKSUM = 'Invalid checksum'
|
||||
|
||||
// checksum length is 4, i.e. only the first 4 bytes of the hash are used
|
||||
|
||||
function encodeWithChecksum(arr, checksum) {
|
||||
// arr type in already validated in input
|
||||
const res = new Uint8Array(arr.length + 4)
|
||||
res.set(arr, 0)
|
||||
res.set(checksum.subarray(0, 4), arr.length)
|
||||
return toBase58(res)
|
||||
}
|
||||
|
||||
function decodeWithChecksum(str) {
|
||||
const arr = fromBase58(str) // checks input
|
||||
const payloadSize = arr.length - 4
|
||||
if (payloadSize < 0) throw new Error(E_CHECKSUM)
|
||||
return [arr.subarray(0, payloadSize), arr.subarray(payloadSize)]
|
||||
}
|
||||
|
||||
function assertChecksum(c, r) {
|
||||
if ((c[0] ^ r[0]) | (c[1] ^ r[1]) | (c[2] ^ r[2]) | (c[3] ^ r[3])) throw new Error(E_CHECKSUM)
|
||||
}
|
||||
|
||||
export const makeBase58check = (hashAlgo, hashAlgoSync) => {
|
||||
const apis = {
|
||||
async encode(arr) {
|
||||
assertUint8(arr)
|
||||
return encodeWithChecksum(arr, await hashAlgo(arr))
|
||||
},
|
||||
async decode(str, format = 'uint8') {
|
||||
const [payload, checksum] = decodeWithChecksum(str)
|
||||
assertChecksum(checksum, await hashAlgo(payload))
|
||||
return typedView(payload, format)
|
||||
},
|
||||
}
|
||||
if (!hashAlgoSync) return apis
|
||||
return {
|
||||
...apis,
|
||||
encodeSync(arr) {
|
||||
assertUint8(arr)
|
||||
return encodeWithChecksum(arr, hashAlgoSync(arr))
|
||||
},
|
||||
decodeSync(str, format = 'uint8') {
|
||||
const [payload, checksum] = decodeWithChecksum(str)
|
||||
assertChecksum(checksum, hashAlgoSync(payload))
|
||||
return typedView(payload, format)
|
||||
},
|
||||
}
|
||||
}
|
||||
192
node_modules/@exodus/bytes/fallback/base64.js
generated
vendored
Normal file
192
node_modules/@exodus/bytes/fallback/base64.js
generated
vendored
Normal file
@@ -0,0 +1,192 @@
|
||||
import { assertUint8 } from '../assert.js'
|
||||
import { nativeEncoder, nativeDecoder } from './_utils.js'
|
||||
import { encodeAscii, decodeAscii } from './latin1.js'
|
||||
|
||||
// See https://datatracker.ietf.org/doc/html/rfc4648
|
||||
|
||||
const BASE64 = [...'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/']
|
||||
const BASE64URL = [...'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_']
|
||||
const BASE64_HELPERS = {}
|
||||
const BASE64URL_HELPERS = {}
|
||||
|
||||
export const E_CHAR = 'Invalid character in base64 input'
|
||||
export const E_PADDING = 'Invalid base64 padding'
|
||||
export const E_LENGTH = 'Invalid base64 length'
|
||||
export const E_LAST = 'Invalid last chunk'
|
||||
|
||||
// We construct output by concatenating chars, this seems to be fine enough on modern JS engines
/**
 * Encode bytes to a base64 / base64url string.
 *
 * @param {Uint8Array} arr - input bytes (validated by assertUint8)
 * @param {boolean} isURL - use the base64url alphabet (-_) instead of (+/)
 * @param {boolean} padding - append '=' padding up to a multiple of 4 chars
 * @returns {string} encoded text
 */
export function toBase64(arr, isURL, padding) {
  assertUint8(arr)
  const fullChunks = (arr.length / 3) | 0 // number of complete 3-byte groups
  const fullChunksBytes = fullChunks * 3
  let o = ''
  let i = 0

  const alphabet = isURL ? BASE64URL : BASE64
  const helpers = isURL ? BASE64URL_HELPERS : BASE64_HELPERS
  if (!helpers.pairs) {
    helpers.pairs = []
    if (nativeDecoder) {
      // Lazy to save memory in case if this is not needed
      // codepairs[hi6 << 6 | lo6] holds the two output char codes packed into one u16
      helpers.codepairs = new Uint16Array(64 * 64)
      const u16 = helpers.codepairs
      const u8 = new Uint8Array(u16.buffer, u16.byteOffset, u16.byteLength) // write as 1-byte to ignore BE/LE difference
      for (let i = 0; i < 64; i++) {
        const ic = alphabet[i].charCodeAt(0)
        for (let j = 0; j < 64; j++) u8[(i << 7) | (j << 1)] = u8[(j << 7) | ((i << 1) + 1)] = ic
      }
    } else {
      // String-pair table: pairs[hi6 << 6 | lo6] is the two-char string for 12 bits
      const p = helpers.pairs
      for (let i = 0; i < 64; i++) {
        for (let j = 0; j < 64; j++) p.push(`${alphabet[i]}${alphabet[j]}`)
      }
    }
  }

  const { pairs, codepairs } = helpers

  // Fast path for complete blocks
  // This whole loop can be commented out, the algorithm won't change, it's just an optimization of the next loop
  if (nativeDecoder) {
    const oa = new Uint16Array(fullChunks * 2) // 2 char-pairs (4 chars) per 3 input bytes
    let j = 0
    // 4x unrolled: 12 input bytes -> 8 u16 char-pairs per iteration
    for (const last = arr.length - 11; i < last; i += 12, j += 8) {
      const x0 = arr[i]
      const x1 = arr[i + 1]
      const x2 = arr[i + 2]
      const x3 = arr[i + 3]
      const x4 = arr[i + 4]
      const x5 = arr[i + 5]
      const x6 = arr[i + 6]
      const x7 = arr[i + 7]
      const x8 = arr[i + 8]
      const x9 = arr[i + 9]
      const x10 = arr[i + 10]
      const x11 = arr[i + 11]
      oa[j] = codepairs[(x0 << 4) | (x1 >> 4)]
      oa[j + 1] = codepairs[((x1 & 0x0f) << 8) | x2]
      oa[j + 2] = codepairs[(x3 << 4) | (x4 >> 4)]
      oa[j + 3] = codepairs[((x4 & 0x0f) << 8) | x5]
      oa[j + 4] = codepairs[(x6 << 4) | (x7 >> 4)]
      oa[j + 5] = codepairs[((x7 & 0x0f) << 8) | x8]
      oa[j + 6] = codepairs[(x9 << 4) | (x10 >> 4)]
      oa[j + 7] = codepairs[((x10 & 0x0f) << 8) | x11]
    }

    // i < last here is equivalent to i < fullChunksBytes
    for (const last = arr.length - 2; i < last; i += 3, j += 2) {
      const a = arr[i]
      const b = arr[i + 1]
      const c = arr[i + 2]
      oa[j] = codepairs[(a << 4) | (b >> 4)]
      oa[j + 1] = codepairs[((b & 0x0f) << 8) | c]
    }

    o = decodeAscii(oa)
  } else {
    // This can be optimized by ~25% with templates on Hermes, but this codepath is not called on Hermes, it uses btoa
    // Check git history for templates version
    for (; i < fullChunksBytes; i += 3) {
      const a = arr[i]
      const b = arr[i + 1]
      const c = arr[i + 2]
      o += pairs[(a << 4) | (b >> 4)]
      o += pairs[((b & 0x0f) << 8) | c]
    }

  }

  // If we have something left, process it with a full algo
  let carry = 0
  let shift = 2 // First byte needs to be shifted by 2 to get 6 bits
  const length = arr.length
  for (; i < length; i++) {
    const x = arr[i]
    o += alphabet[carry | (x >> shift)] // shift >= 2, so this fits
    if (shift === 6) {
      shift = 0
      o += alphabet[x & 0x3f]
    }

    carry = (x << (6 - shift)) & 0x3f
    shift += 2 // Each byte prints 6 bits and leaves 2 bits
  }

  if (shift !== 2) o += alphabet[carry] // shift 2 means we have no carry left
  // remainder 0 -> no padding, 1 -> '==', 2 -> '='
  if (padding) o += ['', '==', '='][length - fullChunksBytes]

  return o
}
|
||||
|
||||
// TODO: can this be optimized? This only affects non-Hermes barebone engines though
const mapSize = nativeEncoder ? 128 : 65_536 // we have to store 64 KiB map or recheck everything if we can't decode to byte array

/**
 * Decode a base64 / base64url string into bytes.
 * Strict: rejects invalid characters, malformed padding/length, and non-zero discarded bits.
 *
 * @param {string} str - base64 input; '=' padding is optional but validated when present
 * @param {boolean} isURL - expect the base64url alphabet (-_) instead of (+/)
 * @returns {Uint8Array} decoded bytes
 * @throws {SyntaxError} with E_CHAR / E_PADDING / E_LENGTH / E_LAST messages
 */
export function fromBase64(str, isURL) {
  let inputLength = str.length
  while (str[inputLength - 1] === '=') inputLength-- // strip (and later validate) trailing padding
  const paddingLength = str.length - inputLength
  const tailLength = inputLength % 4
  const mainLength = inputLength - tailLength // multiples of 4
  if (tailLength === 1) throw new SyntaxError(E_LENGTH)
  if (paddingLength > 3 || (paddingLength !== 0 && str.length % 4 !== 0)) {
    throw new SyntaxError(E_PADDING)
  }

  const alphabet = isURL ? BASE64URL : BASE64
  const helpers = isURL ? BASE64URL_HELPERS : BASE64_HELPERS

  if (!helpers.fromMap) {
    helpers.fromMap = new Int8Array(mapSize).fill(-1) // no regex input validation here, so we map all other bytes to -1 and recheck sign
    alphabet.forEach((c, i) => (helpers.fromMap[c.charCodeAt(0)] = i))
  }

  const m = helpers.fromMap

  const arr = new Uint8Array(Math.floor((inputLength * 3) / 4))
  let at = 0
  let i = 0

  if (nativeEncoder) {
    // encodeAscii also rejects non-ASCII chars with E_CHAR
    const codes = encodeAscii(str, E_CHAR)
    for (; i < mainLength; i += 4) {
      const c0 = codes[i]
      const c1 = codes[i + 1]
      const c2 = codes[i + 2]
      const c3 = codes[i + 3]
      // Any -1 from the map makes the packed 24-bit value negative, caught below
      const a = (m[c0] << 18) | (m[c1] << 12) | (m[c2] << 6) | m[c3]
      if (a < 0) throw new SyntaxError(E_CHAR)
      arr[at] = a >> 16
      arr[at + 1] = (a >> 8) & 0xff
      arr[at + 2] = a & 0xff
      at += 3
    }
  } else {
    for (; i < mainLength; i += 4) {
      const c0 = str.charCodeAt(i)
      const c1 = str.charCodeAt(i + 1)
      const c2 = str.charCodeAt(i + 2)
      const c3 = str.charCodeAt(i + 3)
      const a = (m[c0] << 18) | (m[c1] << 12) | (m[c2] << 6) | m[c3]
      if (a < 0) throw new SyntaxError(E_CHAR)
      arr[at] = a >> 16
      arr[at + 1] = (a >> 8) & 0xff
      arr[at + 2] = a & 0xff
      at += 3
    }
  }

  // Can be 0, 2 or 3, verified by padding checks already
  if (tailLength < 2) return arr // 0
  const ab = (m[str.charCodeAt(i++)] << 6) | m[str.charCodeAt(i++)]
  if (ab < 0) throw new SyntaxError(E_CHAR)
  arr[at++] = ab >> 4
  if (tailLength < 3) {
    if (ab & 0xf) throw new SyntaxError(E_LAST) // discarded low bits must be zero
    return arr // 2
  }

  const c = m[str.charCodeAt(i++)]
  if (c < 0) throw new SyntaxError(E_CHAR)
  arr[at++] = ((ab << 4) & 0xff) | (c >> 2)
  if (c & 0x3) throw new SyntaxError(E_LAST) // discarded low bits must be zero
  return arr // 3
}
|
||||
81
node_modules/@exodus/bytes/fallback/encoding.api.js
generated
vendored
Normal file
81
node_modules/@exodus/bytes/fallback/encoding.api.js
generated
vendored
Normal file
@@ -0,0 +1,81 @@
|
||||
import labels from './encoding.labels.js'
|
||||
|
||||
// Lazily built alias -> canonical name lookup, shared across calls
let labelsMap

export const E_ENCODING = 'Unknown encoding'

// Warning: unlike whatwg-encoding, returns lowercased labels
// Those are case-insensitive and that's how TextDecoder encoding getter normalizes them
// https://encoding.spec.whatwg.org/#names-and-labels
export function normalizeEncoding(label) {
  // Fast paths for the most common labels, checked before any string processing
  if (label === 'utf-8' || label === 'utf8' || label === 'UTF-8' || label === 'UTF8') return 'utf-8'
  if (label === 'windows-1252' || label === 'ascii' || label === 'latin1') return 'windows-1252'
  // Labels must be ASCII (word chars plus a few punctuation / ASCII whitespace chars)
  if (/[^\w\t\n\f\r .:-]/i.test(label)) return null
  const key = `${label}`.trim().toLowerCase()
  if (Object.hasOwn(labels, key)) return key // already a canonical name
  if (labelsMap === undefined) {
    labelsMap = new Map()
    for (const [canonical, aliases] of Object.entries(labels)) {
      for (const alias of aliases) labelsMap.set(alias, canonical)
    }
  }

  return labelsMap.get(key) ?? null
}
|
||||
|
||||
// TODO: make this more strict against Symbol.toStringTag
// Is not very significant though, anything faking Symbol.toStringTag could as well override
// prototypes, which is not something we protect against

// Recognizes ArrayBuffer and SharedArrayBuffer, including cross-realm ones (via toString tag)
function isAnyArrayBuffer(x) {
  if (x instanceof ArrayBuffer) return true
  const SAB = globalThis.SharedArrayBuffer
  if (SAB && x instanceof SAB) return true
  if (!x || typeof x.byteLength !== 'number') return false
  const tag = Object.prototype.toString.call(x)
  return tag === '[object ArrayBuffer]' || tag === '[object SharedArrayBuffer]'
}

// Normalize a BufferSource (TypedArray / DataView / (Shared)ArrayBuffer) into an Uint8Array view
export function fromSource(x) {
  if (x instanceof Uint8Array) return x
  if (ArrayBuffer.isView(x)) return new Uint8Array(x.buffer, x.byteOffset, x.byteLength)
  if (!isAnyArrayBuffer(x)) {
    throw new TypeError('Argument must be a SharedArrayBuffer, ArrayBuffer or ArrayBufferView')
  }

  if ('detached' in x) return x.detached === true ? new Uint8Array() : new Uint8Array(x)
  // Old engines without .detached: viewing a detached buffer throws, treat it as empty
  try {
    return new Uint8Array(x)
  } catch {
    return new Uint8Array()
  }
}

// Warning: unlike whatwg-encoding, returns lowercased labels
// Those are case-insensitive and that's how TextDecoder encoding getter normalizes them
export function getBOMEncoding(input) {
  const u8 = fromSource(input) // also type-checks the input
  const length = u8.length
  if (length >= 3 && u8[0] === 0xef && u8[1] === 0xbb && u8[2] === 0xbf) return 'utf-8'
  if (length >= 2) {
    if (u8[0] === 0xff && u8[1] === 0xfe) return 'utf-16le'
    if (u8[0] === 0xfe && u8[1] === 0xff) return 'utf-16be'
  }

  return null
}
|
||||
|
||||
// Canonical names starting with these 3-char prefixes are fully uppercased per the spec
const uppercasePrefixes = new Set(['utf', 'iso', 'koi', 'euc', 'ibm', 'gbk'])

// Unlike normalizeEncoding, case-sensitive
// https://encoding.spec.whatwg.org/#names-and-labels
export function labelToName(label) {
  const normalized = normalizeEncoding(label)
  if (normalized === 'utf-8') return 'UTF-8' // fast path
  if (!normalized) return normalized // null for unknown labels
  if (uppercasePrefixes.has(normalized.slice(0, 3))) return normalized.toUpperCase()
  switch (normalized) {
    case 'big5':
      return 'Big5'
    case 'shift_jis':
      return 'Shift_JIS'
    default:
      return normalized // already in spec casing (e.g. windows-125x, x-mac-cyrillic)
  }
}
|
||||
319
node_modules/@exodus/bytes/fallback/encoding.js
generated
vendored
Normal file
319
node_modules/@exodus/bytes/fallback/encoding.js
generated
vendored
Normal file
@@ -0,0 +1,319 @@
|
||||
// We can't return native TextDecoder if it's present, as Node.js one is broken on windows-1252 and we fix that
|
||||
// We are also faster than Node.js built-in on both TextEncoder and TextDecoder
|
||||
|
||||
import { utf16toString, utf16toStringLoose } from '@exodus/bytes/utf16.js'
|
||||
import { utf8fromStringLoose, utf8toString, utf8toStringLoose } from '@exodus/bytes/utf8.js'
|
||||
import { createSinglebyteDecoder } from '@exodus/bytes/single-byte.js'
|
||||
import labels from './encoding.labels.js'
|
||||
import { fromSource, getBOMEncoding, normalizeEncoding, E_ENCODING } from './encoding.api.js'
|
||||
import { unfinishedBytes, mergePrefix } from './encoding.util.js'
|
||||
|
||||
export { labelToName, getBOMEncoding, normalizeEncoding } from './encoding.api.js'
|
||||
|
||||
// Shown when a legacy multi-byte encoding is requested without the full entrypoint loaded
const E_MULTI = "import '@exodus/bytes/encoding.js' for legacy multi-byte encodings support"
const E_OPTIONS = 'The "options" argument must be of type object'
const replacementChar = '\uFFFD'
// Legacy multi-byte encodings that require the optional multi-byte codecs hookup below
const multibyteSet = new Set(['big5', 'euc-kr', 'euc-jp', 'iso-2022-jp', 'shift_jis', 'gbk', 'gb18030']) // prettier-ignore
// Populated via setMultibyte() by the full entrypoint; undefined in the light build
let createMultibyteDecoder, multibyteEncoder

export const isMultibyte = (enc) => multibyteSet.has(enc)
// Hook for the full entrypoint to register multi-byte decoder/encoder factories
export function setMultibyte(createDecoder, createEncoder) {
  createMultibyteDecoder = createDecoder
  multibyteEncoder = createEncoder
}

// Returns the registered multi-byte encoder factory, throws if it was never registered
export function getMultibyteEncoder() {
  if (!multibyteEncoder) throw new Error(E_MULTI)
  return multibyteEncoder
}

// Define a non-writable property, used for the frozen spec attributes (encoding, fatal, ...)
const define = (obj, key, value) => Object.defineProperty(obj, key, { value, writable: false })
|
||||
|
||||
// Recognizes Uint8Array instances, including cross-realm ones (via toString tag)
function isAnyUint8Array(x) {
  if (x instanceof Uint8Array) return true
  if (!x || !ArrayBuffer.isView(x) || x.BYTES_PER_ELEMENT !== 1) return false
  return Object.prototype.toString.call(x) === '[object Uint8Array]'
}

// Pick the (loose or fatal) decode function for the three Unicode encodings
// `loose` replaces malformed sequences instead of throwing
function unicodeDecoder(encoding, loose) {
  if (encoding === 'utf-8') return loose ? utf8toStringLoose : utf8toString // likely
  const form = encoding === 'utf-16le' ? 'uint8-le' : 'uint8-be'
  return loose ? (u) => utf16toStringLoose(u, form) : (u) => utf16toString(u, form)
}
|
||||
|
||||
/**
 * WHATWG-compatible TextDecoder.
 * Unicode encodings (utf-8 / utf-16le / utf-16be) are handled inline with streaming
 * and BOM support; legacy single-byte and (optional) multi-byte encodings are
 * delegated to createSinglebyteDecoder / createMultibyteDecoder.
 */
export class TextDecoder {
  #decode // cached decode function; for multi-byte decoders it can hold state
  #unicode // true for utf-8 / utf-16le / utf-16be
  #multibyte // true for legacy multi-byte encodings (needs setMultibyte hookup)
  #chunk // unfinished trailing bytes carried over between decode({stream: true}) calls
  #canBOM // whether a BOM at the start of the next chunk should be stripped

  constructor(encoding = 'utf-8', options = {}) {
    if (typeof options !== 'object') throw new TypeError(E_OPTIONS)
    const enc = normalizeEncoding(encoding)
    // 'replacement' is a valid label group but not constructible per spec
    if (!enc || enc === 'replacement') throw new RangeError(E_ENCODING)
    // Spec attributes are read-only
    define(this, 'encoding', enc)
    define(this, 'fatal', !!options.fatal)
    define(this, 'ignoreBOM', !!options.ignoreBOM)
    this.#unicode = enc === 'utf-8' || enc === 'utf-16le' || enc === 'utf-16be'
    this.#multibyte = !this.#unicode && isMultibyte(enc)
    this.#canBOM = this.#unicode && !this.ignoreBOM
  }

  get [Symbol.toStringTag]() {
    return 'TextDecoder'
  }

  /**
   * Decode a BufferSource chunk. With options.stream truthy, incomplete
   * trailing sequences are buffered for the next call instead of erroring/replacing.
   */
  decode(input, options = {}) {
    if (typeof options !== 'object') throw new TypeError(E_OPTIONS)
    const stream = !!options.stream
    let u = input === undefined ? new Uint8Array() : fromSource(input)
    const empty = u.length === 0 // also can't be streaming after next line
    if (empty && stream) return '' // no state change

    if (this.#unicode) {
      let prefix
      if (this.#chunk) {
        // Combine the carried-over unfinished bytes with the new input
        const merged = mergePrefix(u, this.#chunk, this.encoding)
        if (u.length < 3) {
          u = merged // might be unfinished, but fully consumed old u
        } else {
          prefix = merged // stops at complete chunk
          const add = prefix.length - this.#chunk.length
          if (add > 0) u = u.subarray(add)
        }

        this.#chunk = null
      } else if (empty) {
        this.#canBOM = !this.ignoreBOM // not streaming
        return ''
      }

      // For non-stream utf-8 we don't have to do this as it matches utf8toStringLoose already
      // For non-stream loose utf-16 we still have to do this as this API supports uneven byteLength unlike utf16toStringLoose
      let suffix = ''
      if (stream || (!this.fatal && this.encoding !== 'utf-8')) {
        const trail = unfinishedBytes(u, u.byteLength, this.encoding)
        if (trail > 0) {
          if (stream) {
            this.#chunk = Uint8Array.from(u.subarray(-trail)) // copy
          } else {
            // non-fatal mode as already checked
            suffix = replacementChar
          }

          u = u.subarray(0, -trail)
        }
      }

      let seenBOM = false
      if (this.#canBOM) {
        const bom = this.#findBom(prefix ?? u)
        if (bom) {
          seenBOM = true
          if (prefix) {
            prefix = prefix.subarray(bom)
          } else {
            u = u.subarray(bom)
          }
        }
      } else if (!stream && !this.ignoreBOM) {
        this.#canBOM = true
      }

      if (!this.#decode) this.#decode = unicodeDecoder(this.encoding, !this.fatal)
      try {
        const res = (prefix ? this.#decode(prefix) : '') + this.#decode(u) + suffix
        // "BOM seen" is set on the current decode call only if it did not error, in "serialize I/O queue" after decoding
        if (stream && (seenBOM || res.length > 0)) this.#canBOM = false
        return res
      } catch (err) {
        this.#chunk = null // reset unfinished chunk on errors
        // The correct way per spec seems to be not destroying the decoder state (aka BOM here) in stream mode
        // See also multi-byte.js
        throw err
      }

      // eslint-disable-next-line no-else-return
    } else if (this.#multibyte) {
      if (!createMultibyteDecoder) throw new Error(E_MULTI)
      if (!this.#decode) this.#decode = createMultibyteDecoder(this.encoding, !this.fatal) // can contain state!
      return this.#decode(u, stream)
    } else {
      if (!this.#decode) this.#decode = createSinglebyteDecoder(this.encoding, !this.fatal)
      return this.#decode(u)
    }
  }

  // Returns the BOM length in bytes at the start of `u` for this encoding, or 0
  #findBom(u) {
    switch (this.encoding) {
      case 'utf-8':
        return u.byteLength >= 3 && u[0] === 0xef && u[1] === 0xbb && u[2] === 0xbf ? 3 : 0
      case 'utf-16le':
        return u.byteLength >= 2 && u[0] === 0xff && u[1] === 0xfe ? 2 : 0
      case 'utf-16be':
        return u.byteLength >= 2 && u[0] === 0xfe && u[1] === 0xff ? 2 : 0
    }

    /* c8 ignore next */
    throw new Error('Unreachable')
  }
}
|
||||
|
||||
/**
 * WHATWG-compatible TextEncoder (UTF-8 only, per spec).
 * encode() uses loose conversion: unpaired surrogates become U+FFFD.
 */
export class TextEncoder {
  constructor() {
    define(this, 'encoding', 'utf-8') // read-only, always utf-8 per spec
  }

  get [Symbol.toStringTag]() {
    return 'TextEncoder'
  }

  // Encode a string (coerced if not one) into a fresh Uint8Array
  encode(str = '') {
    if (typeof str !== 'string') str = `${str}`
    const res = utf8fromStringLoose(str)
    return res.byteOffset === 0 ? res : res.slice(0) // Ensure 0-offset, to match new Uint8Array (per spec), which is non-pooled
  }

  /**
   * Encode `str` into `target`, never splitting a multi-byte sequence at the end.
   * @returns {{read: number, written: number}} charcodes consumed / bytes written
   */
  encodeInto(str, target) {
    if (typeof str !== 'string') str = `${str}`
    if (!isAnyUint8Array(target)) throw new TypeError('Target must be an Uint8Array')
    if (target.buffer.detached) return { read: 0, written: 0 } // Until https://github.com/whatwg/encoding/issues/324 is resolved

    const tlen = target.length
    // UTF-8 output is never shorter than the charcode count, so we can pre-truncate
    if (tlen < str.length) str = str.slice(0, tlen)
    let u8 = utf8fromStringLoose(str)
    let read
    if (tlen >= u8.length) {
      read = str.length // everything fits
    } else if (u8.length === str.length) {
      if (u8.length > tlen) u8 = u8.subarray(0, tlen) // ascii can be truncated
      read = u8.length
    } else {
      u8 = u8.subarray(0, tlen)
      // Drop a partially-written multi-byte sequence at the cut point
      const unfinished = unfinishedBytes(u8, u8.length, 'utf-8')
      if (unfinished > 0) u8 = u8.subarray(0, u8.length - unfinished)

      // We can do this because loose str -> u8 -> str preserves length, unlike loose u8 -> str -> u8
      // Each unpaired surrogate (1 charcode) is replaced with a single charcode
      read = utf8toStringLoose(u8).length // FIXME: Converting back is very inefficient
    }

    try {
      target.set(u8)
    } catch {
      return { read: 0, written: 0 } // see above, likely detached but no .detached property support
    }

    return { read, written: u8.length }
  }
}
|
||||
|
||||
const E_NO_STREAMS = 'TransformStream global not present in the environment'

// Streaming wrapper over TextDecoder
// https://encoding.spec.whatwg.org/#interface-textdecoderstream
export class TextDecoderStream {
  constructor(encoding = 'utf-8', options = {}) {
    if (!globalThis.TransformStream) throw new Error(E_NO_STREAMS)
    const decoder = new TextDecoder(encoding, options) // validates encoding/options
    const transformer = {
      transform(chunk, controller) {
        const text = decoder.decode(fromSource(chunk), { stream: true })
        if (text) controller.enqueue(text)
      },
      flush(controller) {
        // https://streams.spec.whatwg.org/#dom-transformer-flush
        const text = decoder.decode() // drain any buffered incomplete sequence
        if (text) controller.enqueue(text)
        // No need to call .terminate() (Node.js is wrong)
      },
    }

    const { readable, writable } = new TransformStream(transformer)
    define(this, 'encoding', decoder.encoding)
    define(this, 'fatal', decoder.fatal)
    define(this, 'ignoreBOM', decoder.ignoreBOM)
    define(this, 'readable', readable)
    define(this, 'writable', writable)
  }

  get [Symbol.toStringTag]() {
    return 'TextDecoderStream'
  }
}
|
||||
|
||||
// https://encoding.spec.whatwg.org/#interface-textencoderstream
// Only UTF-8 per spec
export class TextEncoderStream {
  constructor() {
    if (!globalThis.TransformStream) throw new Error(E_NO_STREAMS)
    // A high surrogate char carried over between chunks, waiting for its pair
    let lead
    const transform = new TransformStream({
      // https://encoding.spec.whatwg.org/#encode-and-enqueue-a-chunk
      // Not identical in code, but reuses loose mode to have identical behavior
      transform: (chunk, controller) => {
        let s = String(chunk) // DOMString, might contain unpaired surrogates
        if (s.length === 0) return
        if (lead) {
          // Prepend the pending high surrogate so a split pair encodes correctly
          s = lead + s
          lead = null
        }

        const last = s.charCodeAt(s.length - 1) // Can't come from previous lead due to length check
        if ((last & 0xfc_00) === 0xd8_00) {
          // Trailing high surrogate: hold it back until the next chunk
          lead = s[s.length - 1]
          s = s.slice(0, -1)
        }

        if (s) controller.enqueue(utf8fromStringLoose(s))
      },
      // https://encoding.spec.whatwg.org/#encode-and-flush
      flush: (controller) => {
        // A leftover lone surrogate encodes as U+FFFD (ef bf bd)
        if (lead) controller.enqueue(Uint8Array.of(0xef, 0xbf, 0xbd))
      },
    })

    define(this, 'encoding', 'utf-8')
    define(this, 'readable', transform.readable)
    define(this, 'writable', transform.writable)
  }

  get [Symbol.toStringTag]() {
    return 'TextEncoderStream'
  }
}
|
||||
|
||||
// https://encoding.spec.whatwg.org/#decode
// Warning: encoding sniffed from BOM takes preference over the supplied one
// Warning: lossy, performs replacement, no option of throwing
// Completely ignores encoding and even skips validation when BOM is found
// Unlike TextDecoder public API, additionally supports 'replacement' encoding
export function legacyHookDecode(input, fallbackEncoding = 'utf-8') {
  let u8 = fromSource(input)
  const bomEncoding = getBOMEncoding(u8)
  if (bomEncoding) u8 = u8.subarray(bomEncoding === 'utf-8' ? 3 : 2) // strip the BOM itself
  const enc = bomEncoding ?? normalizeEncoding(fallbackEncoding) // "the byte order mark is more authoritative than anything else"

  if (enc === 'utf-8') return utf8toStringLoose(u8)
  if (enc === 'utf-16le' || enc === 'utf-16be') {
    let suffix = ''
    // Uneven byte length: drop the dangling byte(s) and emit a replacement char instead
    if (u8.byteLength % 2 !== 0) {
      suffix = replacementChar
      u8 = u8.subarray(0, -unfinishedBytes(u8, u8.byteLength, enc))
    }

    return utf16toStringLoose(u8, enc === 'utf-16le' ? 'uint8-le' : 'uint8-be') + suffix
  }

  // enc can be null here (unknown label), which also fails this check
  if (!Object.hasOwn(labels, enc)) throw new RangeError(E_ENCODING)

  if (isMultibyte(enc)) {
    if (!createMultibyteDecoder) throw new Error(E_MULTI)
    return createMultibyteDecoder(enc, true)(u8)
  }

  // https://encoding.spec.whatwg.org/#replacement-decoder
  // On non-streaming non-fatal case, it just replaces any non-empty input with a single replacement char
  if (enc === 'replacement') return input.byteLength > 0 ? replacementChar : ''

  return createSinglebyteDecoder(enc, true)(u8)
}
|
||||
46
node_modules/@exodus/bytes/fallback/encoding.labels.js
generated
vendored
Normal file
46
node_modules/@exodus/bytes/fallback/encoding.labels.js
generated
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
// See https://encoding.spec.whatwg.org/#names-and-labels

/* eslint-disable @exodus/export-default/named */
// Map of canonical encoding name (lowercased) -> array of its aliases (lowercased)
// The canonical name itself is not repeated in the alias list
// prettier-ignore
const labels = {
  'utf-8': ['unicode-1-1-utf-8', 'unicode11utf8', 'unicode20utf8', 'utf8', 'x-unicode20utf8'],
  ibm866: ['866', 'cp866', 'csibm866'],
  'iso-8859-2': ['csisolatin2', 'iso-ir-101', 'iso8859-2', 'iso88592', 'iso_8859-2', 'iso_8859-2:1987', 'l2', 'latin2'],
  'iso-8859-3': ['csisolatin3', 'iso-ir-109', 'iso8859-3', 'iso88593', 'iso_8859-3', 'iso_8859-3:1988', 'l3', 'latin3'],
  'iso-8859-4': ['csisolatin4', 'iso-ir-110', 'iso8859-4', 'iso88594', 'iso_8859-4', 'iso_8859-4:1988', 'l4', 'latin4'],
  'iso-8859-5': ['csisolatincyrillic', 'cyrillic', 'iso-ir-144', 'iso8859-5', 'iso88595', 'iso_8859-5', 'iso_8859-5:1988'],
  'iso-8859-6': ['arabic', 'asmo-708', 'csiso88596e', 'csiso88596i', 'csisolatinarabic', 'ecma-114', 'iso-8859-6-e', 'iso-8859-6-i', 'iso-ir-127', 'iso8859-6', 'iso88596', 'iso_8859-6', 'iso_8859-6:1987'],
  'iso-8859-7': ['csisolatingreek', 'ecma-118', 'elot_928', 'greek', 'greek8', 'iso-ir-126', 'iso8859-7', 'iso88597', 'iso_8859-7', 'iso_8859-7:1987', 'sun_eu_greek'],
  'iso-8859-8': ['csiso88598e', 'csisolatinhebrew', 'hebrew', 'iso-8859-8-e', 'iso-ir-138', 'iso8859-8', 'iso88598', 'iso_8859-8', 'iso_8859-8:1988', 'visual'],
  'iso-8859-8-i': ['csiso88598i', 'logical'],
  'iso-8859-10': ['csisolatin6', 'iso-ir-157', 'iso8859-10', 'iso885910', 'l6', 'latin6'],
  'iso-8859-13': ['iso8859-13', 'iso885913'],
  'iso-8859-14': ['iso8859-14', 'iso885914'],
  'iso-8859-15': ['csisolatin9', 'iso8859-15', 'iso885915', 'iso_8859-15', 'l9'],
  'iso-8859-16': [],
  'koi8-r': ['cskoi8r', 'koi', 'koi8', 'koi8_r'],
  'koi8-u': ['koi8-ru'],
  macintosh: ['csmacintosh', 'mac', 'x-mac-roman'],
  'windows-874': ['dos-874', 'iso-8859-11', 'iso8859-11', 'iso885911', 'tis-620'],
  'x-mac-cyrillic': ['x-mac-ukrainian'],
  gbk: ['chinese', 'csgb2312', 'csiso58gb231280', 'gb2312', 'gb_2312', 'gb_2312-80', 'iso-ir-58', 'x-gbk'],
  gb18030: [],
  big5: ['big5-hkscs', 'cn-big5', 'csbig5', 'x-x-big5'],
  'euc-jp': ['cseucpkdfmtjapanese', 'x-euc-jp'],
  'iso-2022-jp': ['csiso2022jp'],
  shift_jis: ['csshiftjis', 'ms932', 'ms_kanji', 'shift-jis', 'sjis', 'windows-31j', 'x-sjis'],
  'euc-kr': ['cseuckr', 'csksc56011987', 'iso-ir-149', 'korean', 'ks_c_5601-1987', 'ks_c_5601-1989', 'ksc5601', 'ksc_5601', 'windows-949'],
  replacement: ['csiso2022kr', 'hz-gb-2312', 'iso-2022-cn', 'iso-2022-cn-ext', 'iso-2022-kr'],
  'utf-16be': ['unicodefffe'],
  'utf-16le': ['csunicode', 'iso-10646-ucs-2', 'ucs-2', 'unicode', 'unicodefeff', 'utf-16'],
  'x-user-defined': [],
}

// windows-1250 .. windows-1258 follow the same alias pattern, generate them
for (let i = 0; i < 9; i++) labels[`windows-125${i}`] = [`cp125${i}`, `x-cp125${i}`]

// windows-1252 and windows-1254 additionally absorb latin1/latin5 label families per spec
// prettier-ignore
labels['windows-1252'].push('ansi_x3.4-1968', 'ascii', 'cp819', 'csisolatin1', 'ibm819', 'iso-8859-1', 'iso-ir-100', 'iso8859-1', 'iso88591', 'iso_8859-1', 'iso_8859-1:1987', 'l1', 'latin1', 'us-ascii')
// prettier-ignore
labels['windows-1254'].push('csisolatin5', 'iso-8859-9', 'iso-ir-148', 'iso8859-9', 'iso88599', 'iso_8859-9', 'iso_8859-9:1989', 'l5', 'latin5')

export default labels
|
||||
64
node_modules/@exodus/bytes/fallback/encoding.util.js
generated
vendored
Normal file
64
node_modules/@exodus/bytes/fallback/encoding.util.js
generated
vendored
Normal file
@@ -0,0 +1,64 @@
|
||||
// Get a number of last bytes in an Uint8Array `u` ending at `len` that don't
// form a codepoint yet, but can be a part of a single codepoint on more data
// `enc` is one of 'utf-8' / 'utf-16le' / 'utf-16be'; anything else throws
export function unfinishedBytes(u, len, enc) {
  switch (enc) {
    case 'utf-8': {
      // 0-3
      let p = 0
      while (p < 2 && p < len && (u[len - p - 1] & 0xc0) === 0x80) p++ // go back 0-2 trailing bytes
      if (p === len) return 0 // no space for lead
      const l = u[len - p - 1]
      if (l < 0xc2 || l > 0xf4) return 0 // not a lead
      if (p === 0) return 1 // nothing to recheck, we have only lead, return it. 2-byte must return here
      if (l < 0xe0 || (l < 0xf0 && p >= 2)) return 0 // 2-byte, or 3-byte or less and we already have 2 trailing
      // Validate the first continuation byte range for the 3/4-byte leads
      // (rejects overlong encodings, surrogates and values above U+10FFFF)
      const lower = l === 0xf0 ? 0x90 : l === 0xe0 ? 0xa0 : 0x80
      const upper = l === 0xf4 ? 0x8f : l === 0xed ? 0x9f : 0xbf
      const n = u[len - p]
      return n >= lower && n <= upper ? p + 1 : 0
    }

    case 'utf-16le':
    case 'utf-16be': {
      // 0-3
      const p = len % 2 // uneven byte length adds 1
      if (len < 2) return p
      const l = len - p - 1
      // Read the last complete 16-bit code unit in the given byte order
      const last = enc === 'utf-16le' ? (u[l] << 8) ^ u[l - 1] : (u[l - 1] << 8) ^ u[l]
      return last >= 0xd8_00 && last < 0xdc_00 ? p + 2 : p // lone lead adds 2
    }
  }

  throw new Error('Unsupported encoding')
}
|
||||
|
||||
// Merge prefix `chunk` with `u` and return new combined prefix
// For u.length < 3, fully consumes u and can return unfinished data,
// otherwise returns a prefix with no unfinished bytes
// `u` - new incoming bytes, `chunk` - carried-over unfinished bytes,
// `enc` - 'utf-8' / 'utf-16le' / 'utf-16be' (forwarded to unfinishedBytes)
export function mergePrefix(u, chunk, enc) {
  if (u.length === 0) return chunk
  if (u.length < 3) {
    // No reason to bruteforce offsets, also it's possible this doesn't yet end the sequence
    const a = new Uint8Array(u.length + chunk.length)
    a.set(chunk)
    a.set(u, chunk.length)
    return a
  }

  // Slice off a small portion of u into prefix chunk so we can decode them separately without extending array size
  const t = new Uint8Array(chunk.length + 3) // We have 1-3 bytes and need 1-3 more bytes
  t.set(chunk)
  t.set(u.subarray(0, 3), chunk.length)

  // Stop at the first offset where unfinished bytes reaches 0 or fits into u
  // If that doesn't happen (u too short), just concat chunk and u completely (above)
  for (let i = 1; i <= 3; i++) {
    const unfinished = unfinishedBytes(t, chunk.length + i, enc) // 0-3
    if (unfinished <= i) {
      // Always reachable at 3, but we still need 'unfinished' value for it
      const add = i - unfinished // 0-3
      return add > 0 ? t.subarray(0, chunk.length + add) : chunk
    }
  }

  // Unreachable
}
|
||||
127
node_modules/@exodus/bytes/fallback/hex.js
generated
vendored
Normal file
127
node_modules/@exodus/bytes/fallback/hex.js
generated
vendored
Normal file
@@ -0,0 +1,127 @@
|
||||
import { assertUint8 } from '../assert.js'
|
||||
import { nativeDecoder, nativeEncoder, decode2string, E_STRING } from './_utils.js'
|
||||
import { encodeAscii, decodeAscii } from './latin1.js'
|
||||
|
||||
let hexArray // array of 256 bytes converted to two-char hex strings
let hexCodes // hexArray converted to u16 code pairs
let dehexArray // lazy reverse lookup; shape differs per codepath — see fromHex below
const _00 = 0x30_30 // '00' string in hex, the only allowed char pair to generate 0 byte
const _ff = 0x66_66 // 'ff' string in hex, max allowed char pair (larger than 'FF' string)
const allowed = '0123456789ABCDEFabcdef' // both cases accepted on input

// Exported error message so callers and tests can match on the exact text
export const E_HEX = 'Input is not a hex string'
|
||||
|
||||
/**
 * Encode bytes to a lowercase hex string.
 *
 * @param {Uint8Array} arr - input bytes (validated by assertUint8)
 * @returns {string} hex text, 2 chars per byte
 */
export function toHex(arr) {
  assertUint8(arr)

  if (!hexArray) hexArray = Array.from({ length: 256 }, (_, i) => i.toString(16).padStart(2, '0'))
  const length = arr.length // this helps Hermes

  // Only old browsers use this, barebone engines don't have TextDecoder
  // But Hermes can use this when it (hopefully) implements TextDecoder
  if (nativeDecoder) {
    if (!hexCodes) {
      // Build u16 pairs by writing individual bytes, so the table works on both BE and LE
      hexCodes = new Uint16Array(256)
      const u8 = new Uint8Array(hexCodes.buffer, hexCodes.byteOffset, hexCodes.byteLength)
      for (let i = 0; i < 256; i++) {
        const pair = hexArray[i]
        u8[2 * i] = pair.charCodeAt(0)
        u8[2 * i + 1] = pair.charCodeAt(1)
      }
    }

    const oa = new Uint16Array(length) // one u16 char-pair per input byte
    let i = 0
    // 4x unrolled main loop
    for (const last3 = arr.length - 3; ; i += 4) {
      if (i >= last3) break // loop is fast enough for moving this here to be useful on JSC
      const x0 = arr[i]
      const x1 = arr[i + 1]
      const x2 = arr[i + 2]
      const x3 = arr[i + 3]
      oa[i] = hexCodes[x0]
      oa[i + 1] = hexCodes[x1]
      oa[i + 2] = hexCodes[x2]
      oa[i + 3] = hexCodes[x3]
    }

    // Up to 3 remaining bytes
    for (; i < length; i++) oa[i] = hexCodes[arr[i]]
    return decodeAscii(oa)
  }

  return decode2string(arr, 0, length, hexArray)
}
|
||||
|
||||
/**
 * Decode a hex string (either case) into bytes.
 *
 * @param {string} str - hex input, must have even length
 * @returns {Uint8Array} decoded bytes
 * @throws {TypeError} when input is not a string
 * @throws {SyntaxError} E_HEX on odd length or invalid characters
 */
export function fromHex(str) {
  if (typeof str !== 'string') throw new TypeError(E_STRING)
  if (str.length % 2 !== 0) throw new SyntaxError(E_HEX)

  const length = str.length / 2 // this helps Hermes in loops
  const arr = new Uint8Array(length)

  // Native encoder path is beneficial even for small arrays in Hermes
  if (nativeEncoder) {
    if (!dehexArray) {
      // Indexed by the u16 char-pair value; 0 means "invalid" unless the pair is '00'
      dehexArray = new Uint8Array(_ff + 1) // 26 KiB cache, >2x perf improvement on Hermes
      const u8 = new Uint8Array(2)
      const u16 = new Uint16Array(u8.buffer, u8.byteOffset, 1) // for endianess-agnostic transform
      const map = [...allowed].map((c) => [c.charCodeAt(0), parseInt(c, 16)])
      for (const [ch, vh] of map) {
        u8[0] = ch // first we read high hex char
        for (const [cl, vl] of map) {
          u8[1] = cl // then we read low hex char
          dehexArray[u16[0]] = (vh << 4) | vl
        }
      }
    }

    // encodeAscii also rejects non-ASCII chars with E_HEX
    const codes = encodeAscii(str, E_HEX)
    const codes16 = new Uint16Array(codes.buffer, codes.byteOffset, codes.byteLength / 2)
    let i = 0
    // 4x unrolled main loop, one u16 char-pair -> one output byte
    for (const last3 = length - 3; i < last3; i += 4) {
      const ai = codes16[i]
      const bi = codes16[i + 1]
      const ci = codes16[i + 2]
      const di = codes16[i + 3]
      const a = dehexArray[ai]
      const b = dehexArray[bi]
      const c = dehexArray[ci]
      const d = dehexArray[di]
      // A 0 result is only valid for the literal '00' pair, everything else is an invalid char
      if ((!a && ai !== _00) || (!b && bi !== _00) || (!c && ci !== _00) || (!d && di !== _00)) {
        throw new SyntaxError(E_HEX)
      }

      arr[i] = a
      arr[i + 1] = b
      arr[i + 2] = c
      arr[i + 3] = d
    }

    // Up to 3 remaining pairs
    while (i < length) {
      const ai = codes16[i]
      const a = dehexArray[ai]
      if (!a && ai !== _00) throw new SyntaxError(E_HEX)
      arr[i++] = a
    }
  } else {
    if (!dehexArray) {
      // no regex input validation here, so we map all other bytes to -1 and recheck sign
      // non-ASCII chars throw already though, so we should process only 0-127
      dehexArray = new Int8Array(128).fill(-1)
      for (let i = 0; i < 16; i++) {
        const s = i.toString(16)
        dehexArray[s.charCodeAt(0)] = dehexArray[s.toUpperCase().charCodeAt(0)] = i
      }
    }

    let j = 0
    for (let i = 0; i < length; i++) {
      const a = str.charCodeAt(j++)
      const b = str.charCodeAt(j++)
      const res = (dehexArray[a] << 4) | dehexArray[b]
      if (res < 0 || (0x7f | a | b) !== 0x7f) throw new SyntaxError(E_HEX) // 0-127
      arr[i] = res
    }
  }

  return arr
}
|
||||
169
node_modules/@exodus/bytes/fallback/latin1.js
generated
vendored
Normal file
169
node_modules/@exodus/bytes/fallback/latin1.js
generated
vendored
Normal file
@@ -0,0 +1,169 @@
|
||||
import {
|
||||
nativeEncoder,
|
||||
nativeDecoder,
|
||||
nativeDecoderLatin1,
|
||||
nativeBuffer,
|
||||
isHermes,
|
||||
isDeno,
|
||||
isLE,
|
||||
skipWeb,
|
||||
} from './_utils.js'
|
||||
|
||||
// Feature detection for the Uint8Array.prototype.toBase64 + atob decode path used below
const { atob } = globalThis
const { toBase64: web64 } = Uint8Array.prototype

// See http://stackoverflow.com/a/22747272/680742, which says that lowest limit is in Chrome, with 0xffff args
// On Hermes, actual max is 0x20_000 minus current stack depth, 1/16 of that should be safe
// (this caps how many elements we pass to String.fromCharCode.apply at once)
const maxFunctionArgs = 0x20_00

// toBase64+atob path is faster on everything where fromBase64 is fast
const useLatin1atob = web64 && atob && !skipWeb
|
||||
|
||||
/**
 * Returns the length of the leading run of ASCII bytes (< 0x80) in `arr`.
 * If every byte is ASCII, returns arr.length.
 *
 * For inputs longer than 64 bytes, scans through an aligned Uint32Array view,
 * 4 words (16 bytes) per iteration, checking the high bit of every byte at once.
 *
 * @param {Uint8Array} arr
 * @returns {number} index of the first non-ASCII byte, or arr.length
 */
export function asciiPrefix(arr) {
  const size = arr.length
  let pos = 0 // bytes verified as ASCII so far

  // Threshold tested on Hermes (worse on <=48, better on >=52)
  // Also on v8 arrs of size <=64 might be on heap and using Uint32Array on them is unoptimal
  if (size > 64) {
    // Advance byte-by-byte until the view would be 4-byte aligned
    const align = (4 - (arr.byteOffset & 3)) % 4
    while (pos < align) {
      if (arr[pos] >= 0x80) return pos
      pos++
    }

    const wordCount = ((arr.byteLength - align) / 4) | 0
    const words = new Uint32Array(arr.buffer, arr.byteOffset + align, wordCount)
    let w = 0
    const stop = wordCount - 3
    // Unrolled: 4 words (16 bytes) per pass; separate per-word tests because
    // "(a | b | c | d) & mask" is slower on Hermes though faster on v8
    while (w < stop) {
      const w0 = words[w]
      const w1 = words[w + 1]
      const w2 = words[w + 2]
      const w3 = words[w + 3]
      if (w0 & 0x80_80_80_80 || w1 & 0x80_80_80_80 || w2 & 0x80_80_80_80 || w3 & 0x80_80_80_80) break
      w += 4
      pos += 16
    }

    // Remaining whole words (including re-scanning the group that tripped above)
    while (w < wordCount && !(words[w] & 0x80_80_80_80)) {
      w++
      pos += 4
    }
  }

  // Byte-by-byte tail: pinpoints the exact offending byte, or confirms all-ASCII
  while (pos < size) {
    if (arr[pos] >= 0x80) return pos
    pos++
  }

  return size
}
|
||||
|
||||
// Capable of decoding Uint16Array to UTF-16 as well as Uint8Array to Latin-1
/**
 * Decodes arr[start..stop) to a string, treating each element as one char code.
 *
 * Three paths, fastest-available first:
 *  1. toBase64 + atob round-trip (byte arrays of 256..1e8 elements, when available)
 *  2. chunked String.fromCharCode.apply (inputs too large for one apply call)
 *  3. single String.fromCharCode.apply
 *
 * @param {Uint8Array|Uint16Array} arr
 * @param {number} [start=0]
 * @param {number} [stop=arr.length]
 * @returns {string}
 */
export function decodeLatin1(arr, start = 0, stop = arr.length) {
  start |= 0
  stop |= 0
  const total = stop - start
  if (total === 0) return ''

  if (
    useLatin1atob &&
    total >= 256 &&
    total < 1e8 &&
    arr.toBase64 === web64 && // guards against subclasses/overrides and non-8-bit views
    arr.BYTES_PER_ELEMENT === 1
  ) {
    const sliced = start === 0 && stop === arr.length ? arr : arr.subarray(start, stop)
    return atob(sliced.toBase64())
  }

  if (total > maxFunctionArgs) {
    // Too many elements for a single .apply() — build the string in chunks
    let prefix = ''
    for (let i = start; i < stop; ) {
      const i1 = Math.min(stop, i + maxFunctionArgs)
      prefix += String.fromCharCode.apply(String, arr.subarray(i, i1))
      i = i1
    }

    return prefix
  }

  const sliced = start === 0 && stop === arr.length ? arr : arr.subarray(start, stop)
  return String.fromCharCode.apply(String, sliced)
}
|
||||
|
||||
// Unchecked for well-formedness, raw. Expects Uint16Array input
// Decodes u16[0..stop) as UTF-16 code units to a string (lone surrogates pass through).
// Uses Buffer's ucs2Slice on little-endian Node.js (not Deno), else decodeLatin1.
export const decodeUCS2 =
  nativeBuffer && isLE && !isDeno
    ? (u16, stop = u16.length) => {
        // TODO: fast path for BE, perhaps faster path for Deno. Note that decoder replaces, this function doesn't
        if (stop > 32) return nativeBuffer.from(u16.buffer, u16.byteOffset, stop * 2).ucs2Slice() // from 64 bytes, below are in heap
        return decodeLatin1(u16, 0, stop)
      }
    : (u16, stop = u16.length) => decodeLatin1(u16, 0, stop)
|
||||
|
||||
// Does not check input, uses best available method
// Building an array for this is only faster than proper string concatenation when TextDecoder or native Buffer are available
// Decodes a typed array of ASCII bytes to a string; caller guarantees all values are < 0x80.
export const decodeAscii = nativeBuffer
  ? (a) =>
      // Buffer is faster on Node.js (but only for long enough data), if we know that output is ascii
      a.byteLength >= 0x3_00 && !isDeno
        ? nativeBuffer.from(a.buffer, a.byteOffset, a.byteLength).latin1Slice(0, a.byteLength) // .latin1Slice is faster than .asciiSlice
        : nativeDecoder.decode(a) // On Node.js, utf8 decoder is faster than latin1
  : nativeDecoderLatin1
    ? (a) => nativeDecoderLatin1.decode(a) // On browsers (specifically WebKit), latin1 decoder is faster than utf8
    : (a) =>
        // Pure-JS fallback: reinterpret as bytes so decodeLatin1 maps one byte per char
        decodeLatin1(
          a instanceof Uint8Array ? a : new Uint8Array(a.buffer, a.byteOffset, a.byteLength)
        )
|
||||
|
||||
/* eslint-disable @exodus/mutable/no-param-reassign-prop-only */
|
||||
|
||||
// Copies str's UTF-16 char codes into the caller-allocated array `arr` and returns it.
// Values above the element range of `arr` are truncated by the typed-array store,
// so callers must pre-validate the string (see encodeLatin1 below).
export const encodeCharcodes = isHermes
  ? (str, arr) => {
      const length = str.length
      if (length > 64) {
        const at = str.charCodeAt.bind(str) // faster on strings from ~64 chars on Hermes, but can be 10x slower on e.g. JSC
        for (let i = 0; i < length; i++) arr[i] = at(i)
      } else {
        for (let i = 0; i < length; i++) arr[i] = str.charCodeAt(i)
      }

      return arr
    }
  : (str, arr) => {
      const length = str.length
      // Can be optimized with unrolling, but this is not used on non-Hermes atm
      for (let i = 0; i < length; i++) arr[i] = str.charCodeAt(i)
      return arr
    }
|
||||
|
||||
/**
 * Writes the leading ASCII char codes of `s` into `x`, 4 chars per iteration,
 * and returns the number of chars written (always a multiple of 4).
 * Stops at the first group containing a non-ASCII char, or when fewer than
 * 4 chars remain — the caller is responsible for the unprocessed tail.
 *
 * @param {Uint8Array} x - destination, at least s.length long
 * @param {string} s - source string
 * @returns {number} count of chars copied
 */
export function encodeAsciiPrefix(x, s) {
  const stop = s.length - 3
  let pos = 0
  while (pos < stop) {
    const c0 = s.charCodeAt(pos)
    const c1 = s.charCodeAt(pos + 1)
    const c2 = s.charCodeAt(pos + 2)
    const c3 = s.charCodeAt(pos + 3)
    // Any code >= 128 makes the OR >= 128; bail without writing this group
    if ((c0 | c1 | c2 | c3) >= 128) break
    x[pos] = c0
    x[pos + 1] = c1
    x[pos + 2] = c2
    x[pos + 3] = c3
    pos += 4
  }

  return pos
}
|
||||
|
||||
/* eslint-enable @exodus/mutable/no-param-reassign-prop-only */
|
||||
|
||||
// Warning: can be used only on checked strings, converts strings to 8-bit
// (char codes above 0xFF are silently truncated by the Uint8Array store)
export const encodeLatin1 = (str) => encodeCharcodes(str, new Uint8Array(str.length))
|
||||
|
||||
// Expects nativeEncoder to be present
// Encodes an ASCII-only string to a Uint8Array; throws SyntaxError(ERR) on any
// non-ASCII char (detected by comparing encoded length to string length, since
// any char >= 0x80 encodes to more than one UTF-8 byte).
export const encodeAscii = isHermes
  ? (str, ERR) => {
      // Much faster in Hermes
      const codes = new Uint8Array(str.length + 4) // overshoot by a full utf8 char
      const info = nativeEncoder.encodeInto(str, codes)
      if (info.read !== str.length || info.written !== str.length) throw new SyntaxError(ERR) // non-ascii
      return codes.subarray(0, str.length)
    }
  : nativeBuffer
    ? (str, ERR) => {
        // TextEncoder is slow on Node.js 24 / 25 (was ok on 22)
        const codes = nativeBuffer.from(str, 'utf8') // ascii/latin1 coerces, we need to check
        if (codes.length !== str.length) throw new SyntaxError(ERR) // non-ascii
        return new Uint8Array(codes.buffer, codes.byteOffset, codes.byteLength)
      }
    : (str, ERR) => {
        const codes = nativeEncoder.encode(str)
        if (codes.length !== str.length) throw new SyntaxError(ERR) // non-ascii
        return codes
      }
|
||||
1
node_modules/@exodus/bytes/fallback/multi-byte.encodings.cjs
generated
vendored
Normal file
1
node_modules/@exodus/bytes/fallback/multi-byte.encodings.cjs
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
// CommonJS indirection: exports a thunk so the large JSON table is only parsed on first call
module.exports = () => require('./multi-byte.encodings.json') // lazy-load
|
||||
546
node_modules/@exodus/bytes/fallback/multi-byte.encodings.json
generated
vendored
Normal file
546
node_modules/@exodus/bytes/fallback/multi-byte.encodings.json
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
962
node_modules/@exodus/bytes/fallback/multi-byte.js
generated
vendored
Normal file
962
node_modules/@exodus/bytes/fallback/multi-byte.js
generated
vendored
Normal file
@@ -0,0 +1,962 @@
|
||||
import { E_STRING } from './_utils.js'
|
||||
import { asciiPrefix, decodeAscii, decodeLatin1, decodeUCS2, encodeAscii } from './latin1.js'
|
||||
import { getTable } from './multi-byte.table.js'
|
||||
|
||||
// Message for the TypeError thrown in strict (non-loose) mode on malformed input
export const E_STRICT = 'Input is not well-formed for this encoding'
|
||||
|
||||
/* Decoders */
|
||||
|
||||
// If the decoder is not cleared properly, state can be preserved between non-streaming calls!
// See comment about fatal stream

// All except iso-2022-jp are ASCII supersets
// When adding something that is not an ASCII superset, adjust the ASCII fast path

// Each entry is a factory: (err) => ({ decode(arr, start, end, stream), isAscii() }).
// decode() consumes bytes arr[start..end) and returns a string; when `stream` is true,
// an incomplete trailing sequence is carried in closure state to the next call.
// err() returns the code unit to emit for an error (U+FFFD in loose mode) or throws
// (strict mode) — see multibyteDecoder below.
const mappers = {
  // https://encoding.spec.whatwg.org/#euc-kr-decoder
  'euc-kr': (err) => {
    const euc = getTable('euc-kr')
    let lead = 0 // pending lead byte between decode() calls (streaming)
    let oi = 0
    let o16 // scratch UTF-16 output buffer, shared with decodeLead

    // Consume the byte following a pending lead byte
    const decodeLead = (b) => {
      if (b < 0x41 || b > 0xfe) {
        lead = 0
        o16[oi++] = err()
        if (b < 128) o16[oi++] = b // invalid trail in ASCII range is re-emitted as itself
      } else {
        const p = euc[(lead - 0x81) * 190 + b - 0x41]
        lead = 0
        if (p) {
          o16[oi++] = p
        } else {
          o16[oi++] = err()
          if (b < 128) o16[oi++] = b
        }
      }
    }

    const decode = (arr, start, end, stream) => {
      let i = start
      o16 = new Uint16Array(end - start + (lead ? 1 : 0)) // there are pairs but they consume more than one byte
      oi = 0

      // Fast path
      if (!lead) {
        for (const last1 = end - 1; i < last1; ) {
          const l = arr[i]
          if (l < 128) {
            o16[oi++] = l
            i++
          } else {
            if (l === 0x80 || l === 0xff) break
            const b = arr[i + 1]
            if (b < 0x41 || b === 0xff) break
            const p = euc[(l - 0x81) * 190 + b - 0x41]
            if (!p) break
            o16[oi++] = p
            i += 2
          }
        }
      }

      // Slow path: handles carried-over lead and anything the fast path bailed on
      if (lead && i < end) decodeLead(arr[i++])
      while (i < end) {
        const b = arr[i++]
        if (b < 128) {
          o16[oi++] = b
        } else if (b === 0x80 || b === 0xff) {
          o16[oi++] = err()
        } else {
          lead = b
          if (i < end) decodeLead(arr[i++])
        }
      }

      // Non-streaming EOF with a dangling lead byte is an error
      if (lead && !stream) {
        lead = 0
        o16[oi++] = err()
      }

      const res = decodeUCS2(o16, oi)
      o16 = null // release the scratch buffer so it doesn't outlive the call
      return res
    }

    return { decode, isAscii: () => lead === 0 }
  },
  // https://encoding.spec.whatwg.org/#euc-jp-decoder
  'euc-jp': (err) => {
    const jis0208 = getTable('jis0208')
    const jis0212 = getTable('jis0212')
    let j12 = false // true when a 0x8f escape selected the jis0212 table
    let lead = 0
    let oi = 0
    let o16

    const decodeLead = (b) => {
      if (lead === 0x8e && b >= 0xa1 && b <= 0xdf) {
        lead = 0
        o16[oi++] = 0xfe_c0 + b // half-width katakana
      } else if (lead === 0x8f && b >= 0xa1 && b <= 0xfe) {
        j12 = true
        lead = b // 3-byte sequence: keep waiting for the trail byte
      } else {
        let cp
        if (lead >= 0xa1 && lead <= 0xfe && b >= 0xa1 && b <= 0xfe) {
          cp = (j12 ? jis0212 : jis0208)[(lead - 0xa1) * 94 + b - 0xa1]
        }

        lead = 0
        j12 = false
        if (cp) {
          o16[oi++] = cp
        } else {
          o16[oi++] = err()
          if (b < 128) o16[oi++] = b
        }
      }
    }

    const decode = (arr, start, end, stream) => {
      let i = start
      o16 = new Uint16Array(end - start + (lead ? 1 : 0))
      oi = 0

      // Fast path, non-j12
      // lead = 0 means j12 = 0
      if (!lead) {
        for (const last1 = end - 1; i < last1; ) {
          const l = arr[i]
          if (l < 128) {
            o16[oi++] = l
            i++
          } else {
            const b = arr[i + 1]
            if (l === 0x8e && b >= 0xa1 && b <= 0xdf) {
              o16[oi++] = 0xfe_c0 + b
              i += 2
            } else {
              // 0x8f (jis0212) leads fall through to the slow path via l < 0xa1
              if (l < 0xa1 || l === 0xff || b < 0xa1 || b === 0xff) break
              const cp = jis0208[(l - 0xa1) * 94 + b - 0xa1]
              if (!cp) break
              o16[oi++] = cp
              i += 2
            }
          }
        }
      }

      if (lead && i < end) decodeLead(arr[i++])
      if (lead && i < end) decodeLead(arr[i++]) // could be two leads, but no more
      while (i < end) {
        const b = arr[i++]
        if (b < 128) {
          o16[oi++] = b
        } else if ((b < 0xa1 && b !== 0x8e && b !== 0x8f) || b === 0xff) {
          o16[oi++] = err()
        } else {
          lead = b
          if (i < end) decodeLead(arr[i++])
          if (lead && i < end) decodeLead(arr[i++]) // could be two leads
        }
      }

      if (lead && !stream) {
        lead = 0
        j12 = false // can be true only when lead is non-zero
        o16[oi++] = err()
      }

      const res = decodeUCS2(o16, oi)
      o16 = null
      return res
    }

    return { decode, isAscii: () => lead === 0 } // j12 can be true only when lead is non-zero
  },
  // https://encoding.spec.whatwg.org/#iso-2022-jp-decoder
  'iso-2022-jp': (err) => {
    const jis0208 = getTable('jis0208')
    // Spec state machine: 1 = ASCII, 2 = Roman, 3 = Katakana, 4 = lead byte,
    // 5 = trail byte, 6 = escape start, 7 = escape
    let dState = 1 // decoder state
    let oState = 1 // output state to fall back to after an escape sequence
    let lead = 0 // 0 or 0x21-0x7e
    let out = false // "output flag" per spec: set after a mode switch, guards duplicate escapes

    // Process one byte; returns a code point to emit, undefined (emit nothing),
    // or whatever err() returns/throws. Unconsumed bytes go into `pushback`.
    const bytes = (pushback, b) => {
      if (dState < 5 && b === 0x1b) {
        dState = 6 // escape start
        return
      }

      switch (dState) {
        case 1:
        case 2:
          // ASCII, Roman (common)
          out = false
          if (dState === 2) {
            if (b === 0x5c) return 0xa5 // yen sign
            if (b === 0x7e) return 0x20_3e // overline
          }

          if (b <= 0x7f && b !== 0x0e && b !== 0x0f) return b
          return err()
        case 3:
          // Katakana
          out = false
          if (b >= 0x21 && b <= 0x5f) return 0xff_40 + b
          return err()
        case 4:
          // Leading byte
          out = false
          if (b < 0x21 || b > 0x7e) return err()
          lead = b
          dState = 5
          return
        case 5:
          // Trailing byte
          out = false
          if (b === 0x1b) {
            dState = 6 // escape start
            return err()
          }

          dState = 4
          if (b >= 0x21 && b <= 0x7e) {
            const cp = jis0208[(lead - 0x21) * 94 + b - 0x21]
            if (cp) return cp
          }

          return err()
        case 6:
          // Escape start
          if (b === 0x24 || b === 0x28) {
            lead = b
            dState = 7
            return
          }

          out = false
          dState = oState
          pushback.push(b) // reprocess this byte in the restored state
          return err()
        case 7: {
          // Escape
          const l = lead
          lead = 0
          let s
          if (l === 0x28) {
            // eslint-disable-next-line unicorn/prefer-switch
            if (b === 0x42) {
              s = 1
            } else if (b === 0x4a) {
              s = 2
            } else if (b === 0x49) {
              s = 3
            }
          } else if (l === 0x24 && (b === 0x40 || b === 0x42)) {
            s = 4
          }

          if (s) {
            dState = oState = s
            const output = out
            out = true
            return output ? err() : undefined // back-to-back escapes are an error
          }

          out = false
          dState = oState
          pushback.push(b, l) // reprocess both escape bytes (popped in reverse)
          return err()
        }
      }
    }

    // End-of-stream handling; returns null when the state is clean
    const eof = (pushback) => {
      if (dState < 5) return null
      out = false
      switch (dState) {
        case 5:
          dState = 4
          return err()
        case 6:
          dState = oState
          return err()
        case 7: {
          dState = oState
          pushback.push(lead)
          lead = 0
          return err()
        }
      }
    }

    const decode = (arr, start, end, stream) => {
      const o16 = new Uint16Array(end - start + 2) // err in eof + lead from state
      let oi = 0
      let i = start
      const pushback = [] // local and auto-cleared

      // First, dump everything until EOF
      // Same as the full loop, but without EOF handling
      while (i < end || pushback.length > 0) {
        const c = bytes(pushback, pushback.length > 0 ? pushback.pop() : arr[i++])
        if (c !== undefined) o16[oi++] = c // 16-bit
      }

      // Then, dump EOF. This needs the same loop as the characters can be pushed back
      if (!stream) {
        while (i <= end || pushback.length > 0) {
          if (i < end || pushback.length > 0) {
            const c = bytes(pushback, pushback.length > 0 ? pushback.pop() : arr[i++])
            if (c !== undefined) o16[oi++] = c // 16-bit
          } else {
            const c = eof(pushback)
            if (c === null) break // clean exit
            o16[oi++] = c
          }
        }
      }

      // Chrome and WebKit fail on this, we don't: completely destroy the old decoder state when finished streaming
      // > If this’s do not flush is false, then set this’s decoder to a new instance of this’s encoding’s decoder,
      // > Set this’s do not flush to options["stream"]
      if (!stream) {
        dState = oState = 1
        lead = 0
        out = false
      }

      return decodeUCS2(o16, oi)
    }

    return { decode, isAscii: () => false }
  },
  // https://encoding.spec.whatwg.org/#shift_jis-decoder
  shift_jis: (err) => {
    const jis0208 = getTable('jis0208')
    let lead = 0
    let oi = 0
    let o16

    const decodeLead = (b) => {
      const l = lead
      lead = 0
      if (b >= 0x40 && b <= 0xfc && b !== 0x7f) {
        const p = (l - (l < 0xa0 ? 0x81 : 0xc1)) * 188 + b - (b < 0x7f ? 0x40 : 0x41)
        if (p >= 8836 && p <= 10_715) {
          o16[oi++] = 0xe0_00 - 8836 + p // NEC/IBM gap maps to private use area per spec
          return
        }

        const cp = jis0208[p]
        if (cp) {
          o16[oi++] = cp
          return
        }
      }

      o16[oi++] = err()
      if (b < 128) o16[oi++] = b
    }

    const decode = (arr, start, end, stream) => {
      o16 = new Uint16Array(end - start + (lead ? 1 : 0))
      oi = 0
      let i = start

      // Fast path
      if (!lead) {
        for (const last1 = end - 1; i < last1; ) {
          const l = arr[i]
          if (l <= 0x80) {
            o16[oi++] = l
            i++
          } else if (l >= 0xa1 && l <= 0xdf) {
            o16[oi++] = 0xfe_c0 + l // half-width katakana
            i++
          } else {
            if (l === 0xa0 || l > 0xfc) break
            const b = arr[i + 1]
            if (b < 0x40 || b > 0xfc || b === 0x7f) break
            const p = (l - (l < 0xa0 ? 0x81 : 0xc1)) * 188 + b - (b < 0x7f ? 0x40 : 0x41)
            if (p >= 8836 && p <= 10_715) {
              o16[oi++] = 0xe0_00 - 8836 + p
              i += 2
            } else {
              const cp = jis0208[p]
              if (!cp) break
              o16[oi++] = cp
              i += 2
            }
          }
        }
      }

      if (lead && i < end) decodeLead(arr[i++])
      while (i < end) {
        const b = arr[i++]
        if (b <= 0x80) {
          o16[oi++] = b // 0x80 is allowed
        } else if (b >= 0xa1 && b <= 0xdf) {
          o16[oi++] = 0xfe_c0 + b
        } else if (b === 0xa0 || b > 0xfc) {
          o16[oi++] = err()
        } else {
          lead = b
          if (i < end) decodeLead(arr[i++])
        }
      }

      if (lead && !stream) {
        lead = 0
        o16[oi++] = err()
      }

      const res = decodeUCS2(o16, oi)
      o16 = null
      return res
    }

    return { decode, isAscii: () => lead === 0 }
  },
  // https://encoding.spec.whatwg.org/#gbk-decoder
  gbk: (err) => mappers.gb18030(err), // 10.1.1. GBK’s decoder is gb18030’s decoder
  // https://encoding.spec.whatwg.org/#gb18030-decoder
  gb18030: (err) => {
    const gb18030 = getTable('gb18030')
    const gb18030r = getTable('gb18030-ranges')
    let g1 = 0, g2 = 0, g3 = 0 // prettier-ignore
    // Maps a 4-byte-sequence pointer to a code point via the ranges table
    const index = (p) => {
      if ((p > 39_419 && p < 189_000) || p > 1_237_575) return
      if (p === 7457) return 0xe7_c7
      let a = 0, b = 0 // prettier-ignore
      // Find the last range whose pointer start <= p
      for (const [c, d] of gb18030r) {
        if (c > p) break
        a = c
        b = d
      }

      return b + p - a
    }

    // g1 is 0 or 0x81-0xfe
    // g2 is 0 or 0x30-0x39
    // g3 is 0 or 0x81-0xfe

    const decode = (arr, start, end, stream) => {
      const o16 = new Uint16Array(end - start + (g1 ? 3 : 0)) // even with pushback it's at most 1 char per byte
      let oi = 0
      let i = start
      const pushback = [] // local and auto-cleared

      // Fast path for 2-byte only
      // pushback is always empty at start, and g1 = 0 means g2 = g3 = 0
      if (g1 === 0) {
        for (const last1 = end - 1; i < last1; ) {
          const b = arr[i]
          if (b < 128) {
            o16[oi++] = b
            i++
          } else if (b === 0x80) {
            o16[oi++] = 0x20_ac // euro sign
            i++
          } else {
            if (b === 0xff) break
            const n = arr[i + 1]
            let cp
            if (n < 0x7f) {
              if (n < 0x40) break // 0x30-0x39 would start a 4-byte sequence — slow path
              cp = gb18030[(b - 0x81) * 190 + n - 0x40]
            } else {
              if (n === 0xff || n === 0x7f) break
              cp = gb18030[(b - 0x81) * 190 + n - 0x41]
            }

            if (!cp) break
            o16[oi++] = cp // 16-bit
            i += 2
          }
        }
      }

      // First, dump everything until EOF
      // Same as the full loop, but without EOF handling
      while (i < end || pushback.length > 0) {
        const b = pushback.length > 0 ? pushback.pop() : arr[i++]
        if (g1) {
          // g2 can be set only when g1 is set, g3 can be set only when g2 is set
          // hence, 3 checks for g3 is faster than 3 checks for g1
          if (g2) {
            if (g3) {
              if (b <= 0x39 && b >= 0x30) {
                // Complete 4-byte sequence
                const p = index(
                  (g1 - 0x81) * 12_600 + (g2 - 0x30) * 1260 + (g3 - 0x81) * 10 + b - 0x30
                )
                g1 = g2 = g3 = 0
                if (p === undefined) {
                  o16[oi++] = err()
                } else if (p <= 0xff_ff) {
                  o16[oi++] = p // Can validly return replacement
                } else {
                  // Astral code point — emit a surrogate pair
                  const d = p - 0x1_00_00
                  o16[oi++] = 0xd8_00 | (d >> 10)
                  o16[oi++] = 0xdc_00 | (d & 0x3_ff)
                }
              } else {
                // Broken 4-byte sequence: reprocess the trailing bytes (popped in order)
                pushback.push(b, g3, g2)
                g1 = g2 = g3 = 0
                o16[oi++] = err()
              }
            } else if (b >= 0x81 && b <= 0xfe) {
              g3 = b
            } else {
              pushback.push(b, g2)
              g1 = g2 = 0
              o16[oi++] = err()
            }
          } else if (b <= 0x39 && b >= 0x30) {
            g2 = b // possible 4-byte sequence
          } else {
            // 2-byte sequence
            let cp
            if (b >= 0x40 && b <= 0xfe && b !== 0x7f) {
              cp = gb18030[(g1 - 0x81) * 190 + b - (b < 0x7f ? 0x40 : 0x41)]
            }

            g1 = 0
            if (cp) {
              o16[oi++] = cp // 16-bit
            } else {
              o16[oi++] = err()
              if (b < 128) o16[oi++] = b // can be processed immediately
            }
          }
        } else if (b < 128) {
          o16[oi++] = b
        } else if (b === 0x80) {
          o16[oi++] = 0x20_ac
        } else if (b === 0xff) {
          o16[oi++] = err()
        } else {
          g1 = b
        }
      }

      // if g1 = 0 then g2 = g3 = 0
      if (g1 && !stream) {
        g1 = g2 = g3 = 0
        o16[oi++] = err()
      }

      return decodeUCS2(o16, oi)
    }

    return { decode, isAscii: () => g1 === 0 } // if g1 = 0 then g2 = g3 = 0
  },
  // https://encoding.spec.whatwg.org/#big5
  big5: (err) => {
    // The only decoder which returns multiple codepoints per byte, also has non-charcode codepoints
    // We store that as strings
    const big5 = getTable('big5')
    let lead = 0
    let oi = 0
    let o16

    const decodeLead = (b) => {
      if (b < 0x40 || (b > 0x7e && b < 0xa1) || b === 0xff) {
        lead = 0
        o16[oi++] = err()
        if (b < 128) o16[oi++] = b
      } else {
        const p = big5[(lead - 0x81) * 157 + b - (b < 0x7f ? 0x40 : 0x62)]
        lead = 0
        if (p > 0x1_00_00) {
          // Table packs two UTF-16 code units into one 32-bit entry
          o16[oi++] = p >> 16
          o16[oi++] = p & 0xff_ff
        } else if (p) {
          o16[oi++] = p
        } else {
          o16[oi++] = err()
          if (b < 128) o16[oi++] = b
        }
      }
    }

    // eslint-disable-next-line sonarjs/no-identical-functions
    const decode = (arr, start, end, stream) => {
      let i = start
      o16 = new Uint16Array(end - start + (lead ? 1 : 0)) // there are pairs but they consume more than one byte
      oi = 0

      // Fast path
      if (!lead) {
        for (const last1 = end - 1; i < last1; ) {
          const l = arr[i]
          if (l < 128) {
            o16[oi++] = l
            i++
          } else {
            if (l === 0x80 || l === 0xff) break
            const b = arr[i + 1]
            if (b < 0x40 || (b > 0x7e && b < 0xa1) || b === 0xff) break
            const p = big5[(l - 0x81) * 157 + b - (b < 0x7f ? 0x40 : 0x62)]
            if (p > 0x1_00_00) {
              o16[oi++] = p >> 16
              o16[oi++] = p & 0xff_ff
            } else {
              if (!p) break
              o16[oi++] = p
            }

            i += 2
          }
        }
      }

      if (lead && i < end) decodeLead(arr[i++])
      while (i < end) {
        const b = arr[i++]
        if (b < 128) {
          o16[oi++] = b
        } else if (b === 0x80 || b === 0xff) {
          o16[oi++] = err()
        } else {
          lead = b
          if (i < end) decodeLead(arr[i++])
        }
      }

      if (lead && !stream) {
        lead = 0
        o16[oi++] = err()
      }

      const res = decodeUCS2(o16, oi)
      o16 = null
      return res
    }

    return { decode, isAscii: () => lead === 0 }
  },
}
|
||||
|
||||
// iso-2022-jp is escape-sequence based; every other supported encoding decodes 0x00-0x7f as ASCII
export const isAsciiSuperset = (enc) => enc !== 'iso-2022-jp' // all others are ASCII supersets and can use fast path
|
||||
|
||||
/**
 * Creates a stateful decode function for one of the supported multi-byte encodings.
 *
 * The returned function has signature (arr, stream = false) => string. When
 * `stream` is true, an incomplete trailing sequence is kept in the lazily created
 * mapper's closure state for the next call; when false, dangling state is flushed
 * as an error at end of input.
 *
 * @param {string} enc - one of the keys of `mappers`
 * @param {boolean} [loose=false] - emit U+FFFD on errors instead of throwing
 * @returns {(arr: Uint8Array, stream?: boolean) => string}
 * @throws {TypeError} if loose is not a boolean
 * @throws {RangeError} for unsupported encodings
 */
export function multibyteDecoder(enc, loose = false) {
  if (typeof loose !== 'boolean') throw new TypeError('loose option should be boolean')
  if (!Object.hasOwn(mappers, enc)) throw new RangeError('Unsupported encoding')

  // Input is assumed to be typechecked already
  let mapper
  const asciiSuperset = isAsciiSuperset(enc)
  let streaming // because onErr is cached in mapper
  const onErr = loose
    ? () => 0xff_fd // loose mode: emit U+FFFD replacement character
    : () => {
        // The correct way per spec seems to be not destroying the decoder state in stream mode, even when fatal
        // Decoders big5, euc-jp, euc-kr, shift_jis, gb18030 / gbk - all clear state before throwing unless EOF, so not affected
        // iso-2022-jp is the only tricky one where this !stream check matters in non-stream mode
        if (!streaming) mapper = null // destroy state, effectively the same as 'do not flush' = false, but early
        throw new TypeError(E_STRICT)
      }

  return (arr, stream = false) => {
    let res = ''
    // ASCII fast path: only valid when no multi-byte state is pending from a prior chunk
    if (asciiSuperset && (!mapper || mapper.isAscii?.())) {
      const prefixLen = asciiPrefix(arr)
      if (prefixLen === arr.length) return decodeAscii(arr) // ascii
      res = decodeLatin1(arr, 0, prefixLen) // TODO: check if decodeAscii with subarray is faster for small prefixes too
    }

    streaming = stream // affects onErr
    if (!mapper) mapper = mappers[enc](onErr)
    return res + mapper.decode(arr, res.length, arr.length, stream)
  }
}
|
||||
|
||||
/* Encoders */
|
||||
|
||||
// Per-encoding encode maps (code point -> packed encoded value), cached by getMap
const maps = new Map()
// Codepoint/byte patch pairs applied in getMap's gb18030 branch (0xE7xx / 0xE8xx fixups)
const e7 = [[148, 236], [149, 237], [150, 243]] // prettier-ignore
const e8 = [[30, 89], [38, 97], [43, 102], [44, 103], [50, 109], [67, 126], [84, 144], [100, 160]] // prettier-ignore
// Convert a decode-table pointer into the packed (lead << 8 | trail) byte pair for
// each encoding; encodings without an entry store pointer + 1 (see getMap)
const preencoders = {
  __proto__: null,
  big5: (p) => ((((p / 157) | 0) + 0x81) << 8) | ((p % 157 < 0x3f ? 0x40 : 0x62) + (p % 157)),
  shift_jis: (p) => {
    const l = (p / 188) | 0
    const t = p % 188
    return ((l + (l < 0x1f ? 0x81 : 0xc1)) << 8) | ((t < 0x3f ? 0x40 : 0x41) + t)
  },
  'iso-2022-jp': (p) => ((((p / 94) | 0) + 0x21) << 8) | ((p % 94) + 0x21),
  'euc-jp': (p) => ((((p / 94) | 0) + 0xa1) << 8) | ((p % 94) + 0xa1),
  'euc-kr': (p) => ((((p / 190) | 0) + 0x81) << 8) | ((p % 190) + 0x41),
  gb18030: (p) => ((((p / 190) | 0) + 0x81) << 8) | ((p % 190 < 0x3f ? 0x40 : 0x41) + (p % 190)),
}

preencoders.gbk = preencoders.gb18030 // gbk shares gb18030's two-byte layout
|
||||
|
||||
// We accept that encoders use non-trivial amount of mem, for perf
// most are are 128 KiB mem, big5 is 380 KiB, lazy-loaded at first use
/**
 * Builds (and caches) the encode lookup table for one encoding: a Uint16Array
 * indexed by code point whose entries are the preencoded byte pair (or pointer + 1
 * when no preencoder exists). 0 means "not encodable".
 *
 * @param {string} id - encoding id (also the cache key)
 * @param {number} size - table size; max encodable code point + 1
 * @param {boolean} ascii - whether to identity-map 0x00-0x7f
 * @returns {Uint16Array}
 */
function getMap(id, size, ascii) {
  const cached = maps.get(id)
  if (cached) return cached
  let tname = id
  const sjis = id === 'shift_jis'
  const iso2022jp = id === 'iso-2022-jp'
  // Several encodings encode through the shared jis0208 / gb18030 decode tables
  if (iso2022jp) tname = 'jis0208'
  if (id === 'gbk') tname = 'gb18030'
  if (id === 'euc-jp' || sjis) tname = 'jis0208'
  const table = getTable(tname)
  const map = new Uint16Array(size)
  const enc = preencoders[id] || ((p) => p + 1)
  // Invert the decode table: first (or special-cased) pointer wins per code point
  for (let i = 0; i < table.length; i++) {
    const c = table[i]
    if (!c) continue
    if (id === 'big5') {
      if (i < 5024) continue // this also skips multi-codepoint strings
      // In big5, all return first entries except for these
      if (
        map[c] &&
        c !== 0x25_50 &&
        c !== 0x25_5e &&
        c !== 0x25_61 &&
        c !== 0x25_6a &&
        c !== 0x53_41 &&
        c !== 0x53_45
      ) {
        continue
      }
    } else {
      if (sjis && i >= 8272 && i <= 8835) continue
      if (map[c]) continue
    }

    if (c > 0xff_ff) {
      // always a single codepoint here
      const s = String.fromCharCode(c >> 16, c & 0xff_ff)
      map[s.codePointAt(0)] = enc(i)
    } else {
      map[c] = enc(i)
    }
  }

  if (ascii) for (let i = 0; i < 0x80; i++) map[i] = i
  // Per-encoding fixups on top of the inverted table
  if (sjis || id === 'euc-jp') {
    if (sjis) map[0x80] = 0x80
    const d = sjis ? 0xfe_c0 : 0x70_c0
    for (let i = 0xff_61; i <= 0xff_9f; i++) map[i] = i - d // half-width katakana
    map[0x22_12] = map[0xff_0d]
    map[0xa5] = 0x5c
    map[0x20_3e] = 0x7e
  } else if (tname === 'gb18030') {
    if (id === 'gbk') map[0x20_ac] = 0x80 // euro sign is a single byte in gbk
    for (let i = 0xe7_8d; i <= 0xe7_93; i++) map[i] = i - 0x40_b4
    for (const [a, b] of e7) map[0xe7_00 | a] = 0xa6_00 | b
    for (const [a, b] of e8) map[0xe8_00 | a] = 0xfe_00 | b
  }

  maps.set(id, map)
  return map
}
|
||||
|
||||
// Matches any char above U+00FF — used as a cheap pre-check before trying encodeAscii
// (chars 0x80-0xFF still get rejected by encodeAscii's own length check)
const NON_LATIN = /[^\x00-\xFF]/ // eslint-disable-line no-control-regex
// Lazily loaded tables shared across encoder instances
let gb18030r, katakana
|
||||
|
||||
/**
 * Builds an encoder for a legacy multi-byte encoding (e.g. shift_jis, euc-jp,
 * iso-2022-jp, gbk/gb18030, big5, euc-kr).
 *
 * @param {string} enc - encoding id; must be a key of `mappers`
 * @param {Function} [onError] - optional fallback `(codePoint, u8, i) => bytesWritten`
 *   called for unencodable code points; without it the encoder throws a TypeError
 * @returns {(str: string) => Uint8Array} encoder producing the encoded bytes
 * @throws {RangeError} if the encoding is not supported
 */
export function multibyteEncoder(enc, onError) {
  if (!Object.hasOwn(mappers, enc)) throw new RangeError('Unsupported encoding')
  const size = enc === 'big5' ? 0x2_f8_a7 : 0x1_00_00 // for big5, max codepoint in table + 1
  const iso2022jp = enc === 'iso-2022-jp'
  const gb18030 = enc === 'gb18030'
  const ascii = isAsciiSuperset(enc)
  // Worst-case output bytes per UTF-16 unit, used to size the output buffer;
  // iso-2022-jp also reserves room for a trailing escape back to ASCII mode
  const width = iso2022jp ? 5 : gb18030 ? 4 : 2
  const tailsize = iso2022jp ? 3 : 0
  const map = getMap(enc, size, ascii)
  // Auxiliary tables, lazily loaded and shared across encoder instances
  if (gb18030 && !gb18030r) gb18030r = getTable('gb18030-ranges')
  if (iso2022jp && !katakana) katakana = getTable('iso-2022-jp-katakana')
  return (str) => {
    if (typeof str !== 'string') throw new TypeError(E_STRING)
    // Fast path: Latin-1-only input that encodes as plain ASCII needs no mapping
    if (ascii && !NON_LATIN.test(str)) {
      try {
        return encodeAscii(str, E_STRICT)
      } catch {}
    }

    const length = str.length
    const u8 = new Uint8Array(length * width + tailsize)
    let i = 0 // write position in u8

    // Copy the leading ASCII run byte-for-byte before entering the generic loops
    if (ascii) {
      while (i < length) {
        const x = str.charCodeAt(i)
        if (x >= 128) break
        u8[i++] = x
      }
    }

    // Error hook: returns number of bytes written by the handler, or throws
    // eslint-disable-next-line unicorn/consistent-function-scoping
    const err = (code) => {
      if (onError) return onError(code, u8, i)
      throw new TypeError(E_STRICT)
    }

    if (!map || map.length < size) /* c8 ignore next */ throw new Error('Unreachable') // Important for perf

    if (iso2022jp) {
      let state = 0 // 0 = ASCII, 1 = Roman, 2 = jis0208
      // Emits ESC ( B: switch back to ASCII mode
      const restore = () => {
        state = 0
        u8[i++] = 0x1b
        u8[i++] = 0x28
        u8[i++] = 0x42
      }

      for (let j = 0; j < length; j++) {
        let x = str.charCodeAt(j)
        if (x >= 0xd8_00 && x < 0xe0_00) {
          // Surrogates: astral code points are never encodable in iso-2022-jp
          if (state === 2) restore()
          if (x >= 0xdc_00 || j + 1 === length) {
            i += err(x) // lone
          } else {
            const x1 = str.charCodeAt(j + 1)
            if (x1 < 0xdc_00 || x1 >= 0xe0_00) {
              i += err(x) // lone
            } else {
              j++ // consume x1
              i += err(0x1_00_00 + ((x1 & 0x3_ff) | ((x & 0x3_ff) << 10))) // combined astral code point
            }
          }
        } else if (x < 0x80) {
          // 0x5c / 0x7e have different meanings in Roman mode, so leave it first
          if (state === 2 || (state === 1 && (x === 0x5c || x === 0x7e))) restore()
          if (x === 0xe || x === 0xf || x === 0x1b) {
            i += err(0xff_fd) // 12.2.2. step 3: This returns U+FFFD rather than codePoint to prevent attacks
          } else {
            u8[i++] = x
          }
        } else if (x === 0xa5 || x === 0x20_3e) {
          // Yen sign / overline are emitted in Roman mode (ESC ( J)
          if (state !== 1) {
            state = 1
            u8[i++] = 0x1b
            u8[i++] = 0x28
            u8[i++] = 0x4a
          }

          u8[i++] = x === 0xa5 ? 0x5c : 0x7e
        } else {
          if (x === 0x22_12) x = 0xff_0d // remap MINUS SIGN to FULLWIDTH HYPHEN-MINUS
          if (x >= 0xff_61 && x <= 0xff_9f) x = katakana[x - 0xff_61] // halfwidth -> fullwidth katakana
          const e = map[x]
          if (e) {
            // Switch to jis0208 mode (ESC $ B) before emitting a double-byte pair
            if (state !== 2) {
              state = 2
              u8[i++] = 0x1b
              u8[i++] = 0x24
              u8[i++] = 0x42
            }

            u8[i++] = e >> 8
            u8[i++] = e & 0xff
          } else {
            if (state === 2) restore()
            i += err(x)
          }
        }
      }

      if (state) restore() // output always ends in ASCII mode
    } else if (gb18030) {
      // Deduping this branch hurts other encoders perf
      // Four-byte gb18030 form, for code points absent from the two-byte table
      const encode = (cp) => {
        let a = 0, b = 0 // prettier-ignore
        // Find the last [pointer, codepoint] range starting at or below cp
        for (const [c, d] of gb18030r) {
          if (d > cp) break
          a = c
          b = d
        }

        let rp = cp === 0xe7_c7 ? 7457 : a + cp - b
        u8[i++] = 0x81 + ((rp / 12_600) | 0)
        rp %= 12_600
        u8[i++] = 0x30 + ((rp / 1260) | 0)
        rp %= 1260
        u8[i++] = 0x81 + ((rp / 10) | 0)
        u8[i++] = 0x30 + (rp % 10)
      }

      for (let j = i; j < length; j++) {
        const x = str.charCodeAt(j)
        if (x >= 0xd8_00 && x < 0xe0_00) {
          if (x >= 0xdc_00 || j + 1 === length) {
            i += err(x) // lone
          } else {
            const x1 = str.charCodeAt(j + 1)
            if (x1 < 0xdc_00 || x1 >= 0xe0_00) {
              i += err(x) // lone
            } else {
              j++ // consume x1
              encode(0x1_00_00 + ((x1 & 0x3_ff) | ((x & 0x3_ff) << 10)))
            }
          }
        } else {
          const e = map[x]
          if (e & 0xff_00) {
            // Two-byte form
            u8[i++] = e >> 8
            u8[i++] = e & 0xff
          } else if (e || x === 0) {
            u8[i++] = e // single-byte form
          } else if (x === 0xe5_e5) {
            i += err(x) // U+E5E5 is special-cased as an error (WHATWG gb18030 encoder)
          } else {
            encode(x)
          }
        }
      }
    } else {
      // shift_jis / euc-jp / euc-kr / big5; only big5 has table entries above the BMP
      const long =
        enc === 'big5'
          ? (x) => {
              const e = map[x]
              if (e & 0xff_00) {
                u8[i++] = e >> 8
                u8[i++] = e & 0xff
              } else if (e || x === 0) {
                u8[i++] = e
              } else {
                i += err(x)
              }
            }
          : (x) => {
              i += err(x)
            }

      for (let j = i; j < length; j++) {
        const x = str.charCodeAt(j)
        if (x >= 0xd8_00 && x < 0xe0_00) {
          if (x >= 0xdc_00 || j + 1 === length) {
            i += err(x) // lone
          } else {
            const x1 = str.charCodeAt(j + 1)
            if (x1 < 0xdc_00 || x1 >= 0xe0_00) {
              i += err(x) // lone
            } else {
              j++ // consume x1
              long(0x1_00_00 + ((x1 & 0x3_ff) | ((x & 0x3_ff) << 10)))
            }
          }
        } else {
          const e = map[x]
          if (e & 0xff_00) {
            u8[i++] = e >> 8
            u8[i++] = e & 0xff
          } else if (e || x === 0) {
            u8[i++] = e
          } else {
            i += err(x)
          }
        }
      }
    }

    // Trim to the bytes actually written
    return i === u8.length ? u8 : u8.subarray(0, i)
  }
}
|
||||
129
node_modules/@exodus/bytes/fallback/multi-byte.table.js
generated
vendored
Normal file
129
node_modules/@exodus/bytes/fallback/multi-byte.table.js
generated
vendored
Normal file
@@ -0,0 +1,129 @@
|
||||
import { fromBase64url } from '@exodus/bytes/base64.js'
|
||||
import { utf16toString } from '@exodus/bytes/utf16.js'
|
||||
import loadEncodings from './multi-byte.encodings.cjs'
|
||||
import { to16input } from './utf16.js'
|
||||
|
||||
// Entry counts of the decoded index tables below
// (verified against the encoding spec tables by tests/multi-byte.test.js)
export const sizes = {
  jis0208: 11_104,
  jis0212: 7211,
  'euc-kr': 23_750,
  gb18030: 23_940,
  big5: 19_782,
}
|
||||
|
||||
// This is huge. It's _much_ smaller than https://npmjs.com/text-encoding though
|
||||
// Exactly as mapped by the index table
|
||||
// 0,x - hole of x empty elements
|
||||
// n,c - continious [c, ...] of length n
|
||||
// $.. - references to common chunks
|
||||
// -{x} - same as 1,{x}
|
||||
|
||||
// See tests/multi-byte.test.js to verify that this data decodes exactly into the encoding spec tables
|
||||
|
||||
let indices // compressed table data, lazily loaded from multi-byte.encodings.cjs
const tables = new Map() // id -> decoded table cache (see getTable)
/* eslint-disable @exodus/mutable/no-param-reassign-prop-only */
|
||||
|
||||
// Unpacks one base64url-packed table chunk. The first half of the decoded
// bytes holds per-entry deltas for the low byte (stored minus 1), the second
// half holds deltas for the high byte; the output interleaves the two running
// sums as little-endian 16-bit pairs.
function loadBase64(str) {
  const packed = fromBase64url(str)
  const total = packed.length
  const half = total >> 1
  const out = new Uint8Array(total)
  let lo = -1
  let hi = 0
  for (let src = 0, dst = 0; dst < total; src++, dst += 2) {
    lo = (lo + packed[src] + 1) & 0xff
    hi = (hi + packed[half + src]) & 0xff
    out[dst] = lo
    out[dst + 1] = hi
  }

  return out
}
|
||||
|
||||
// Decompresses one encoded table description `t` into `res`, starting at `pos`.
// Entry grammar (see the format comment above):
//   0, x     -> skip x empty (hole) slots
//   n, c     -> run of n consecutive codes starting at current code + c
//   -x       -> single code at current code + x (same as 1, x)
//   '$name'  -> inline a shared chunk from `indices`
//   other string -> base64url-packed little-endian list of explicit codes
// When highMode is set (big5), codepoints above 0xFFFF are stored as both
// UTF-16 units packed into one 32-bit slot.
// Returns the position just past the last written entry.
function unwrap(res, t, pos, highMode = false) {
  let code = 0 // running code value that deltas apply to
  for (let i = 0; i < t.length; i++) {
    let x = t[i]
    if (typeof x === 'number') {
      if (x === 0) {
        pos += t[++i] // hole of t[i] empty elements
      } else {
        if (x < 0) {
          // -x shorthand for a single entry
          code -= x
          x = 1
        } else {
          code += t[++i] // run of x entries starting at code + t[i]
        }

        if (highMode) {
          for (let k = 0; k < x; k++, pos++, code++) {
            if (code <= 0xff_ff) {
              res[pos] = code
            } else {
              // Pack the surrogate pair into one 32-bit slot: lead << 16 | trail
              const c = String.fromCodePoint(code)
              res[pos] = (c.charCodeAt(0) << 16) | c.charCodeAt(1)
            }
          }
        } else {
          for (let k = 0; k < x; k++, pos++, code++) res[pos] = code
        }
      }
    } else if (x[0] === '$' && Object.hasOwn(indices, x)) {
      pos = unwrap(res, indices[x], pos, highMode) // self-reference using shared chunks
    } else if (highMode) {
      const s = [...utf16toString(loadBase64(x), 'uint8-le')] // splits by codepoints
      let c
      for (let i = 0; i < s.length; ) {
        c = s[i++]
        res[pos++] = c.length === 1 ? c.charCodeAt(0) : (c.charCodeAt(0) << 16) | c.charCodeAt(1)
      }

      // Subsequent deltas continue from the last explicit codepoint + 1
      code = c.codePointAt(0) + 1
    } else {
      const u16 = to16input(loadBase64(x), true) // data is little-endian
      res.set(u16, pos)
      pos += u16.length
      code = u16[u16.length - 1] + 1
    }
  }

  return pos
}
|
||||
|
||||
/**
 * Loads, decodes and caches the index table for the given id.
 * - `*-ranges` ids decode to an array of delta-decoded [pointer, codePoint] pairs
 * - `*-katakana` ids decode to a Uint16Array of delta-decoded codes
 * - `big5` decodes to a Uint32Array where astral entries pack both UTF-16 units
 * - everything else decodes to a Uint16Array sized per `sizes`
 * The compressed source entry in `indices` is nulled after use to free memory.
 */
export function getTable(id) {
  const cached = tables.get(id)
  if (cached) return cached

  if (!indices) indices = loadEncodings() // lazy-load
  if (!Object.hasOwn(indices, id)) throw new Error('Unknown encoding')
  if (!indices[id]) throw new Error('Table already used (likely incorrect bundler dedupe)')

  let res
  if (id.endsWith('-ranges')) {
    // Pairs of deltas -> running [pointer, codepoint] pairs
    res = []
    let a = 0, b = 0 // prettier-ignore
    const idx = indices[id]
    while (idx.length > 0) res.push([(a += idx.shift()), (b += idx.shift())]) // destroying, we remove it later anyway
  } else if (id.endsWith('-katakana')) {
    // Deltas (stored minus 1) -> running code values
    let a = -1
    res = new Uint16Array(indices[id].map((x) => (a += x + 1)))
  } else if (id === 'big5') {
    if (!Object.hasOwn(sizes, id)) throw new Error('Unknown encoding')
    res = new Uint32Array(sizes[id]) // array of strings or undefined
    unwrap(res, indices[id], 0, true)
    // Pointer code updates are embedded into the table
    // These are skipped in encoder as encoder uses only pointers >= (0xA1 - 0x81) * 157
    res[1133] = 0xca_03_04
    res[1135] = 0xca_03_0c
    res[1164] = 0xea_03_04
    res[1166] = 0xea_03_0c
  } else {
    if (!Object.hasOwn(sizes, id)) throw new Error('Unknown encoding')
    res = new Uint16Array(sizes[id])
    unwrap(res, indices[id], 0, false)
  }

  indices[id] = null // gc
  tables.set(id, res)
  return res
}
|
||||
31
node_modules/@exodus/bytes/fallback/percent.js
generated
vendored
Normal file
31
node_modules/@exodus/bytes/fallback/percent.js
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
import { decodeAscii, encodeLatin1 } from './latin1.js'
|
||||
import { decode2string } from './_utils.js'
|
||||
|
||||
const ERR = 'percentEncodeSet must be a string of unique increasing codepoints in range 0x20 - 0x7e'
|
||||
const percentMap = new Map()
|
||||
let hex, base
|
||||
|
||||
/**
 * Returns a cached percent-encoding function for the given encode set.
 * Bytes outside 0x20-0x7e are always escaped; bytes in `set` are additionally escaped.
 *
 * @param {string} set - unique, increasing ASCII (0x20-0x7e) characters to percent-escape
 * @param {boolean} [spaceAsPlus=false] - encode 0x20 as '+' (form-urlencoded style)
 * @returns {(u8: Uint8Array, start?: number, end?: number) => string}
 * @throws {TypeError} on an invalid encode set or non-boolean spaceAsPlus
 */
export function percentEncoder(set, spaceAsPlus = false) {
  if (typeof set !== 'string' || /[^\x20-\x7E]/.test(set)) throw new TypeError(ERR)
  if (typeof spaceAsPlus !== 'boolean') throw new TypeError('spaceAsPlus must be boolean')
  const id = set + +spaceAsPlus
  const cached = percentMap.get(id)
  if (cached) return cached

  const n = encodeLatin1(set).sort() // string checked above to be ascii
  // Sorting must be a no-op (codepoints increasing) and all entries unique
  if (decodeAscii(n) !== set || new Set(n).size !== n.length) throw new TypeError(ERR)

  if (!base) {
    // hex: byte -> "%XX"; base: printable ASCII passes through, the rest is escaped
    hex = Array.from({ length: 256 }, (_, i) => `%${i.toString(16).padStart(2, '0').toUpperCase()}`)
    base = hex.map((h, i) => (i < 0x20 || i > 0x7e ? h : String.fromCharCode(i)))
  }

  const map = base.slice() // copy
  for (const c of n) map[c] = hex[c]
  if (spaceAsPlus) map[0x20] = '+' // overrides whatever percentEncodeSet thinks about it

  // Input is not typechecked, for internal use only
  const percentEncode = (u8, start = 0, end = u8.length) => decode2string(u8, start, end, map)
  percentMap.set(id, percentEncode)
  return percentEncode
}
|
||||
82
node_modules/@exodus/bytes/fallback/single-byte.encodings.js
generated
vendored
Normal file
82
node_modules/@exodus/bytes/fallback/single-byte.encodings.js
generated
vendored
Normal file
@@ -0,0 +1,82 @@
|
||||
// See tests/encoding/fixtures/single-byte/dump.js for generator
|
||||
|
||||
const r = 0xff_fd // U+FFFD marks a hole (unmapped byte) in a table
const e = (x) => new Array(x).fill(1) // run of x entries, each +1 from the previous (identity run)
const h = (x) => new Array(x).fill(r) // run of x holes
|
||||
|
||||
/* eslint-disable unicorn/numeric-separators-style, @exodus/export-default/named */
|
||||
|
||||
// Common ranges
|
||||
|
||||
// prettier-ignore
|
||||
const i2 = [-40,-147,1,64,-62,117,-51,-63,69,-67,79,-77,79,-77,1,64,2,51,4,-116,1,124,-122,1,129,22,-148,150,-148,1,133,-131,118,-116,1,33,-31,86,-51,-32,38,-36,48,-46,48,-46,1,33,2,51,4,-85,1,93,-91,1,98,22,-117,119,-117,1,102,374]
|
||||
const i4a = [-75, -63, e(5), 104, -34, -67, 79, -77, 75, -73, 1]
|
||||
const i4b = [34, -32, e(5), 73, -34, -36, 48, -46, 44, -42, 1]
|
||||
const i7 = [721, 1, 1, -719, 721, -719, 721, e(19), r, 2, e(43), r]
|
||||
const i8 = [e(26), r, r, 6692, 1, r]
|
||||
const i9 = [79, -77, e(11), 84, 46, -127, e(16), 48, -46, e(11), 53, 46]
|
||||
const iB = [3425, e(57), h(4), 5, e(28), h(4)]
|
||||
const p2 = [-99, 12, 20, -12, 17, 37, -29, 2]
|
||||
const p1 = [8237, -8235, 8089, -7816, 7820, 8, -6, 1]
|
||||
const w0 = [8237, -8235, 8089, -8087, 8091, 8, -6, 1, -8089, 8104]
|
||||
const w8 = [8072, 1, 3, 1, 5, -15, 1]
|
||||
const w1 = [w8, -7480, 7750, -8129, 7897, -7911, -182]
|
||||
const w3 = [w8, -8060, 8330, -8328, 8096, -8094]
|
||||
const m0 = [8558, -8328, 8374, -66, -8539, 16, 8043, -8070]
|
||||
// prettier-ignore
|
||||
const p3 = [1,1,65,-63,158,-156,1,1,1,40,30,42,-46,6,-66,1,83,-6,-6,-67,176,p2,-114,121,-119,1,1,155,-49,25,16,-142,159,2,-158,38,42,-46,6,-35,1,52,-6,-6,-36,145,p2,-83,90,-88,1,1,124,-49,25,16,-111,128,2]
|
||||
// prettier-ignore
|
||||
const k8a = [9345,2,10,4,4,4,4,8,8,8,8,68,4,4,4,4,1,1,1,-627,640,-903,1,46,28,1,-8645,8833,-8817,2,5,64,9305,1,1,-8449]
|
||||
// prettier-ignore
|
||||
const k8b = [-30,1,21,-18,1,15,-17,18,-13,e(7),16,-15,1,1,1,-13,-4,26,-1,-20,17,5,-4,-2,3]
|
||||
|
||||
// prettier-ignore
|
||||
const maps = {
|
||||
ibm866: [913,e(47),8530,1,1,-145,34,61,1,-12,-1,14,-18,6,6,-1,-1,-75,4,32,-8,-16,-28,60,34,1,-5,-6,21,-3,-6,-16,28,-5,1,-4,1,-12,-1,-6,1,24,-1,-82,-12,124,-4,8,4,-16,-8512,e(15),-78,80,-77,80,-77,80,-73,80,-942,8553,-8546,8547,-260,-8306,9468,-9472],
|
||||
'koi8-r': [k8a,8450,e(14),-8544,8545,e(10),-9411,933,k8b,-28,k8b],
|
||||
'koi8-u': [k8a,3,8448,-8446,1,8448,1,1,1,1,-8394,-51,8448,1,1,1,-8544,3,8543,-8541,1,8543,1,1,1,1,-8410,-130,-869,933,k8b,-28,k8b],
|
||||
'x-mac-cyrillic': [913,e(31),7153,-8048,992,-1005,4,8059,-8044,848,-856,-5,8313,-7456,80,7694,-7773,80,7627,-8557,8627,1,-7695,-929,988,-137,-4,80,-77,80,-78,80,-79,80,-2,-83,-857,m0,875,80,-79,80,-7,7102,1,8,1,-5,1,-7970,7975,-7184,80,-79,80,7351,-7445,80,-2,-31,e(30),7262],
|
||||
macintosh: [69,1,2,2,8,5,6,5,-1,2,2,-1,2,2,2,-1,2,1,2,-1,2,1,2,2,-1,2,2,-1,5,-1,2,1,7972,-8048,-14,1,4,8059,-8044,41,-49,-5,8313,-8302,-12,8632,-8602,18,8518,-8557,8627,1,-8640,16,8525,15,-2,-7759,7787,-8577,16,751,-707,18,-57,-30,11,m0,32,3,18,125,1,7872,1,8,1,-5,1,-7970,9427,-9419,121,7884,104,-115,1,56007,1,-56033,-8042,8035,4,18,-8046,8,-9,10,-3,5,1,1,-3,7,1,63531,-63533,8,1,-2,88,405,22,-557,553,1,1,-546,549,-2,-20],
|
||||
'windows-874': [8237,-8235,1,1,1,8098,-8096,e(10),w8,-8060,e(8),iB],
|
||||
}
|
||||
|
||||
// windows-1250 - windows-1258
|
||||
// prettier-ignore
|
||||
;[
|
||||
[w0,-7888,7897,-7903,10,25,-4,-233,w8,-8060,8330,-8129,7897,-7903,10,25,-4,-218,551,17,-407,-157,96,-94,1,1,1,181,-179,1,1,1,205,-203,1,554,-409,-142,1,1,1,1,77,90,-164,130,416,-415,62,i2],
|
||||
[899,1,7191,-7111,7115,8,-6,1,139,-124,-7207,7216,-7215,2,-1,4,67,7110,1,3,1,5,-15,1,-8060,8330,-7369,7137,-7136,2,-1,4,-959,878,80,-86,-868,1004,-1002,1,858,-856,859,-857,1,1,1,857,-855,1,853,80,59,-988,1,1,922,7365,-7362,-921,925,-83,80,2,-71,e(63)],
|
||||
[p1,-7515,7530,-7888,7897,-7911,-197,240,-238,1,w1,225,-6],
|
||||
[p1,-8089,8104,-8102,8111,-8109,1,1,1,1,w3,1,1,1,1,741,1,-739,e(6),r,2,1,1,1,8039,-8037,1,1,1,721,-719,1,1,i7],
|
||||
[p1,-7515,7530,-7888,7897,-7911,-197,1,1,1,w1,1,218,-216,e(47),i9],
|
||||
[p1,-7515,7530,-8102,8111,-8109,1,1,1,1,w8,-7480,7750,-8328,8096,-8094,e(7),8199,-8197,1,1,1,1,46,-44,e(14),62,-60,1,1,1,1,1265,e(19),45,1,1,1,1,h(7),-36,i8],
|
||||
[8237,-6702,6556,-7816,7820,8,-6,1,-7515,7530,-6583,6592,-7911,1332,18,-16,39,6505,1,3,1,5,-15,1,-6507,6777,-6801,6569,-7911,7865,1,-6483,-1562,1388,-1386,e(7),1557,-1555,e(14),1378,-1376,1,1,1,1377,162,-160,e(21),-1375,1376,1,1,1,6,1,1,1,-1379,1380,-1378,1379,1,1,1,-1377,1,1,1,1,1374,1,-1372,1,1372,1,1,1,-1370,1371,1,-1369,1370,-1368,1369,-1367,1,7954,1,-6461],
|
||||
[w0,-8102,8111,-8109,28,543,-527,-40,w3,19,556,-572,1,r,2,1,1,r,2,1,49,-47,173,-171,1,1,1,24,-22,e(5),p3,347],
|
||||
[p1,-7515,7530,-8102,8111,-7911,-197,1,1,1,w8,-7480,7750,-8328,8096,-7911,-182,1,218,-216,e(34),64,-62,e(7),565,-563,1,1,65,-63,568,-566,1,204,-202,e(6),211,340,-548,1,1,1,33,-31,e(7),534,-532,1,1,34,-32,562,-560,1,173,-171,e(6),180,7931],
|
||||
].forEach((m, i) => {
|
||||
maps[`windows-${i + 1250}`] = m
|
||||
});
|
||||
|
||||
// iso-8859-1 - iso-8859-16
|
||||
// prettier-ignore
|
||||
;[
|
||||
[], // Actual Latin1 / Unicode subset, non-WHATWG, which maps iso-8859-1 to windows-1252
|
||||
[100,468,-407,-157,153,29,-179,1,184,-2,6,21,-204,208,-2,-203,85,470,-409,-142,138,29,364,-527,169,-2,6,21,355,-351,-2,i2],
|
||||
[134,434,-565,1,r,128,-125,1,136,46,-64,22,-135,r,206,-203,119,-117,1,1,1,112,-110,1,121,46,-64,22,-120,r,191,-188,1,1,r,2,70,-2,-65,e(8),r,2,1,1,1,76,-74,1,69,-67,1,1,1,144,-16,-125,1,1,1,r,2,39,-2,-34,e(8),r,2,1,1,1,45,-43,1,38,-36,1,1,1,113,-16,380],
|
||||
[100,52,30,-178,132,19,-148,1,184,-78,16,68,-185,208,-206,1,85,470,-388,-163,117,19,395,-527,169,-78,16,68,-29,52,-51,i4a,92,-26,53,7,-22,-98,1,1,1,1,154,-152,1,1,140,2,-139,i4b,61,-26,53,7,-22,-67,1,1,1,1,123,-121,1,1,109,2,366],
|
||||
[865,e(11),-863,865,e(65),7367,-7365,e(11),-949,951,1],
|
||||
[r,r,r,4,h(7),1384,-1375,h(13),1390,r,r,r,4,r,2,e(25),h(5),6,e(18),h(13)],
|
||||
[8056,1,-8054,8201,3,-8201,1,1,1,721,-719,1,1,r,8040,-8037,1,1,1,721,1,1,-719,i7],
|
||||
[r,2,e(7),46,-44,e(14),62,-60,1,1,1,h(32),8025,-6727,i8],
|
||||
[e(47),i9], // non-WHATWG, which maps iso-8859-9 to windows-1254
|
||||
[100,14,16,8,-2,14,-143,148,-43,80,6,23,-208,189,-32,-154,85,14,16,8,-2,14,-128,133,-43,80,6,23,7831,-7850,-32,i4a,1,1,117,7,-121,1,1,1,146,-144,154,-152,e(5),i4b,1,1,86,7,-90,1,1,1,115,-113,123,-121,1,1,1,1,58],
|
||||
iB, // non-WHATWG, which maps iso-8859-11 to windows-874
|
||||
null, // no 12
|
||||
[8061,-8059,1,1,8058,-8056,1,49,-47,173,-171,1,1,1,24,-22,1,1,1,8041,-8039,p3,7835],
|
||||
[7522,1,-7520,103,1,7423,-7523,7641,-7639,7641,-119,231,-7749,1,202,7334,1,-7423,1,7455,1,-7563,7584,43,-42,44,-35,147,-111,1,-36,-7585,e(15),165,-163,e(5),7572,-7570,e(5),153,-151,e(16),134,-132,e(5),7541,-7539,e(5),122],
|
||||
[1,1,1,8201,-8199,187,-185,186,-184,e(10),202,-200,1,1,199,-197,1,1,151,1,37],
|
||||
[100,1,60,8043,-142,-7870,-185,186,-184,367,-365,206,-204,205,1,-203,1,91,54,59,7840,-8039,1,199,-113,268,-350,151,1,37,4,-188,1,1,64,-62,66,-64,e(9),65,51,-113,1,1,124,-122,132,22,-151,1,1,1,60,258,-315,1,1,1,33,-31,35,-33,e(9),34,51,-82,1,1,93,-91,101,22,-120,1,1,1,29,258],
|
||||
].forEach((m, i) => {
|
||||
if (m) maps[`iso-8859-${i + 1}`] = [e(33), m]
|
||||
})
|
||||
|
||||
export default maps
|
||||
110
node_modules/@exodus/bytes/fallback/single-byte.js
generated
vendored
Normal file
110
node_modules/@exodus/bytes/fallback/single-byte.js
generated
vendored
Normal file
@@ -0,0 +1,110 @@
|
||||
import { asciiPrefix, decodeAscii, decodeLatin1 } from './latin1.js'
|
||||
import encodings from './single-byte.encodings.js'
|
||||
import { decode2string, nativeDecoder } from './_utils.js'
|
||||
|
||||
export const E_STRICT = 'Input is not well-formed for this encoding'
|
||||
const xUserDefined = 'x-user-defined'
|
||||
const iso8i = 'iso-8859-8-i'
|
||||
|
||||
// Throws RangeError unless the encoding is a known single-byte encoding
// (a table key, x-user-defined, or the iso-8859-8-i alias).
export const assertEncoding = (encoding) => {
  const known =
    Object.hasOwn(encodings, encoding) || encoding === xUserDefined || encoding === iso8i
  if (!known) throw new RangeError('Unsupported encoding')
}
|
||||
|
||||
const r = 0xff_fd
|
||||
|
||||
/**
 * Expands the delta-compressed table for `encoding` into the 128 code points
 * for bytes 0x80-0xFF; holes remain 0xFFFD.
 * @throws {RangeError} for unsupported encodings
 */
export function getEncoding(encoding) {
  assertEncoding(encoding)
  if (encoding === xUserDefined) return Array.from({ length: 128 }, (_, i) => 0xf7_80 + i)
  if (encoding === iso8i) encoding = 'iso-8859-8' // alias, same table
  let prev = 127 // running code value; each entry is a delta from the previous
  const enc = encodings[encoding].flat().flat().flat() // max depth is 3, rechecked by tests
  return enc.map((x) => (x === r ? x : (prev += x))) // eslint-disable-line no-return-assign
}
|
||||
|
||||
const mappers = new Map()
|
||||
const decoders = new Map()
|
||||
const encmaps = new Map()
|
||||
|
||||
// Used only on Node.js, no reason to optimize for anything else
|
||||
// E.g. avoiding .from and filling zero-initialized arr manually is faster on Hermes, but we avoid this codepath on Hermes completely
|
||||
/**
 * Returns `{ mapper, incomplete }` for a single-byte encoding.
 * `mapper(arr, start)` copies the bytes into a Uint16Array, mapping each byte
 * through a 256-entry table; `incomplete` is true when the table has U+FFFD holes.
 */
export function encodingMapper(encoding) {
  const cached = mappers.get(encoding)
  if (cached) return cached

  const codes = getEncoding(encoding)
  const incomplete = codes.includes(r)
  let map
  const mapper = (arr, start = 0) => {
    if (!map) {
      // Lazily build the full byte -> code unit table; 0-127 map to themselves
      map = new Uint16Array(256).map((_, i) => i) // Unicode subset
      map.set(Uint16Array.from(codes), 128)
    }

    const o = Uint16Array.from(start === 0 ? arr : arr.subarray(start)) // copy to modify in-place, also those are 16-bit now
    let i = 0
    // Main loop unrolled x8 for speed
    for (const end7 = o.length - 7; i < end7; i += 8) {
      o[i] = map[o[i]]
      o[i + 1] = map[o[i + 1]]
      o[i + 2] = map[o[i + 2]]
      o[i + 3] = map[o[i + 3]]
      o[i + 4] = map[o[i + 4]]
      o[i + 5] = map[o[i + 5]]
      o[i + 6] = map[o[i + 6]]
      o[i + 7] = map[o[i + 7]]
    }

    // Remaining 0-7 elements
    for (const end = o.length; i < end; i++) o[i] = map[o[i]]
    return o
  }

  mappers.set(encoding, { mapper, incomplete })
  return { mapper, incomplete }
}
|
||||
|
||||
/**
 * Returns a cached decoder for a single-byte encoding.
 * `decoder(arr, loose)` -> string; unless `loose`, throws TypeError when the
 * encoding has unmapped bytes (U+FFFD holes) present in the input.
 */
export function encodingDecoder(encoding) {
  const cached = decoders.get(encoding)
  if (cached) return cached
  const isLatin1 = encoding === 'iso-8859-1'
  if (isLatin1 && !nativeDecoder) {
    // Latin-1 maps bytes to code units directly, no table needed.
    // Fix: cache this closure too — previously this path allocated a fresh
    // closure on every call and never populated `decoders`, unlike all other paths.
    const latin1Decoder = (arr, loose = false) => decodeLatin1(arr) // native decoder is faster for ascii below
    decoders.set(encoding, latin1Decoder)
    return latin1Decoder
  }

  let strings
  const codes = getEncoding(encoding)
  const incomplete = codes.includes(r) // table has holes -> strict mode can throw
  const decoder = (arr, loose = false) => {
    if (!strings) {
      // Lazily build the 256-entry byte -> string lookup (0-127 are ASCII identity)
      const allCodes = Array.from({ length: 128 }, (_, i) => i).concat(codes)
      while (allCodes.length < 256) allCodes.push(allCodes.length)
      strings = allCodes.map((c) => String.fromCharCode(c))
    }

    const prefixLen = asciiPrefix(arr)
    if (prefixLen === arr.length) return decodeAscii(arr)
    if (isLatin1) return decodeLatin1(arr) // TODO: check if decodeAscii with subarray is faster for small prefixes too
    const prefix = decodeLatin1(arr, 0, prefixLen) // TODO: check if decodeAscii with subarray is faster for small prefixes too
    const suffix = decode2string(arr, prefix.length, arr.length, strings)
    if (!loose && incomplete && suffix.includes('\uFFFD')) throw new TypeError(E_STRICT)
    return prefix + suffix
  }

  decoders.set(encoding, decoder)
  return decoder
}
|
||||
|
||||
/**
 * Builds and caches the reverse (encode) table for a single-byte encoding:
 * a Uint8Array indexed by code unit holding the encoded byte.
 * Unmapped code units stay 0 (only U+0000 legitimately encodes to 0).
 */
export function encodeMap(encoding) {
  const cached = encmaps.get(encoding)
  if (cached) return cached

  const codes = getEncoding(encoding)
  let max = 128
  while (codes.length < 128) codes.push(128 + codes.length) // pad short tables with identity
  for (const code of codes) if (code > max && code !== r) max = code // size by highest mapped code unit
  const map = new Uint8Array(max + 1) // < 10 KiB for all except macintosh, 63 KiB for macintosh
  for (let i = 0; i < 128; i++) {
    map[i] = i
    if (codes[i] !== r) map[codes[i]] = 128 + i // skip holes
  }

  encmaps.set(encoding, map)
  return map
}
|
||||
199
node_modules/@exodus/bytes/fallback/utf16.js
generated
vendored
Normal file
199
node_modules/@exodus/bytes/fallback/utf16.js
generated
vendored
Normal file
@@ -0,0 +1,199 @@
|
||||
import { decodeUCS2, encodeCharcodes } from './latin1.js'
|
||||
import { isLE } from './_utils.js'
|
||||
|
||||
export const E_STRICT = 'Input is not well-formed utf16'
|
||||
export const E_STRICT_UNICODE = 'Input is not well-formed Unicode'
|
||||
|
||||
const replacementCodepoint = 0xff_fd
|
||||
const replacementCodepointSwapped = 0xfd_ff
|
||||
|
||||
const to16 = (a) => new Uint16Array(a.buffer, a.byteOffset, a.byteLength / 2) // Requires checked length and alignment!
|
||||
|
||||
/**
 * Returns a Uint16Array over u8's bytes interpreted with the given endianness.
 * @param {Uint8Array} u8 - must have an even byteLength (assumed, not checked)
 * @param {boolean} le - true if the bytes are little-endian
 */
export function to16input(u8, le) {
  // Assume even number of bytes
  if (le === isLE) return to16(u8.byteOffset % 2 === 0 ? u8 : Uint8Array.from(u8)) // copy only if misaligned
  return to16(swap16(Uint8Array.from(u8))) // opposite byte order: copy, then swap in place
}
|
||||
|
||||
/**
 * Decodes a Uint16Array of UTF-16 code units into a string.
 * @param {Uint16Array} u16
 * @param {boolean} [loose=false] - replace lone surrogates with U+FFFD instead of throwing
 * @param {boolean} [checked=false] - input known well-formed, skip validation
 * @throws {TypeError} on ill-formed input unless loose
 */
export const decode = (u16, loose = false, checked = false) => {
  if (checked || isWellFormed(u16)) return decodeUCS2(u16)
  if (!loose) throw new TypeError(E_STRICT)
  return decodeUCS2(toWellFormed(Uint16Array.from(u16))) // cloned for replacement
}
|
||||
|
||||
/**
 * Encodes a string into a Uint16Array of UTF-16 code units.
 * @param {string} str
 * @param {boolean} [loose=false] - replace lone surrogates instead of throwing (unchecked mode only)
 * @param {boolean} [checked=false] - input known well-formed, skip surrogate validation
 * @param {boolean} [swapped=false] - emit byte-swapped (opposite-endian) code units
 */
export function encode(str, loose = false, checked = false, swapped = false) {
  const arr = new Uint16Array(str.length)
  if (checked) return swapped ? encodeCheckedSwapped(str, arr) : encodeChecked(str, arr)
  return swapped ? encodeUncheckedSwapped(str, arr, loose) : encodeUnchecked(str, arr, loose)
}
|
||||
|
||||
/* eslint-disable @exodus/mutable/no-param-reassign-prop-only */
|
||||
|
||||
// Assumes checked length % 2 === 0, otherwise does not swap tail
|
||||
// Exchanges the two bytes of every 16-bit unit in place and returns the array.
// Assumes checked length % 2 === 0, otherwise does not swap tail
function swap16(bytes) {
  const total = bytes.length
  let idx = 0

  // Unrolled: swap two 16-bit units per iteration while at least 4 bytes remain
  for (const stop = total - 3; idx < stop; idx += 4) {
    const b0 = bytes[idx]
    const b1 = bytes[idx + 1]
    const b2 = bytes[idx + 2]
    const b3 = bytes[idx + 3]
    bytes[idx] = b1
    bytes[idx + 1] = b0
    bytes[idx + 2] = b3
    bytes[idx + 3] = b2
  }

  // At most one complete 16-bit unit is left; an odd trailing byte is untouched
  if (idx + 1 < total) {
    const low = bytes[idx]
    bytes[idx] = bytes[idx + 1]
    bytes[idx + 1] = low
  }

  return bytes
}
|
||||
|
||||
// Splitting paths into small functions helps (at least on SpiderMonkey)
|
||||
|
||||
// Input already known well-formed: plain charcode copy into arr
const encodeChecked = (str, arr) => encodeCharcodes(str, arr) // Same as encodeLatin1, but with Uint16Array
|
||||
|
||||
// Checked input (known well-formed): copy each charcode with its two bytes
// exchanged, i.e. emit opposite-endian code units.
function encodeCheckedSwapped(str, arr) {
  // TODO: faster path for Hermes? See encodeCharcodes
  const count = str.length
  for (let pos = 0; pos < count; pos++) {
    const unit = str.charCodeAt(pos)
    arr[pos] = ((unit << 8) & 0xff_00) | (unit >>> 8)
  }

  return arr
}
|
||||
|
||||
// lead: d800 - dbff, trail: dc00 - dfff
|
||||
|
||||
// Unchecked input: copy charcodes while validating surrogate pairing.
// Lone surrogates throw in strict mode or become U+FFFD in loose mode.
function encodeUnchecked(str, arr, loose = false) {
  // TODO: faster path for Hermes? See encodeCharcodes
  const count = str.length
  for (let pos = 0; pos < count; pos++) {
    const unit = str.charCodeAt(pos)
    arr[pos] = unit
    if (unit < 0xd8_00 || unit >= 0xe0_00) continue // not a surrogate

    // A trail, or a lead at the very end of input, cannot form a pair
    if (unit > 0xdb_ff || pos + 1 >= count) {
      if (!loose) throw new TypeError(E_STRICT_UNICODE)
      arr[pos] = 0xff_fd // replacement character
      continue
    }

    const trail = str.charCodeAt(pos + 1)
    if (trail >= 0xdc_00 && trail < 0xe0_00) {
      arr[++pos] = trail // valid pair: store the trail and consume it
    } else {
      if (!loose) throw new TypeError(E_STRICT_UNICODE)
      arr[pos] = 0xff_fd // lead without a trail
    }
  }

  return arr
}
|
||||
|
||||
// Unchecked input: copy byte-swapped (opposite-endian) charcodes while
// validating surrogate pairing. Lone surrogates throw in strict mode or
// become byte-swapped U+FFFD in loose mode.
function encodeUncheckedSwapped(str, arr, loose = false) {
  // TODO: faster path for Hermes? See encodeCharcodes
  const flip = (u) => ((u << 8) & 0xff_00) | (u >>> 8)
  const count = str.length
  for (let pos = 0; pos < count; pos++) {
    const unit = str.charCodeAt(pos)
    arr[pos] = flip(unit)
    if (unit < 0xd8_00 || unit >= 0xe0_00) continue // not a surrogate

    // A trail, or a lead at the very end of input, cannot form a pair
    if (unit > 0xdb_ff || pos + 1 >= count) {
      if (!loose) throw new TypeError(E_STRICT_UNICODE)
      arr[pos] = 0xfd_ff // byte-swapped U+FFFD
      continue
    }

    const trail = str.charCodeAt(pos + 1)
    if (trail >= 0xdc_00 && trail < 0xe0_00) {
      arr[++pos] = flip(trail) // valid pair: store the trail and consume it
    } else {
      if (!loose) throw new TypeError(E_STRICT_UNICODE)
      arr[pos] = 0xfd_ff // lead without a trail
    }
  }

  return arr
}
|
||||
|
||||
// Only needed on Hermes, everything else has native impl
|
||||
// Only needed on Hermes, everything else has native impl
// Replaces every unpaired surrogate with U+FFFD in place; valid pairs and
// non-surrogate units pass through untouched. Returns the same array.
export function toWellFormed(u16) {
  const count = u16.length
  let pos = 0
  while (pos < count) {
    const unit = u16[pos]
    if (unit < 0xd8_00 || unit >= 0xe0_00) {
      pos++ // not a surrogate
      continue
    }

    const paired =
      unit <= 0xdb_ff && pos + 1 < count && u16[pos + 1] >= 0xdc_00 && u16[pos + 1] < 0xe0_00
    if (paired) {
      pos += 2 // valid lead+trail pair
    } else {
      u16[pos++] = 0xff_fd // lone lead or trail
    }
  }

  return u16
}
|
||||
|
||||
// Only needed on Hermes, everything else has native impl
|
||||
/**
 * Returns true if the Uint16Array is well-formed UTF-16: every surrogate
 * code unit belongs to a valid lead+trail pair.
 * Uses two fast scan phases to skip ahead to the first candidate surrogate
 * before falling back to the exact per-unit check.
 */
export function isWellFormed(u16) {
  const length = u16.length
  let i = 0

  const m = 0x80_00_80_00 // high bit of each 16-bit half: set iff either unit is >= 0x8000
  const l = 0xd8_00 // first surrogate code unit
  const h = 0xe0_00 // first code unit past the surrogate range

  // Speedup with u32, by skipping to the first surrogate
  // Only implemented for aligned input for now, but almost all input is aligned (pooled Buffer or 0 offset)
  if (length > 32 && u16.byteOffset % 4 === 0) {
    const u32length = (u16.byteLength / 4) | 0
    const u32 = new Uint32Array(u16.buffer, u16.byteOffset, u32length)
    for (const last3 = u32length - 3; ; i += 4) {
      if (i >= last3) break // loop is fast enough for moving this here to be _very_ useful, likely due to array access checks
      const a = u32[i]
      const b = u32[i + 1]
      const c = u32[i + 2]
      const d = u32[i + 3]
      if (a & m || b & m || c & m || d & m) break // bitwise OR does not make this faster on Hermes
    }

    // Narrow down within the last unrolled group
    for (; i < u32length; i++) if (u32[i] & m) break
    i *= 2 // convert u32 index back to u16 index
  }

  // An extra loop gives ~30-40% speedup e.g. on English text without surrogates but with other symbols above 0x80_00
  for (const last3 = length - 3; ; i += 4) {
    if (i >= last3) break
    const a = u16[i]
    const b = u16[i + 1]
    const c = u16[i + 2]
    const d = u16[i + 3]
    if ((a >= l && a < h) || (b >= l && b < h) || (c >= l && c < h) || (d >= l && d < h)) break
  }

  // Exact validation from the first candidate surrogate to the end
  for (; i < length; i++) {
    const code = u16[i]
    if (code >= l && code < h) {
      // An unexpected trail or a lead at the very end of input
      if (code >= 0xdc_00 || i + 1 >= length) return false
      i++ // consume next
      const next = u16[i] // Process valid pairs immediately
      if (next < 0xdc_00 || next >= h) return false
    }
  }

  return true
}
|
||||
248
node_modules/@exodus/bytes/fallback/utf8.js
generated
vendored
Normal file
248
node_modules/@exodus/bytes/fallback/utf8.js
generated
vendored
Normal file
@@ -0,0 +1,248 @@
|
||||
import { encodeAsciiPrefix } from './latin1.js'
|
||||
|
||||
export const E_STRICT = 'Input is not well-formed utf8'
export const E_STRICT_UNICODE = 'Input is not well-formed Unicode'

// U+FFFD, emitted for every recoverable decoding error in loose mode
const replacement = 0xff_fd

// https://encoding.spec.whatwg.org/#utf-8-decoder
// We are most likely in loose mode, for non-loose escape & decodeURIComponent solved everything
/**
 * Decode UTF-8 bytes to a string, starting at byte offset `start`.
 *
 * @param arr - Indexable byte source (Uint8Array or array of byte values)
 * @param loose - When truthy, malformed sequences become U+FFFD; otherwise a TypeError is thrown
 * @param start - Byte offset to begin decoding at (default 0)
 * @returns The decoded string
 */
export function decode(arr, loose, start = 0) {
  start |= 0
  const end = arr.length
  let result = ''

  // Charcodes are buffered and flushed through String.fromCharCode in chunks,
  // far below MAX_ARGUMENTS_LENGTH in npmjs.com/buffer — we use smaller chunks
  const chunkSize = 0x2_00
  // 1 extra slot is needed: the last codepoint of a chunk can expand to 2 charcodes
  const initialSize = Math.min(end - start, chunkSize + 1)
  const chunk = new Array(initialSize).fill(0)
  let filled = 0

  for (let i = start; i < end; i++) {
    if (filled >= chunkSize) {
      chunk.length = filled // can exceed chunkSize by 1 when the last codepoint was a pair
      result += String.fromCharCode.apply(String, chunk)
      if (chunk.length <= chunkSize) chunk.push(0) // restore the extra pair slot
      filled = 0
    }

    const b0 = arr[i]
    if (b0 < 0x80) {
      // ascii fast path is in ../utf8.js, this is called only on non-ascii input,
      // so this branch is not unrolled anymore
      chunk[filled++] = b0
    } else if (b0 < 0xc2) {
      // An unexpected continuation byte, or an overlong 2-byte lead (0xc0 / 0xc1)
      if (!loose) throw new TypeError(E_STRICT)
      chunk[filled++] = replacement
    } else if (b0 < 0xe0) {
      // 2-byte sequence: needs 1 continuation byte
      if (i + 1 >= end) {
        // Truncated at the end of input
        if (!loose) throw new TypeError(E_STRICT)
        chunk[filled++] = replacement
        break
      }

      const b1 = arr[i + 1]
      if (b1 < 0x80 || b1 > 0xbf) {
        // Invalid continuation: record an error, leave b1 unconsumed for the next iteration
        if (!loose) throw new TypeError(E_STRICT)
        chunk[filled++] = replacement
        continue
      }

      i++
      chunk[filled++] = ((b0 & 0x1f) << 6) | (b1 & 0x3f)
    } else if (b0 < 0xf0) {
      // 3-byte sequence: needs 2 continuation bytes
      if (i + 1 >= end) {
        if (!loose) throw new TypeError(E_STRICT)
        chunk[filled++] = replacement
        break
      }

      // Tighter bounds on the first continuation reject overlong forms (lead 0xe0)
      // and surrogate codepoints (lead 0xed), per the WHATWG utf-8 decoder
      const lower = b0 === 0xe0 ? 0xa0 : 0x80
      const upper = b0 === 0xed ? 0x9f : 0xbf
      const b1 = arr[i + 1]
      if (b1 < lower || b1 > upper) {
        if (!loose) throw new TypeError(E_STRICT)
        chunk[filled++] = replacement
        continue
      }

      i++
      if (i + 1 >= end) {
        if (!loose) throw new TypeError(E_STRICT)
        chunk[filled++] = replacement
        break
      }

      const b2 = arr[i + 1]
      if (b2 < 0x80 || b2 > 0xbf) {
        if (!loose) throw new TypeError(E_STRICT)
        chunk[filled++] = replacement
        continue
      }

      i++
      chunk[filled++] = ((b0 & 0xf) << 12) | ((b1 & 0x3f) << 6) | (b2 & 0x3f)
    } else if (b0 <= 0xf4) {
      // 4-byte sequence: needs 3 continuation bytes
      if (i + 1 >= end) {
        if (!loose) throw new TypeError(E_STRICT)
        chunk[filled++] = replacement
        break
      }

      // Tighter bounds on the first continuation reject overlong forms (lead 0xf0)
      // and codepoints above U+10FFFF (lead 0xf4)
      const lower = b0 === 0xf0 ? 0x90 : 0x80
      const upper = b0 === 0xf4 ? 0x8f : 0xbf
      const b1 = arr[i + 1]
      if (b1 < lower || b1 > upper) {
        if (!loose) throw new TypeError(E_STRICT)
        chunk[filled++] = replacement
        continue
      }

      i++
      if (i + 1 >= end) {
        if (!loose) throw new TypeError(E_STRICT)
        chunk[filled++] = replacement
        break
      }

      const b2 = arr[i + 1]
      if (b2 < 0x80 || b2 > 0xbf) {
        if (!loose) throw new TypeError(E_STRICT)
        chunk[filled++] = replacement
        continue
      }

      i++
      if (i + 1 >= end) {
        if (!loose) throw new TypeError(E_STRICT)
        chunk[filled++] = replacement
        break
      }

      const b3 = arr[i + 1]
      if (b3 < 0x80 || b3 > 0xbf) {
        if (!loose) throw new TypeError(E_STRICT)
        chunk[filled++] = replacement
        continue
      }

      i++
      const codePoint =
        ((b0 & 0xf) << 18) | ((b1 & 0x3f) << 12) | ((b2 & 0x3f) << 6) | (b3 & 0x3f)
      if (codePoint > 0xff_ff) {
        // Split into a surrogate pair, as String.fromCharCode is faster than String.fromCodePoint
        const u = codePoint - 0x1_00_00
        chunk[filled++] = 0xd8_00 + ((u >> 10) & 0x3_ff)
        chunk[filled++] = 0xdc_00 + (u & 0x3_ff)
      } else {
        chunk[filled++] = codePoint
      }
      // eslint-disable-next-line sonarjs/no-duplicated-branches
    } else {
      // 0xf5 - 0xff can never start a well-formed utf8 sequence
      if (!loose) throw new TypeError(E_STRICT)
      chunk[filled++] = replacement
    }
  }

  if (filled === 0) return result
  chunk.length = filled
  return result + String.fromCharCode.apply(String, chunk)
}
|
||||
|
||||
/**
 * Encode a string to UTF-8 bytes.
 *
 * @param string - The input string
 * @param loose - When truthy, lone surrogates are encoded as U+FFFD (bytes EF BF BD);
 *   otherwise a TypeError(E_STRICT_UNICODE) is thrown on non well-formed input
 * @returns A Uint8Array holding the UTF-8 encoding of the input
 */
export function encode(string, loose) {
  const length = string.length
  let small = true
  let bytes = new Uint8Array(length) // assume ascii
  // encodeAsciiPrefix fills the leading ascii run and returns how far it got
  // NOTE(review): assumes encodeAsciiPrefix writes bytes[0..i) and stops at the first non-ascii charcode — defined in ./latin1.js
  let i = encodeAsciiPrefix(bytes, string)
  let p = i // write position in `bytes`; equals i while input is still all-ascii
  for (; i < length; i++) {
    let code = string.charCodeAt(i)
    if (code < 0x80) {
      bytes[p++] = code
      // Unroll the loop a bit for faster ops
      while (true) {
        i++
        if (i >= length) break
        code = string.charCodeAt(i)
        if (code >= 0x80) break
        bytes[p++] = code
        i++
        if (i >= length) break
        code = string.charCodeAt(i)
        if (code >= 0x80) break
        bytes[p++] = code
        i++
        if (i >= length) break
        code = string.charCodeAt(i)
        if (code >= 0x80) break
        bytes[p++] = code
        i++
        if (i >= length) break
        code = string.charCodeAt(i)
        if (code >= 0x80) break
        bytes[p++] = code
      }

      if (i >= length) break
      // now, code is present and >= 0x80
    }

    // First non-ascii charcode: grow the buffer once to the worst case
    if (small) {
      // TODO: use resizable array buffers? will have to return a non-resizeable one
      if (p !== i) /* c8 ignore next */ throw new Error('Unreachable') // Here, p === i (only when small is still true)
      const bytesNew = new Uint8Array(p + (length - i) * 3) // maximum can be 3x of the string length in charcodes
      bytesNew.set(bytes)
      bytes = bytesNew
      small = false
    }

    // surrogate, charcodes = [d800 + a & 3ff, dc00 + b & 3ff]; codePoint = 0x1_00_00 | (a << 10) | b
    // lead: d800 - dbff
    // trail: dc00 - dfff
    if (code >= 0xd8_00 && code < 0xe0_00) {
      // Can't be a valid trail as we already processed that below

      if (code > 0xdb_ff || i + 1 >= length) {
        // An unexpected trail or a lead at the very end of input
        if (!loose) throw new TypeError(E_STRICT_UNICODE)
        // U+FFFD encoded as utf8
        bytes[p++] = 0xef
        bytes[p++] = 0xbf
        bytes[p++] = 0xbd
        continue
      }

      const next = string.charCodeAt(i + 1) // Process valid pairs immediately
      if (next >= 0xdc_00 && next < 0xe0_00) {
        // here, codePoint is always between 0x1_00_00 and 0x11_00_00, we encode as 4 bytes
        const codePoint = (((code - 0xd8_00) << 10) | (next - 0xdc_00)) + 0x1_00_00
        bytes[p++] = (codePoint >> 18) | 0xf0
        bytes[p++] = ((codePoint >> 12) & 0x3f) | 0x80
        bytes[p++] = ((codePoint >> 6) & 0x3f) | 0x80
        bytes[p++] = (codePoint & 0x3f) | 0x80
        i++ // consume next
      } else {
        // Next is not a trail, leave next unconsumed but process unmatched lead error
        if (!loose) throw new TypeError(E_STRICT_UNICODE)
        // U+FFFD encoded as utf8
        bytes[p++] = 0xef
        bytes[p++] = 0xbf
        bytes[p++] = 0xbd
      }

      continue
    }

    // We are left with a non-pair char code above ascii, it gets encoded to 2 or 3 bytes
    if (code < 0x8_00) {
      bytes[p++] = (code >> 6) | 0xc0
      bytes[p++] = (code & 0x3f) | 0x80
    } else {
      bytes[p++] = (code >> 12) | 0xe0
      bytes[p++] = ((code >> 6) & 0x3f) | 0x80
      bytes[p++] = (code & 0x3f) | 0x80
    }
  }

  // Trim the over-allocated buffer down to the bytes actually written
  return bytes.length === p ? bytes : bytes.slice(0, p)
}
|
||||
35
node_modules/@exodus/bytes/hex.d.ts
generated
vendored
Normal file
35
node_modules/@exodus/bytes/hex.d.ts
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
/**
|
||||
* Implements Base16 from [RFC4648](https://datatracker.ietf.org/doc/html/rfc4648)
|
||||
 * (no differences from [RFC3548](https://datatracker.ietf.org/doc/html/rfc3548)).
|
||||
*
|
||||
* ```js
|
||||
* import { fromHex, toHex } from '@exodus/bytes/hex.js'
|
||||
* ```
|
||||
*
|
||||
* @module @exodus/bytes/hex.js
|
||||
*/
|
||||
|
||||
/// <reference types="node" />
|
||||
|
||||
import type { OutputFormat, Uint8ArrayBuffer } from './array.js';
|
||||
|
||||
/**
|
||||
* Encode a `Uint8Array` to a lowercase hex string
|
||||
*
|
||||
* @param arr - The input bytes
|
||||
* @returns The hex encoded string
|
||||
*/
|
||||
export function toHex(arr: Uint8Array): string;
|
||||
|
||||
/**
|
||||
* Decode a hex string to bytes
|
||||
*
|
||||
* Unlike `Buffer.from()`, throws on invalid input
|
||||
*
|
||||
* @param string - The hex encoded string (case-insensitive)
|
||||
* @param format - Output format (default: 'uint8')
|
||||
* @returns The decoded bytes
|
||||
*/
|
||||
export function fromHex(string: string, format?: 'uint8'): Uint8ArrayBuffer;
|
||||
export function fromHex(string: string, format: 'buffer'): Buffer;
|
||||
export function fromHex(string: string, format?: OutputFormat): Uint8ArrayBuffer | Buffer;
|
||||
19
node_modules/@exodus/bytes/hex.js
generated
vendored
Normal file
19
node_modules/@exodus/bytes/hex.js
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
import { assertUint8 } from './assert.js'
|
||||
import { typedView } from './array.js'
|
||||
import { skipWeb } from './fallback/_utils.js'
|
||||
import * as js from './fallback/hex.js'
|
||||
|
||||
const { toHex: webHex } = Uint8Array.prototype // Modern engines have this

/**
 * Encode a `Uint8Array` to a lowercase hex string.
 *
 * @param arr - The input bytes (asserted via assertUint8)
 * @returns The hex encoded string
 */
export function toHex(arr) {
  assertUint8(arr)
  if (arr.length === 0) return ''
  // Prefer the built-in Uint8Array.prototype.toHex when present and not overridden on the instance
  const useNative = !skipWeb && webHex !== undefined && arr.toHex === webHex
  return useNative ? arr.toHex() : js.toHex(arr)
}
|
||||
|
||||
// Unlike Buffer.from(), throws on invalid input
const haveNativeFromHex = !skipWeb && Boolean(Uint8Array.fromHex)
export const fromHex = haveNativeFromHex
  ? (str, format = 'uint8') => typedView(Uint8Array.fromHex(str), format)
  : (str, format = 'uint8') => typedView(js.fromHex(str), format)
|
||||
29
node_modules/@exodus/bytes/hex.node.js
generated
vendored
Normal file
29
node_modules/@exodus/bytes/hex.node.js
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
import { assertUint8 } from './assert.js'
|
||||
import { typedView } from './array.js'
|
||||
import { E_STRING } from './fallback/_utils.js'
|
||||
import { E_HEX } from './fallback/hex.js'
|
||||
|
||||
// Refuse to run on a userland Buffer polyfill — this module relies on real node Buffer internals
if (Buffer.TYPED_ARRAY_SUPPORT) throw new Error('Unexpected Buffer polyfill')

const { toHex: webHex } = Uint8Array.prototype // Modern engines have this
// NOTE(review): detects engines where Buffer.from(str, 'hex') accepts invalid characters
// instead of stopping (observed behavior difference, presumably Deno) — confirm
const denoBug = Buffer.from('ag', 'hex').length > 0

/**
 * Encode a `Uint8Array` to a lowercase hex string.
 *
 * @param arr - The input bytes (asserted via assertUint8)
 * @returns The hex encoded string
 */
export function toHex(arr) {
  assertUint8(arr)
  const size = arr.byteLength
  if (arr.length === 0) return ''
  if (webHex && arr.toHex === webHex) return arr.toHex()
  // Real Buffer instances can use the internal hexSlice directly;
  // anything else is wrapped over the same underlying ArrayBuffer without copying
  const buf =
    arr.constructor === Buffer && Buffer.isBuffer(arr)
      ? arr
      : Buffer.from(arr.buffer, arr.byteOffset, size)
  return buf.hexSlice(0, size)
}
|
||||
|
||||
// Unlike Buffer.from(), throws on invalid input
// Fallback for engines without Uint8Array.fromHex, validating what Buffer.from would silently accept
function fromHexFallback(str, format = 'uint8') {
  if (typeof str !== 'string') throw new TypeError(E_STRING)
  if (str.length % 2 !== 0) throw new SyntaxError(E_HEX)
  // Pre-validate on engines where Buffer.from does not stop at invalid characters
  if (denoBug && /[^\dA-Fa-f]/.test(str)) throw new SyntaxError(E_HEX)
  const buf = Buffer.from(str, 'hex') // will stop on first non-hex character, so we can just validate length
  if (buf.length * 2 !== str.length) throw new SyntaxError(E_HEX)
  return typedView(buf, format)
}

export const fromHex = Uint8Array.fromHex
  ? (str, format = 'uint8') => typedView(Uint8Array.fromHex(str), format)
  : fromHexFallback
|
||||
43
node_modules/@exodus/bytes/index.d.ts
generated
vendored
Normal file
43
node_modules/@exodus/bytes/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
/**
|
||||
* ### The `@exodus/bytes` package consists of submodules, there is no single export.
|
||||
* Import specific submodules instead.
|
||||
*
|
||||
* See [README](https://github.com/ExodusOSS/bytes/blob/main/README.md).
|
||||
*
|
||||
* Example:
|
||||
* ```js
|
||||
* import { fromHex, toHex } from '@exodus/bytes/hex.js'
|
||||
* import { fromBase64, toBase64, fromBase64url, toBase64url, fromBase64any } from '@exodus/bytes/base64.js'
|
||||
* import { fromBase32, toBase32, fromBase32hex, toBase32hex } from '@exodus/bytes/base32.js'
|
||||
* import { fromBase58, toBase58, fromBase58xrp, toBase58xrp } from '@exodus/bytes/base58.js'
|
||||
* import { fromBech32, toBech32, fromBech32m, toBech32m, getPrefix } from '@exodus/bytes/bech32.js'
|
||||
* import { fromBigInt, toBigInt } from '@exodus/bytes/bigint.js'
|
||||
*
|
||||
* import { utf8fromString, utf8toString, utf8fromStringLoose, utf8toStringLoose } from '@exodus/bytes/utf8.js'
|
||||
* import { utf16fromString, utf16toString, utf16fromStringLoose, utf16toStringLoose } from '@exodus/bytes/utf16.js'
|
||||
* import {
|
||||
* createSinglebyteDecoder, createSinglebyteEncoder,
|
||||
* windows1252toString, windows1252fromString,
|
||||
* latin1toString, latin1fromString } from '@exodus/bytes/single-byte.js'
|
||||
* import { createMultibyteDecoder, createMultibyteEncoder } from '@exodus/bytes/multi-byte.js'
|
||||
*
|
||||
* import {
|
||||
* fromBase58check, toBase58check,
|
||||
* fromBase58checkSync, toBase58checkSync,
|
||||
* makeBase58check } from '@exodus/bytes/base58check.js'
|
||||
* import { fromWifString, toWifString, fromWifStringSync, toWifStringSync } from '@exodus/bytes/wif.js'
|
||||
*
|
||||
* // All encodings from the WHATWG Encoding spec
|
||||
* import { TextDecoder, TextEncoder, TextDecoderStream, TextEncoderStream } from '@exodus/bytes/encoding.js'
|
||||
* import { getBOMEncoding, legacyHookDecode, labelToName, normalizeEncoding } from '@exodus/bytes/encoding.js'
|
||||
*
|
||||
* // Omits legacy multi-byte decoders to save bundle size
|
||||
* import { TextDecoder, TextEncoder, TextDecoderStream, TextEncoderStream } from '@exodus/bytes/encoding-lite.js'
|
||||
* import { getBOMEncoding, legacyHookDecode, labelToName, normalizeEncoding } from '@exodus/bytes/encoding-lite.js'
|
||||
*
|
||||
* // In browser bundles, uses built-in TextDecoder / TextEncoder to save bundle size
|
||||
* import { TextDecoder, TextEncoder, TextDecoderStream, TextEncoderStream } from '@exodus/bytes/encoding-browser.js'
|
||||
* import { getBOMEncoding, legacyHookDecode, labelToName, normalizeEncoding } from '@exodus/bytes/encoding-browser.js'
|
||||
* ```
|
||||
*/
|
||||
declare module '@exodus/bytes' {}
|
||||
5
node_modules/@exodus/bytes/index.js
generated
vendored
Normal file
5
node_modules/@exodus/bytes/index.js
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
// Root entry point intentionally fails fast: the package only exposes submodule imports
throw new Error(
  `This package consists of submodules, there is no single export. Import specific submodules instead.
See README: https://github.com/ExodusOSS/bytes/blob/main/README.md
`
)
|
||||
64
node_modules/@exodus/bytes/multi-byte.d.ts
generated
vendored
Normal file
64
node_modules/@exodus/bytes/multi-byte.d.ts
generated
vendored
Normal file
@@ -0,0 +1,64 @@
|
||||
/**
|
||||
* Decode / encode the legacy multi-byte encodings according to the
|
||||
* [Encoding standard](https://encoding.spec.whatwg.org/)
|
||||
* ([§10](https://encoding.spec.whatwg.org/#legacy-multi-byte-chinese-(simplified)-encodings),
|
||||
* [§11](https://encoding.spec.whatwg.org/#legacy-multi-byte-chinese-(traditional)-encodings),
|
||||
* [§12](https://encoding.spec.whatwg.org/#legacy-multi-byte-japanese-encodings),
|
||||
* [§13](https://encoding.spec.whatwg.org/#legacy-multi-byte-korean-encodings)).
|
||||
*
|
||||
* ```js
|
||||
* import { createMultibyteDecoder, createMultibyteEncoder } from '@exodus/bytes/multi-byte.js'
|
||||
* ```
|
||||
*
|
||||
* > [!WARNING]
|
||||
* > This is a lower-level API for legacy multi-byte encodings.
|
||||
* >
|
||||
* > For a safe WHATWG Encoding-compatible API, see `@exodus/bytes/encoding.js` import (and variants of it).
|
||||
* >
|
||||
* > Be sure to know what you are doing and check documentation when directly using encodings from this file.
|
||||
*
|
||||
* Supports all legacy multi-byte encodings listed in the WHATWG Encoding standard:
|
||||
* `gbk`, `gb18030`, `big5`, `euc-jp`, `iso-2022-jp`, `shift_jis`, `euc-kr`.
|
||||
*
|
||||
* @module @exodus/bytes/multi-byte.js
|
||||
*/
|
||||
|
||||
/// <reference types="node" />
|
||||
|
||||
import type { Uint8ArrayBuffer } from './array.js';
|
||||
|
||||
/**
|
||||
* Create a decoder for a supported legacy multi-byte `encoding`, given its lowercased name `encoding`.
|
||||
*
|
||||
* Returns a function `decode(arr, stream = false)` that decodes bytes to a string.
|
||||
*
|
||||
* The returned function will maintain internal state while `stream = true` is used, allowing it to
|
||||
* handle incomplete multi-byte sequences across multiple calls.
|
||||
* State is reset when `stream = false` or when the function is called without the `stream` parameter.
|
||||
*
|
||||
* @param encoding - The encoding name (e.g., 'gbk', 'gb18030', 'big5', 'euc-jp', 'iso-2022-jp', 'shift_jis', 'euc-kr')
|
||||
* @param loose - If true, replaces unmapped bytes with replacement character instead of throwing (default: false)
|
||||
* @returns A function that decodes bytes to string, with optional streaming support
|
||||
*/
|
||||
export function createMultibyteDecoder(
|
||||
encoding: string,
|
||||
loose?: boolean
|
||||
): (arr: Uint8Array, stream?: boolean) => string;
|
||||
|
||||
/**
|
||||
* Create an encoder for a supported legacy multi-byte `encoding`, given its lowercased name `encoding`.
|
||||
*
|
||||
* Returns a function `encode(string)` that encodes a string to bytes.
|
||||
*
|
||||
* In `'fatal'` mode (default), will throw on non well-formed strings or any codepoints which could
|
||||
* not be encoded in the target encoding.
|
||||
*
|
||||
* @param encoding - The encoding name (e.g., 'gbk', 'gb18030', 'big5', 'euc-jp', 'iso-2022-jp', 'shift_jis', 'euc-kr')
|
||||
* @param options - Encoding options
|
||||
* @param options.mode - Encoding mode (default: 'fatal'). Currently, only 'fatal' mode is supported.
|
||||
* @returns A function that encodes string to bytes
|
||||
*/
|
||||
export function createMultibyteEncoder(
|
||||
encoding: string,
|
||||
options?: { mode?: 'fatal' }
|
||||
): (string: string) => Uint8ArrayBuffer;
|
||||
19
node_modules/@exodus/bytes/multi-byte.js
generated
vendored
Normal file
19
node_modules/@exodus/bytes/multi-byte.js
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
import { assertUint8 } from './assert.js'
|
||||
import { multibyteDecoder, multibyteEncoder } from './fallback/multi-byte.js'
|
||||
|
||||
/**
 * Create a decoder for a supported legacy multi-byte `encoding`, given its lowercased name.
 *
 * The returned `decode(arr, stream = false)` keeps internal state while `stream = true`,
 * so incomplete multi-byte sequences can span calls; state resets when `stream` is false.
 *
 * @param encoding - The lowercased encoding name (e.g. 'gbk', 'big5', 'euc-jp')
 * @param loose - If true, unmapped bytes become the replacement character instead of throwing
 * @returns A function that decodes bytes to a string, with optional streaming support
 */
export function createMultibyteDecoder(encoding, loose = false) {
  const decodeImpl = multibyteDecoder(encoding, loose) // asserts the encoding name
  let midStream = false // true when the previous call ended with stream = true
  return (arr, stream = false) => {
    assertUint8(arr)
    // Empty input is a no-op, unless we are continuing a streamed sequence
    if (arr.byteLength === 0 && !midStream) return ''
    midStream = stream
    return decodeImpl(arr, stream)
  }
}
|
||||
|
||||
/**
 * Create an encoder for a supported legacy multi-byte `encoding`, given its lowercased name.
 *
 * Only `'fatal'` mode is currently supported: the returned encoder throws on input it
 * cannot represent in the target encoding.
 *
 * @param encoding - The lowercased encoding name (e.g. 'gbk', 'big5', 'euc-jp')
 * @param options - Encoding options; `options.mode` defaults to 'fatal'
 * @returns A function that encodes a string to bytes
 */
export function createMultibyteEncoder(encoding, { mode = 'fatal' } = {}) {
  // TODO: replacement, truncate (replacement will need varying length)
  if (mode !== 'fatal') throw new Error('Unsupported mode')
  return multibyteEncoder(encoding) // asserts
}
|
||||
29
node_modules/@exodus/bytes/multi-byte.node.js
generated
vendored
Normal file
29
node_modules/@exodus/bytes/multi-byte.node.js
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
import { assertUint8 } from './assert.js'
|
||||
import { isDeno, toBuf } from './fallback/_utils.js'
|
||||
import { isAsciiSuperset, multibyteDecoder, multibyteEncoder } from './fallback/multi-byte.js'
|
||||
import { isAscii } from 'node:buffer'
|
||||
|
||||
/**
 * Create a decoder for a supported legacy multi-byte `encoding` (node variant, with an
 * ascii fast path via Buffer).
 *
 * The returned `decode(arr, stream = false)` keeps internal state while `stream = true`,
 * so incomplete multi-byte sequences can span calls; state resets when `stream` is false.
 *
 * @param encoding - The lowercased encoding name (e.g. 'gbk', 'big5', 'euc-jp')
 * @param loose - If true, unmapped bytes become the replacement character instead of throwing
 * @returns A function that decodes bytes to a string, with optional streaming support
 */
export function createMultibyteDecoder(encoding, loose = false) {
  const decodeImpl = multibyteDecoder(encoding, loose) // asserts the encoding name
  const asciiCompatible = isAsciiSuperset(encoding)
  let midStream = false // true when the previous call ended with stream = true
  return (arr, stream = false) => {
    assertUint8(arr)
    // Fast paths apply only when not continuing a streamed sequence
    if (!midStream) {
      if (arr.byteLength === 0) return ''
      if (asciiCompatible && isAscii(arr)) {
        if (isDeno) return toBuf(arr).toString()
        return toBuf(arr).latin1Slice(0, arr.byteLength) // .latin1Slice is faster than .asciiSlice
      }
    }

    midStream = stream
    return decodeImpl(arr, stream)
  }
}
|
||||
|
||||
/**
 * Create an encoder for a supported legacy multi-byte `encoding`, given its lowercased name.
 *
 * Only `'fatal'` mode is currently supported: the returned encoder throws on input it
 * cannot represent in the target encoding.
 *
 * @param encoding - The lowercased encoding name (e.g. 'gbk', 'big5', 'euc-jp')
 * @param options - Encoding options; `options.mode` defaults to 'fatal'
 * @returns A function that encodes a string to bytes
 */
export function createMultibyteEncoder(encoding, { mode = 'fatal' } = {}) {
  // TODO: replacement, truncate (replacement will need varying length)
  if (mode !== 'fatal') throw new Error('Unsupported mode')
  return multibyteEncoder(encoding) // asserts
}
|
||||
272
node_modules/@exodus/bytes/package.json
generated
vendored
Normal file
272
node_modules/@exodus/bytes/package.json
generated
vendored
Normal file
@@ -0,0 +1,272 @@
|
||||
{
|
||||
"name": "@exodus/bytes",
|
||||
"version": "1.11.0",
|
||||
"description": "Various operations on Uint8Array data",
|
||||
"keywords": [
|
||||
"encoding",
|
||||
"uint8array",
|
||||
"textdecoder",
|
||||
"textencoder",
|
||||
"utf8",
|
||||
"utf16",
|
||||
"hex",
|
||||
"base64",
|
||||
"base32",
|
||||
"base58",
|
||||
"base58check",
|
||||
"bech32",
|
||||
"bech32m",
|
||||
"wif"
|
||||
],
|
||||
"scripts": {
|
||||
"lint": "eslint .",
|
||||
"typedoc": "typedoc && mkdir -p doc/assets && cp -r theme/styles doc/assets/",
|
||||
"test:javascriptcore": "npm run test:jsc --",
|
||||
"test:v8": "exodus-test --engine=v8:bundle",
|
||||
"test:jsc": "exodus-test --engine=jsc:bundle",
|
||||
"test:spidermonkey": "exodus-test --engine=spidermonkey:bundle",
|
||||
"test:hermes": "exodus-test --engine=hermes:bundle",
|
||||
"test:quickjs": "exodus-test --engine=quickjs:bundle",
|
||||
"test:xs": "EXODUS_TEST_IGNORE='tests/whatwg.browser.test.js' exodus-test --engine=xs:bundle",
|
||||
"test:engine262": "exodus-test --engine=engine262:bundle",
|
||||
"test:deno": "exodus-test --engine=deno:pure",
|
||||
"test:bun": "exodus-test --engine=bun:pure",
|
||||
"test:electron:bundle": "exodus-test --engine=electron:bundle",
|
||||
"test:electron:as-node": "exodus-test --engine=electron-as-node:test",
|
||||
"test:chrome:puppeteer": "exodus-test --engine=chrome:puppeteer",
|
||||
"test:chromium:playwright": "exodus-test --engine=chromium:playwright",
|
||||
"test:webkit:playwright": "exodus-test --engine=webkit:playwright",
|
||||
"test:firefox:puppeteer": "exodus-test --engine=firefox:puppeteer",
|
||||
"test:firefox:playwright": "exodus-test --engine=firefox:playwright",
|
||||
"test:servo:bundle": "exodus-test --engine=servo:bundle",
|
||||
"test": "exodus-test",
|
||||
"size": "esbuild --minify --bundle",
|
||||
"jsvu": "jsvu",
|
||||
"playwright": "exodus-test --playwright",
|
||||
"benchmark": "exodus-test --concurrency=1 benchmarks/*.bench.js",
|
||||
"coverage": "exodus-test --coverage"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git+https://github.com/ExodusOSS/bytes.git"
|
||||
},
|
||||
"author": "Exodus Movement, Inc.",
|
||||
"license": "MIT",
|
||||
"bugs": {
|
||||
"url": "https://github.com/ExodusOSS/bytes/issues"
|
||||
},
|
||||
"homepage": "https://github.com/ExodusOSS/bytes",
|
||||
"engines": {
|
||||
"node": "^20.19.0 || ^22.12.0 || >=24.0.0"
|
||||
},
|
||||
"type": "module",
|
||||
"files": [
|
||||
"/fallback/_utils.js",
|
||||
"/fallback/base32.js",
|
||||
"/fallback/base58check.js",
|
||||
"/fallback/base64.js",
|
||||
"/fallback/encoding.js",
|
||||
"/fallback/encoding.api.js",
|
||||
"/fallback/encoding.labels.js",
|
||||
"/fallback/encoding.util.js",
|
||||
"/fallback/hex.js",
|
||||
"/fallback/latin1.js",
|
||||
"/fallback/percent.js",
|
||||
"/fallback/multi-byte.encodings.cjs",
|
||||
"/fallback/multi-byte.encodings.json",
|
||||
"/fallback/multi-byte.js",
|
||||
"/fallback/multi-byte.table.js",
|
||||
"/fallback/single-byte.encodings.js",
|
||||
"/fallback/single-byte.js",
|
||||
"/fallback/utf16.js",
|
||||
"/fallback/utf8.js",
|
||||
"/array.js",
|
||||
"/array.d.ts",
|
||||
"/assert.js",
|
||||
"/base32.js",
|
||||
"/base32.d.ts",
|
||||
"/base58.js",
|
||||
"/base58.d.ts",
|
||||
"/base58check.js",
|
||||
"/base58check.d.ts",
|
||||
"/base58check.node.js",
|
||||
"/base64.js",
|
||||
"/base64.d.ts",
|
||||
"/bech32.js",
|
||||
"/bech32.d.ts",
|
||||
"/bigint.js",
|
||||
"/bigint.d.ts",
|
||||
"/encoding-browser.js",
|
||||
"/encoding-browser.browser.js",
|
||||
"/encoding-browser.native.js",
|
||||
"/encoding-browser.d.ts",
|
||||
"/encoding.js",
|
||||
"/encoding.d.ts",
|
||||
"/encoding-lite.js",
|
||||
"/encoding-lite.d.ts",
|
||||
"/hex.js",
|
||||
"/hex.d.ts",
|
||||
"/hex.node.js",
|
||||
"/index.js",
|
||||
"/index.d.ts",
|
||||
"/multi-byte.js",
|
||||
"/multi-byte.d.ts",
|
||||
"/multi-byte.node.js",
|
||||
"/single-byte.js",
|
||||
"/single-byte.d.ts",
|
||||
"/single-byte.node.js",
|
||||
"/utf16.js",
|
||||
"/utf16.d.ts",
|
||||
"/utf16.node.js",
|
||||
"/utf8.js",
|
||||
"/utf8.d.ts",
|
||||
"/utf8.node.js",
|
||||
"/whatwg.js",
|
||||
"/whatwg.d.ts",
|
||||
"/wif.js",
|
||||
"/wif.d.ts"
|
||||
],
|
||||
"main": "index.js",
|
||||
"module": "index.js",
|
||||
"types": "index.d.ts",
|
||||
"exports": {
|
||||
".": {
|
||||
"types": "./index.d.ts",
|
||||
"default": "./index.js"
|
||||
},
|
||||
"./array.js": {
|
||||
"types": "./array.d.ts",
|
||||
"default": "./array.js"
|
||||
},
|
||||
"./base32.js": {
|
||||
"types": "./base32.d.ts",
|
||||
"default": "./base32.js"
|
||||
},
|
||||
"./base58.js": {
|
||||
"types": "./base58.d.ts",
|
||||
"default": "./base58.js"
|
||||
},
|
||||
"./base58check.js": {
|
||||
"types": "./base58check.d.ts",
|
||||
"node": "./base58check.node.js",
|
||||
"default": "./base58check.js"
|
||||
},
|
||||
"./base64.js": {
|
||||
"types": "./base64.d.ts",
|
||||
"default": "./base64.js"
|
||||
},
|
||||
"./bech32.js": {
|
||||
"types": "./bech32.d.ts",
|
||||
"default": "./bech32.js"
|
||||
},
|
||||
"./bigint.js": {
|
||||
"types": "./bigint.d.ts",
|
||||
"default": "./bigint.js"
|
||||
},
|
||||
"./hex.js": {
|
||||
"types": "./hex.d.ts",
|
||||
"node": "./hex.node.js",
|
||||
"default": "./hex.js"
|
||||
},
|
||||
"./multi-byte.js": {
|
||||
"types": "./multi-byte.d.ts",
|
||||
"node": "./multi-byte.node.js",
|
||||
"default": "./multi-byte.js"
|
||||
},
|
||||
"./single-byte.js": {
|
||||
"types": "./single-byte.d.ts",
|
||||
"node": "./single-byte.node.js",
|
||||
"default": "./single-byte.js"
|
||||
},
|
||||
"./encoding.js": {
|
||||
"types": "./encoding.d.ts",
|
||||
"default": "./encoding.js"
|
||||
},
|
||||
"./encoding-lite.js": {
|
||||
"types": "./encoding-lite.d.ts",
|
||||
"default": "./encoding-lite.js"
|
||||
},
|
||||
"./encoding-browser.js": {
|
||||
"types": "./encoding-browser.d.ts",
|
||||
"node": "./encoding-browser.js",
|
||||
"react-native": "./encoding-browser.native.js",
|
||||
"browser": "./encoding-browser.browser.js",
|
||||
"default": "./encoding-browser.js"
|
||||
},
|
||||
"./utf16.js": {
|
||||
"types": "./utf16.d.ts",
|
||||
"node": "./utf16.node.js",
|
||||
"default": "./utf16.js"
|
||||
},
|
||||
"./utf8.js": {
|
||||
"types": "./utf8.d.ts",
|
||||
"node": "./utf8.node.js",
|
||||
"default": "./utf8.js"
|
||||
},
|
||||
"./whatwg.js": {
|
||||
"types": "./whatwg.d.ts",
|
||||
"default": "./whatwg.js"
|
||||
},
|
||||
"./wif.js": {
|
||||
"types": "./wif.d.ts",
|
||||
"default": "./wif.js"
|
||||
}
|
||||
},
|
||||
"react-native": {
|
||||
"./encoding-browser.js": "./encoding-browser.native.js"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@noble/hashes": "^1.8.0 || ^2.0.0"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@noble/hashes": {
|
||||
"optional": true
|
||||
}
|
||||
},
|
||||
"devDependencies": {
|
||||
"@ethersproject/strings": "^5.8.0",
|
||||
"@exodus/crypto": "^1.0.0-rc.30",
|
||||
"@exodus/eslint-config": "^5.24.0",
|
||||
"@exodus/prettier": "^1.0.0",
|
||||
"@exodus/test": "^1.0.0-rc.109",
|
||||
"@hexagon/base64": "^2.0.4",
|
||||
"@noble/hashes": "^2.0.1",
|
||||
"@oslojs/encoding": "^1.1.0",
|
||||
"@petamoriken/float16": "^3.9.3",
|
||||
"@scure/base": "^1.2.6",
|
||||
"@stablelib/base64": "^2.0.1",
|
||||
"@stablelib/hex": "^2.0.1",
|
||||
"@types/node": "^22.12.0",
|
||||
"base-x": "^5.0.1",
|
||||
"base32.js": "^0.1.0",
|
||||
"base58-js": "^3.0.3",
|
||||
"base64-js": "^1.5.1",
|
||||
"bech32": "^2.0.0",
|
||||
"bs58": "^6.0.0",
|
||||
"bs58check": "^4.0.0",
|
||||
"bstring": "^0.3.9",
|
||||
"buffer": "^6.0.3",
|
||||
"c8": "^10.1.3",
|
||||
"decode-utf8": "^1.0.1",
|
||||
"electron": "36.5.0",
|
||||
"encode-utf8": "^2.0.0",
|
||||
"esbuild": "^0.27.2",
|
||||
"eslint": "^8.44.0",
|
||||
"fast-base64-decode": "^2.0.0",
|
||||
"fast-base64-encode": "^1.0.0",
|
||||
"hextreme": "^1.0.7",
|
||||
"hi-base32": "^0.5.1",
|
||||
"iconv-lite": "^0.7.0",
|
||||
"jsvu": "^3.0.3",
|
||||
"punycode": "^2.3.1",
|
||||
"text-encoding": "^0.7.0",
|
||||
"typedoc": "^0.28.16",
|
||||
"typescript": "^5.9.3",
|
||||
"uint8array-tools": "^0.0.9",
|
||||
"utf8": "^3.0.0",
|
||||
"web-streams-polyfill": "^4.2.0",
|
||||
"wif": "^5.0.0"
|
||||
},
|
||||
"prettier": "@exodus/prettier",
|
||||
"packageManager": "pnpm@10.12.1+sha256.889bac470ec93ccc3764488a19d6ba8f9c648ad5e50a9a6e4be3768a5de387a3"
|
||||
}
|
||||
159
node_modules/@exodus/bytes/single-byte.d.ts
generated
vendored
Normal file
159
node_modules/@exodus/bytes/single-byte.d.ts
generated
vendored
Normal file
@@ -0,0 +1,159 @@
|
||||
/**
|
||||
* Decode / encode the legacy single-byte encodings according to the
|
||||
* [Encoding standard](https://encoding.spec.whatwg.org/)
|
||||
* ([§9](https://encoding.spec.whatwg.org/#legacy-single-byte-encodings),
|
||||
* [§14.5](https://encoding.spec.whatwg.org/#x-user-defined)),
|
||||
* and [unicode.org](https://unicode.org/Public/MAPPINGS/ISO8859) `iso-8859-*` mappings.
|
||||
*
|
||||
* ```js
|
||||
* import { createSinglebyteDecoder, createSinglebyteEncoder } from '@exodus/bytes/single-byte.js'
|
||||
* import { windows1252toString, windows1252fromString } from '@exodus/bytes/single-byte.js'
|
||||
* import { latin1toString, latin1fromString } from '@exodus/bytes/single-byte.js'
|
||||
* ```
|
||||
*
|
||||
* > [!WARNING]
|
||||
* > This is a lower-level API for single-byte encodings.
|
||||
* > It might not match what you expect, as it supports both WHATWG and unicode.org encodings under
|
||||
 * > different names, with the main intended use case for the latter being either non-web or legacy contexts.
|
||||
* >
|
||||
* > For a safe WHATWG Encoding-compatible API, see `@exodus/bytes/encoding.js` import (and variants of it).
|
||||
* >
|
||||
* > Be sure to know what you are doing and check documentation when directly using encodings from this file.
|
||||
*
|
||||
* Supports all single-byte encodings listed in the WHATWG Encoding standard:
|
||||
* `ibm866`, `iso-8859-2`, `iso-8859-3`, `iso-8859-4`, `iso-8859-5`, `iso-8859-6`, `iso-8859-7`, `iso-8859-8`,
|
||||
* `iso-8859-8-i`, `iso-8859-10`, `iso-8859-13`, `iso-8859-14`, `iso-8859-15`, `iso-8859-16`, `koi8-r`, `koi8-u`,
|
||||
* `macintosh`, `windows-874`, `windows-1250`, `windows-1251`, `windows-1252`, `windows-1253`, `windows-1254`,
|
||||
* `windows-1255`, `windows-1256`, `windows-1257`, `windows-1258`, `x-mac-cyrillic` and `x-user-defined`.
|
||||
*
|
||||
* Also supports `iso-8859-1`, `iso-8859-9`, `iso-8859-11` as defined at
|
||||
* [unicode.org](https://unicode.org/Public/MAPPINGS/ISO8859)
|
||||
* (and all other `iso-8859-*` encodings there as they match WHATWG).
|
||||
*
|
||||
* > [!NOTE]
|
||||
* > While all `iso-8859-*` encodings supported by the [WHATWG Encoding standard](https://encoding.spec.whatwg.org/) match
|
||||
* > [unicode.org](https://unicode.org/Public/MAPPINGS/ISO8859), the WHATWG Encoding spec doesn't support
|
||||
* > `iso-8859-1`, `iso-8859-9`, `iso-8859-11`, and instead maps them as labels to `windows-1252`, `windows-1254`, `windows-874`.\
|
||||
* > `createSinglebyteDecoder()` (unlike `TextDecoder` or `legacyHookDecode()`) does not do such mapping,
|
||||
* > so its results will differ from `TextDecoder` for those encoding names.
|
||||
*
|
||||
* ```js
|
||||
* > new TextDecoder('iso-8859-1').encoding
|
||||
* 'windows-1252'
|
||||
* > new TextDecoder('iso-8859-9').encoding
|
||||
* 'windows-1254'
|
||||
* > new TextDecoder('iso-8859-11').encoding
|
||||
* 'windows-874'
|
||||
* > new TextDecoder('iso-8859-9').decode(Uint8Array.of(0x80, 0x81, 0xd0))
|
||||
* '€\x81Ğ' // this is actually decoded according to windows-1254 per TextDecoder spec
|
||||
* > createSinglebyteDecoder('iso-8859-9')(Uint8Array.of(0x80, 0x81, 0xd0))
|
||||
* '\x80\x81Ğ' // this is iso-8859-9 as defined at https://unicode.org/Public/MAPPINGS/ISO8859/8859-9.txt
|
||||
* ```
|
||||
*
|
||||
* All WHATWG Encoding spec [`windows-*` encodings](https://encoding.spec.whatwg.org/#windows-874) are supersets of
|
||||
* corresponding [unicode.org encodings](https://unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/), meaning that
|
||||
* they encode/decode all the old valid (non-replacement) strings / byte sequences identically, but can also support
|
||||
* a wider range of inputs.
|
||||
*
|
||||
* @module @exodus/bytes/single-byte.js
|
||||
*/
|
||||
|
||||
/// <reference types="node" />
|
||||
|
||||
import type { Uint8ArrayBuffer } from './array.js';
|
||||
|
||||
/**
|
||||
* Create a decoder for a supported one-byte `encoding`, given its lowercased name `encoding`.
|
||||
*
|
||||
* Returns a function `decode(arr)` that decodes bytes to a string.
|
||||
*
|
||||
* @param encoding - The encoding name (e.g., 'iso-8859-1', 'windows-1252')
|
||||
* @param loose - If true, replaces unmapped bytes with replacement character instead of throwing (default: false)
|
||||
* @returns A function that decodes bytes to string
|
||||
*/
|
||||
export function createSinglebyteDecoder(
|
||||
encoding: string,
|
||||
loose?: boolean
|
||||
): (arr: Uint8Array) => string;
|
||||
|
||||
/**
|
||||
* Create an encoder for a supported one-byte `encoding`, given its lowercased name `encoding`.
|
||||
*
|
||||
* Returns a function `encode(string)` that encodes a string to bytes.
|
||||
*
|
||||
* In `'fatal'` mode (default), will throw on non well-formed strings or any codepoints which could
|
||||
* not be encoded in the target encoding.
|
||||
*
|
||||
* @param encoding - The encoding name (e.g., 'iso-8859-1', 'windows-1252')
|
||||
* @param options - Encoding options
|
||||
* @param options.mode - Encoding mode (default: 'fatal'). Currently, only 'fatal' mode is supported.
|
||||
* @returns A function that encodes string to bytes
|
||||
*/
|
||||
export function createSinglebyteEncoder(
|
||||
encoding: string,
|
||||
options?: { mode?: 'fatal' }
|
||||
): (string: string) => Uint8ArrayBuffer;
|
||||
|
||||
/**
|
||||
* Decode `iso-8859-1` bytes to a string.
|
||||
*
|
||||
* There is no loose variant for this encoding, all bytes can be decoded.
|
||||
*
|
||||
* Same as:
|
||||
* ```js
|
||||
* const latin1toString = createSinglebyteDecoder('iso-8859-1')
|
||||
* ```
|
||||
*
|
||||
* > [!NOTE]
|
||||
* > This is different from `new TextDecoder('iso-8859-1')` and `new TextDecoder('latin1')`, as those
|
||||
* > alias to `new TextDecoder('windows-1252')`.
|
||||
*
|
||||
* @param arr - The bytes to decode
|
||||
* @returns The decoded string
|
||||
*/
|
||||
export function latin1toString(arr: Uint8Array): string;
|
||||
|
||||
/**
|
||||
* Encode a string to `iso-8859-1` bytes.
|
||||
*
|
||||
* Throws on non well-formed strings or any codepoints which could not be encoded in `iso-8859-1`.
|
||||
*
|
||||
* Same as:
|
||||
* ```js
|
||||
* const latin1fromString = createSinglebyteEncoder('iso-8859-1', { mode: 'fatal' })
|
||||
* ```
|
||||
*
|
||||
* @param string - The string to encode
|
||||
* @returns The encoded bytes
|
||||
*/
|
||||
export function latin1fromString(string: string): Uint8ArrayBuffer;
|
||||
|
||||
/**
|
||||
* Decode `windows-1252` bytes to a string.
|
||||
*
|
||||
* There is no loose variant for this encoding, all bytes can be decoded.
|
||||
*
|
||||
* Same as:
|
||||
* ```js
|
||||
* const windows1252toString = createSinglebyteDecoder('windows-1252')
|
||||
* ```
|
||||
*
|
||||
* @param arr - The bytes to decode
|
||||
* @returns The decoded string
|
||||
*/
|
||||
export function windows1252toString(arr: Uint8Array): string;
|
||||
|
||||
/**
|
||||
* Encode a string to `windows-1252` bytes.
|
||||
*
|
||||
* Throws on non well-formed strings or any codepoints which could not be encoded in `windows-1252`.
|
||||
*
|
||||
* Same as:
|
||||
* ```js
|
||||
* const windows1252fromString = createSinglebyteEncoder('windows-1252', { mode: 'fatal' })
|
||||
* ```
|
||||
*
|
||||
* @param string - The string to encode
|
||||
* @returns The encoded bytes
|
||||
*/
|
||||
export function windows1252fromString(string: string): Uint8ArrayBuffer;
|
||||
135
node_modules/@exodus/bytes/single-byte.js
generated
vendored
Normal file
135
node_modules/@exodus/bytes/single-byte.js
generated
vendored
Normal file
@@ -0,0 +1,135 @@
|
||||
import { assertUint8 } from './assert.js'
|
||||
import { canDecoders, nativeEncoder, skipWeb, E_STRING } from './fallback/_utils.js'
|
||||
import { encodeAscii, encodeAsciiPrefix, encodeLatin1 } from './fallback/latin1.js'
|
||||
import { assertEncoding, encodingDecoder, encodeMap, E_STRICT } from './fallback/single-byte.js'
|
||||
|
||||
const { TextDecoder, btoa } = globalThis
|
||||
|
||||
let windows1252works
|
||||
|
||||
// prettier-ignore
|
||||
const skipNative = new Set([
|
||||
'iso-8859-1', 'iso-8859-9', 'iso-8859-11', // non-WHATWG
|
||||
'iso-8859-6', 'iso-8859-8', 'iso-8859-8-i', // slow in all 3 engines
|
||||
'iso-8859-16', // iso-8859-16 is somehow broken in WebKit, at least on CI
|
||||
])
|
||||
|
||||
function shouldUseNative(enc) {
|
||||
// https://issues.chromium.org/issues/468458388
|
||||
// Also might be incorrectly imlemented on platforms as Latin1 (e.g. in Node.js) or regress
|
||||
// This is the most significant single-byte encoding, 'ascii' and 'latin1' alias to this
|
||||
// Even after Chrome bug is fixed, this should serve as a quick correctness check that it's actually windows-1252
|
||||
if (enc === 'windows-1252') {
|
||||
if (windows1252works === undefined) {
|
||||
windows1252works = false
|
||||
try {
|
||||
const u = new Uint8Array(9) // using 9 bytes is significant to catch the bug
|
||||
u[8] = 128
|
||||
windows1252works = new TextDecoder(enc).decode(u).codePointAt(8) === 0x20_ac
|
||||
} catch {}
|
||||
}
|
||||
|
||||
return windows1252works
|
||||
}
|
||||
|
||||
return !skipNative.has(enc)
|
||||
}
|
||||
|
||||
export function createSinglebyteDecoder(encoding, loose = false) {
|
||||
if (typeof loose !== 'boolean') throw new TypeError('loose option should be boolean')
|
||||
assertEncoding(encoding)
|
||||
|
||||
if (canDecoders && shouldUseNative(encoding)) {
|
||||
// In try, as not all encodings might be implemented in all engines which have native TextDecoder
|
||||
try {
|
||||
const decoder = new TextDecoder(encoding, { fatal: !loose })
|
||||
return (arr) => {
|
||||
assertUint8(arr)
|
||||
if (arr.byteLength === 0) return ''
|
||||
return decoder.decode(arr)
|
||||
}
|
||||
} catch {}
|
||||
}
|
||||
|
||||
const jsDecoder = encodingDecoder(encoding)
|
||||
return (arr) => {
|
||||
assertUint8(arr)
|
||||
if (arr.byteLength === 0) return ''
|
||||
return jsDecoder(arr, loose)
|
||||
}
|
||||
}
|
||||
|
||||
const NON_LATIN = /[^\x00-\xFF]/ // eslint-disable-line no-control-regex
|
||||
|
||||
function encode(s, m) {
|
||||
const len = s.length
|
||||
const x = new Uint8Array(len)
|
||||
let i = nativeEncoder ? 0 : encodeAsciiPrefix(x, s)
|
||||
|
||||
for (const len3 = len - 3; i < len3; i += 4) {
|
||||
const x0 = s.charCodeAt(i), x1 = s.charCodeAt(i + 1), x2 = s.charCodeAt(i + 2), x3 = s.charCodeAt(i + 3) // prettier-ignore
|
||||
const c0 = m[x0], c1 = m[x1], c2 = m[x2], c3 = m[x3] // prettier-ignore
|
||||
if ((!c0 && x0) || (!c1 && x1) || (!c2 && x2) || (!c3 && x3)) return null
|
||||
|
||||
x[i] = c0
|
||||
x[i + 1] = c1
|
||||
x[i + 2] = c2
|
||||
x[i + 3] = c3
|
||||
}
|
||||
|
||||
for (; i < len; i++) {
|
||||
const x0 = s.charCodeAt(i)
|
||||
const c0 = m[x0]
|
||||
if (!c0 && x0) return null
|
||||
x[i] = c0
|
||||
}
|
||||
|
||||
return x
|
||||
}
|
||||
|
||||
// fromBase64+btoa path is faster on everything where fromBase64 is fast
|
||||
const useLatin1btoa = Uint8Array.fromBase64 && btoa && !skipWeb
|
||||
|
||||
export function createSinglebyteEncoder(encoding, { mode = 'fatal' } = {}) {
|
||||
// TODO: replacement, truncate (replacement will need varying length)
|
||||
if (mode !== 'fatal') throw new Error('Unsupported mode')
|
||||
const m = encodeMap(encoding) // asserts
|
||||
const isLatin1 = encoding === 'iso-8859-1'
|
||||
|
||||
// No single-byte encoder produces surrogate pairs, so any surrogate is invalid
|
||||
// This needs special treatment only to decide how many replacement chars to output, one or two
|
||||
// Not much use in running isWellFormed, most likely cause of error is unmapped chars, not surrogate pairs
|
||||
return (s) => {
|
||||
if (typeof s !== 'string') throw new TypeError(E_STRING)
|
||||
if (isLatin1) {
|
||||
// max limit is to not produce base64 strings that are too long
|
||||
if (useLatin1btoa && s.length >= 1024 && s.length < 1e8) {
|
||||
try {
|
||||
return Uint8Array.fromBase64(btoa(s)) // fails on non-latin1
|
||||
} catch {
|
||||
throw new TypeError(E_STRICT)
|
||||
}
|
||||
}
|
||||
|
||||
if (NON_LATIN.test(s)) throw new TypeError(E_STRICT)
|
||||
return encodeLatin1(s)
|
||||
}
|
||||
|
||||
// Instead of an ASCII regex check, encode optimistically - this is faster
|
||||
// Check for 8-bit string with a regex though, this is instant on 8-bit strings so doesn't hurt the ASCII fast path
|
||||
if (nativeEncoder && !NON_LATIN.test(s)) {
|
||||
try {
|
||||
return encodeAscii(s, E_STRICT)
|
||||
} catch {}
|
||||
}
|
||||
|
||||
const res = encode(s, m)
|
||||
if (!res) throw new TypeError(E_STRICT)
|
||||
return res
|
||||
}
|
||||
}
|
||||
|
||||
export const latin1toString = createSinglebyteDecoder('iso-8859-1')
|
||||
export const latin1fromString = createSinglebyteEncoder('iso-8859-1')
|
||||
export const windows1252toString = createSinglebyteDecoder('windows-1252')
|
||||
export const windows1252fromString = createSinglebyteEncoder('windows-1252')
|
||||
120
node_modules/@exodus/bytes/single-byte.node.js
generated
vendored
Normal file
120
node_modules/@exodus/bytes/single-byte.node.js
generated
vendored
Normal file
@@ -0,0 +1,120 @@
|
||||
import { assertUint8 } from './assert.js'
|
||||
import { isAscii } from 'node:buffer'
|
||||
import { isDeno, isLE, toBuf, E_STRING } from './fallback/_utils.js'
|
||||
import { asciiPrefix } from './fallback/latin1.js'
|
||||
import { encodingMapper, encodingDecoder, encodeMap, E_STRICT } from './fallback/single-byte.js'
|
||||
|
||||
function latin1Prefix(arr, start) {
|
||||
let p = start | 0
|
||||
const length = arr.length
|
||||
for (const len3 = length - 3; p < len3; p += 4) {
|
||||
if ((arr[p] & 0xe0) === 0x80) return p
|
||||
if ((arr[p + 1] & 0xe0) === 0x80) return p + 1
|
||||
if ((arr[p + 2] & 0xe0) === 0x80) return p + 2
|
||||
if ((arr[p + 3] & 0xe0) === 0x80) return p + 3
|
||||
}
|
||||
|
||||
for (; p < length; p++) {
|
||||
if ((arr[p] & 0xe0) === 0x80) return p
|
||||
}
|
||||
|
||||
return length
|
||||
}
|
||||
|
||||
export function createSinglebyteDecoder(encoding, loose = false) {
|
||||
if (typeof loose !== 'boolean') throw new TypeError('loose option should be boolean')
|
||||
if (isDeno) {
|
||||
const jsDecoder = encodingDecoder(encoding) // asserts
|
||||
return (arr) => {
|
||||
assertUint8(arr)
|
||||
if (arr.byteLength === 0) return ''
|
||||
if (isAscii(arr)) return toBuf(arr).toString()
|
||||
return jsDecoder(arr, loose) // somewhy faster on Deno anyway, TODO: optimize?
|
||||
}
|
||||
}
|
||||
|
||||
const isLatin1 = encoding === 'iso-8859-1'
|
||||
const latin1path = encoding === 'windows-1252'
|
||||
const { incomplete, mapper } = encodingMapper(encoding) // asserts
|
||||
return (arr) => {
|
||||
assertUint8(arr)
|
||||
if (arr.byteLength === 0) return ''
|
||||
if (isLatin1 || isAscii(arr)) return toBuf(arr).latin1Slice() // .latin1Slice is faster than .asciiSlice
|
||||
|
||||
// Node.js TextDecoder is broken, so we can't use it. It's also slow anyway
|
||||
|
||||
let prefixBytes = asciiPrefix(arr)
|
||||
let prefix = ''
|
||||
if (latin1path) prefixBytes = latin1Prefix(arr, prefixBytes)
|
||||
if (prefixBytes > 64 || prefixBytes === arr.length) {
|
||||
prefix = toBuf(arr).latin1Slice(0, prefixBytes) // .latin1Slice is faster than .asciiSlice
|
||||
if (prefixBytes === arr.length) return prefix
|
||||
}
|
||||
|
||||
const b = toBuf(mapper(arr, prefix.length)) // prefix.length can mismatch prefixBytes
|
||||
if (!isLE) b.swap16()
|
||||
const suffix = b.ucs2Slice(0, b.byteLength)
|
||||
if (!loose && incomplete && suffix.includes('\uFFFD')) throw new TypeError(E_STRICT)
|
||||
return prefix + suffix
|
||||
}
|
||||
}
|
||||
|
||||
const NON_LATIN = /[^\x00-\xFF]/ // eslint-disable-line no-control-regex
|
||||
|
||||
function encode(s, m) {
|
||||
const len = s.length
|
||||
let i = 0
|
||||
const b = Buffer.from(s, 'utf-16le') // aligned
|
||||
if (!isLE) b.swap16()
|
||||
const x = new Uint16Array(b.buffer, b.byteOffset, b.byteLength / 2)
|
||||
for (const len3 = len - 3; i < len3; i += 4) {
|
||||
const x0 = x[i], x1 = x[i + 1], x2 = x[i + 2], x3 = x[i + 3] // prettier-ignore
|
||||
const c0 = m[x0], c1 = m[x1], c2 = m[x2], c3 = m[x3] // prettier-ignore
|
||||
if (!(c0 && c1 && c2 && c3) && ((!c0 && x0) || (!c1 && x1) || (!c2 && x2) || (!c3 && x3))) return null // prettier-ignore
|
||||
x[i] = c0
|
||||
x[i + 1] = c1
|
||||
x[i + 2] = c2
|
||||
x[i + 3] = c3
|
||||
}
|
||||
|
||||
for (; i < len; i++) {
|
||||
const x0 = x[i]
|
||||
const c0 = m[x0]
|
||||
if (!c0 && x0) return null
|
||||
x[i] = c0
|
||||
}
|
||||
|
||||
return new Uint8Array(x)
|
||||
}
|
||||
|
||||
export function createSinglebyteEncoder(encoding, { mode = 'fatal' } = {}) {
|
||||
// TODO: replacement, truncate (replacement will need varying length)
|
||||
if (mode !== 'fatal') throw new Error('Unsupported mode')
|
||||
const m = encodeMap(encoding) // asserts
|
||||
const isLatin1 = encoding === 'iso-8859-1'
|
||||
|
||||
return (s) => {
|
||||
if (typeof s !== 'string') throw new TypeError(E_STRING)
|
||||
if (isLatin1) {
|
||||
if (NON_LATIN.test(s)) throw new TypeError(E_STRICT)
|
||||
const b = Buffer.from(s, 'latin1')
|
||||
return new Uint8Array(b.buffer, b.byteOffset, b.byteLength)
|
||||
}
|
||||
|
||||
// Instead of an ASCII regex check, encode optimistically - this is faster
|
||||
// Check for 8-bit string with a regex though, this is instant on 8-bit strings so doesn't hurt the ASCII fast path
|
||||
if (!NON_LATIN.test(s)) {
|
||||
const b = Buffer.from(s, 'utf8') // ascii/latin1 coerces, we need to check
|
||||
if (b.length === s.length) return new Uint8Array(b.buffer, b.byteOffset, b.byteLength)
|
||||
}
|
||||
|
||||
const res = encode(s, m)
|
||||
if (!res) throw new TypeError(E_STRICT)
|
||||
return res
|
||||
}
|
||||
}
|
||||
|
||||
export const latin1toString = createSinglebyteDecoder('iso-8859-1')
|
||||
export const latin1fromString = createSinglebyteEncoder('iso-8859-1')
|
||||
export const windows1252toString = createSinglebyteDecoder('windows-1252')
|
||||
export const windows1252fromString = createSinglebyteEncoder('windows-1252')
|
||||
92
node_modules/@exodus/bytes/utf16.d.ts
generated
vendored
Normal file
92
node_modules/@exodus/bytes/utf16.d.ts
generated
vendored
Normal file
@@ -0,0 +1,92 @@
|
||||
/**
|
||||
* UTF-16 encoding/decoding
|
||||
*
|
||||
* ```js
|
||||
* import { utf16fromString, utf16toString } from '@exodus/bytes/utf16.js'
|
||||
*
|
||||
* // loose
|
||||
* import { utf16fromStringLoose, utf16toStringLoose } from '@exodus/bytes/utf16.js'
|
||||
* ```
|
||||
*
|
||||
* _These methods by design encode/decode BOM (codepoint `U+FEFF` Byte Order Mark) as-is._\
|
||||
* _If you need BOM handling or detection, use `@exodus/bytes/encoding.js`_
|
||||
*
|
||||
* @module @exodus/bytes/utf16.js
|
||||
*/
|
||||
|
||||
/// <reference types="node" />
|
||||
|
||||
import type { Uint8ArrayBuffer, Uint16ArrayBuffer } from './array.js';
|
||||
|
||||
/**
|
||||
* Output format for UTF-16 encoding
|
||||
*/
|
||||
export type Utf16Format = 'uint16' | 'uint8-le' | 'uint8-be';
|
||||
|
||||
/**
|
||||
* Encode a string to UTF-16 bytes (strict mode)
|
||||
*
|
||||
* Throws on invalid Unicode (unpaired surrogates)
|
||||
*
|
||||
* @param string - The string to encode
|
||||
* @param format - Output format (default: 'uint16')
|
||||
* @returns The encoded bytes
|
||||
*/
|
||||
export function utf16fromString(string: string, format?: 'uint16'): Uint16ArrayBuffer;
|
||||
export function utf16fromString(string: string, format: 'uint8-le'): Uint8ArrayBuffer;
|
||||
export function utf16fromString(string: string, format: 'uint8-be'): Uint8ArrayBuffer;
|
||||
export function utf16fromString(string: string, format?: Utf16Format): Uint16ArrayBuffer | Uint8ArrayBuffer;
|
||||
|
||||
/**
|
||||
* Encode a string to UTF-16 bytes (loose mode)
|
||||
*
|
||||
* Replaces invalid Unicode (unpaired surrogates) with replacement codepoints `U+FFFD`
|
||||
* per [WHATWG Encoding](https://encoding.spec.whatwg.org/) specification.
|
||||
*
|
||||
* _Such replacement is a non-injective function, is irreversible and causes collisions.\
|
||||
* Prefer using strict throwing methods for cryptography applications._
|
||||
*
|
||||
* @param string - The string to encode
|
||||
* @param format - Output format (default: 'uint16')
|
||||
* @returns The encoded bytes
|
||||
*/
|
||||
export function utf16fromStringLoose(string: string, format?: 'uint16'): Uint16ArrayBuffer;
|
||||
export function utf16fromStringLoose(string: string, format: 'uint8-le'): Uint8ArrayBuffer;
|
||||
export function utf16fromStringLoose(string: string, format: 'uint8-be'): Uint8ArrayBuffer;
|
||||
export function utf16fromStringLoose(string: string, format?: Utf16Format): Uint16ArrayBuffer | Uint8ArrayBuffer;
|
||||
|
||||
/**
|
||||
* Decode UTF-16 bytes to a string (strict mode)
|
||||
*
|
||||
* Throws on invalid UTF-16 byte sequences
|
||||
*
|
||||
* Throws on non-even byte length.
|
||||
*
|
||||
* @param arr - The bytes to decode
|
||||
* @param format - Input format (default: 'uint16')
|
||||
* @returns The decoded string
|
||||
*/
|
||||
export function utf16toString(arr: Uint16Array, format?: 'uint16'): string;
|
||||
export function utf16toString(arr: Uint8Array, format: 'uint8-le'): string;
|
||||
export function utf16toString(arr: Uint8Array, format: 'uint8-be'): string;
|
||||
export function utf16toString(arr: Uint16Array | Uint8Array, format?: Utf16Format): string;
|
||||
|
||||
/**
|
||||
* Decode UTF-16 bytes to a string (loose mode)
|
||||
*
|
||||
* Replaces invalid UTF-16 byte sequences with replacement codepoints `U+FFFD`
|
||||
* per [WHATWG Encoding](https://encoding.spec.whatwg.org/) specification.
|
||||
*
|
||||
* _Such replacement is a non-injective function, is irreversible and causes collisions.\
|
||||
* Prefer using strict throwing methods for cryptography applications._
|
||||
*
|
||||
* Throws on non-even byte length.
|
||||
*
|
||||
* @param arr - The bytes to decode
|
||||
* @param format - Input format (default: 'uint16')
|
||||
* @returns The decoded string
|
||||
*/
|
||||
export function utf16toStringLoose(arr: Uint16Array, format?: 'uint16'): string;
|
||||
export function utf16toStringLoose(arr: Uint8Array, format: 'uint8-le'): string;
|
||||
export function utf16toStringLoose(arr: Uint8Array, format: 'uint8-be'): string;
|
||||
export function utf16toStringLoose(arr: Uint16Array | Uint8Array, format?: Utf16Format): string;
|
||||
75
node_modules/@exodus/bytes/utf16.js
generated
vendored
Normal file
75
node_modules/@exodus/bytes/utf16.js
generated
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
import * as js from './fallback/utf16.js'
|
||||
import { canDecoders, isLE, E_STRING } from './fallback/_utils.js'
|
||||
|
||||
const { TextDecoder } = globalThis // Buffer is optional
|
||||
const ignoreBOM = true
|
||||
const decoderFatalLE = canDecoders ? new TextDecoder('utf-16le', { ignoreBOM, fatal: true }) : null
|
||||
const decoderLooseLE = canDecoders ? new TextDecoder('utf-16le', { ignoreBOM }) : null
|
||||
const decoderFatalBE = canDecoders ? new TextDecoder('utf-16be', { ignoreBOM, fatal: true }) : null
|
||||
const decoderLooseBE = canDecoders ? new TextDecoder('utf-16be', { ignoreBOM }) : null
|
||||
const decoderFatal16 = isLE ? decoderFatalLE : decoderFatalBE
|
||||
const decoderLoose16 = isLE ? decoderLooseLE : decoderLooseBE
|
||||
const { isWellFormed, toWellFormed } = String.prototype
|
||||
|
||||
const { E_STRICT, E_STRICT_UNICODE } = js
|
||||
|
||||
// Unlike utf8, operates on Uint16Arrays by default
|
||||
|
||||
const to8 = (a) => new Uint8Array(a.buffer, a.byteOffset, a.byteLength)
|
||||
|
||||
function encode(str, loose = false, format = 'uint16') {
|
||||
if (typeof str !== 'string') throw new TypeError(E_STRING)
|
||||
if (format !== 'uint16' && format !== 'uint8-le' && format !== 'uint8-be') {
|
||||
throw new TypeError('Unknown format')
|
||||
}
|
||||
|
||||
const shouldSwap = (isLE && format === 'uint8-be') || (!isLE && format === 'uint8-le')
|
||||
|
||||
// On v8 and SpiderMonkey, check via isWellFormed is faster than js
|
||||
// On JSC, check during loop is faster than isWellFormed
|
||||
// If isWellFormed is available, we skip check during decoding and recheck after
|
||||
// If isWellFormed is unavailable, we check in js during decoding
|
||||
if (!loose && isWellFormed && !isWellFormed.call(str)) throw new TypeError(E_STRICT_UNICODE)
|
||||
const u16 = js.encode(str, loose, !loose && isWellFormed, shouldSwap)
|
||||
|
||||
if (format === 'uint8-le' || format === 'uint8-be') return to8(u16) // Already swapped
|
||||
if (format === 'uint16') return u16
|
||||
/* c8 ignore next */
|
||||
throw new Error('Unreachable')
|
||||
}
|
||||
|
||||
function decode(input, loose = false, format = 'uint16') {
|
||||
let u16
|
||||
switch (format) {
|
||||
case 'uint16':
|
||||
if (!(input instanceof Uint16Array)) throw new TypeError('Expected an Uint16Array')
|
||||
if (canDecoders) return loose ? decoderLoose16.decode(input) : decoderFatal16.decode(input)
|
||||
u16 = input
|
||||
break
|
||||
case 'uint8-le':
|
||||
if (!(input instanceof Uint8Array)) throw new TypeError('Expected an Uint8Array')
|
||||
if (input.byteLength % 2 !== 0) throw new TypeError('Expected even number of bytes')
|
||||
if (canDecoders) return loose ? decoderLooseLE.decode(input) : decoderFatalLE.decode(input)
|
||||
u16 = js.to16input(input, true)
|
||||
break
|
||||
case 'uint8-be':
|
||||
if (!(input instanceof Uint8Array)) throw new TypeError('Expected an Uint8Array')
|
||||
if (input.byteLength % 2 !== 0) throw new TypeError('Expected even number of bytes')
|
||||
if (canDecoders) return loose ? decoderLooseBE.decode(input) : decoderFatalBE.decode(input)
|
||||
u16 = js.to16input(input, false)
|
||||
break
|
||||
default:
|
||||
throw new TypeError('Unknown format')
|
||||
}
|
||||
|
||||
const str = js.decode(u16, loose, (!loose && isWellFormed) || (loose && toWellFormed))
|
||||
if (!loose && isWellFormed && !isWellFormed.call(str)) throw new TypeError(E_STRICT)
|
||||
if (loose && toWellFormed) return toWellFormed.call(str)
|
||||
|
||||
return str
|
||||
}
|
||||
|
||||
export const utf16fromString = (str, format = 'uint16') => encode(str, false, format)
|
||||
export const utf16fromStringLoose = (str, format = 'uint16') => encode(str, true, format)
|
||||
export const utf16toString = (arr, format = 'uint16') => decode(arr, false, format)
|
||||
export const utf16toStringLoose = (arr, format = 'uint16') => decode(arr, true, format)
|
||||
85
node_modules/@exodus/bytes/utf16.node.js
generated
vendored
Normal file
85
node_modules/@exodus/bytes/utf16.node.js
generated
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
import { isDeno, isLE, E_STRING } from './fallback/_utils.js'
|
||||
import { E_STRICT, E_STRICT_UNICODE } from './fallback/utf16.js'
|
||||
|
||||
if (Buffer.TYPED_ARRAY_SUPPORT) throw new Error('Unexpected Buffer polyfill')
|
||||
|
||||
const { isWellFormed, toWellFormed } = String.prototype
|
||||
const to8 = (a) => new Uint8Array(a.buffer, a.byteOffset, a.byteLength)
|
||||
|
||||
// Unlike utf8, operates on Uint16Arrays by default
|
||||
|
||||
function encode(str, loose = false, format = 'uint16') {
|
||||
if (typeof str !== 'string') throw new TypeError(E_STRING)
|
||||
if (format !== 'uint16' && format !== 'uint8-le' && format !== 'uint8-be') {
|
||||
throw new TypeError('Unknown format')
|
||||
}
|
||||
|
||||
if (loose) {
|
||||
str = toWellFormed.call(str) // Buffer doesn't do this with utf16 encoding
|
||||
} else if (!isWellFormed.call(str)) {
|
||||
throw new TypeError(E_STRICT_UNICODE)
|
||||
}
|
||||
|
||||
const ble = Buffer.from(str, 'utf-16le')
|
||||
|
||||
if (format === 'uint8-le') return to8(ble)
|
||||
if (format === 'uint8-be') return to8(ble.swap16())
|
||||
if (format === 'uint16') {
|
||||
const b = ble.byteOffset % 2 === 0 ? ble : Buffer.from(ble) // it should be already aligned, but just in case
|
||||
if (!isLE) b.swap16()
|
||||
return new Uint16Array(b.buffer, b.byteOffset, b.byteLength / 2)
|
||||
}
|
||||
|
||||
/* c8 ignore next */
|
||||
throw new Error('Unreachable')
|
||||
}
|
||||
|
||||
// Convert to Buffer view or a swapped Buffer copy
|
||||
const swapped = (x, swap) => {
|
||||
const b = Buffer.from(x.buffer, x.byteOffset, x.byteLength)
|
||||
return swap ? Buffer.from(b).swap16() : b
|
||||
}
|
||||
|
||||
// We skip TextDecoder on Node.js, as it's is somewhy significantly slower than Buffer for utf16
|
||||
// Also, it incorrectly misses replacements with Node.js is built without ICU, we fix that
|
||||
function decodeNode(input, loose = false, format = 'uint16') {
|
||||
let ble
|
||||
if (format === 'uint16') {
|
||||
if (!(input instanceof Uint16Array)) throw new TypeError('Expected an Uint16Array')
|
||||
ble = swapped(input, !isLE)
|
||||
} else if (format === 'uint8-le' || format === 'uint8-be') {
|
||||
if (!(input instanceof Uint8Array)) throw new TypeError('Expected an Uint8Array')
|
||||
if (input.byteLength % 2 !== 0) throw new TypeError('Expected even number of bytes')
|
||||
ble = swapped(input, format === 'uint8-be')
|
||||
} else {
|
||||
throw new TypeError('Unknown format')
|
||||
}
|
||||
|
||||
const str = ble.ucs2Slice(0, ble.byteLength)
|
||||
if (loose) return toWellFormed.call(str)
|
||||
if (isWellFormed.call(str)) return str
|
||||
throw new TypeError(E_STRICT)
|
||||
}
|
||||
|
||||
function decodeDecoder(input, loose = false, format = 'uint16') {
|
||||
let encoding
|
||||
if (format === 'uint16') {
|
||||
if (!(input instanceof Uint16Array)) throw new TypeError('Expected an Uint16Array')
|
||||
encoding = isLE ? 'utf-16le' : 'utf-16be'
|
||||
} else if (format === 'uint8-le' || format === 'uint8-be') {
|
||||
if (!(input instanceof Uint8Array)) throw new TypeError('Expected an Uint8Array')
|
||||
if (input.byteLength % 2 !== 0) throw new TypeError('Expected even number of bytes')
|
||||
encoding = format === 'uint8-le' ? 'utf-16le' : 'utf-16be'
|
||||
} else {
|
||||
throw new TypeError('Unknown format')
|
||||
}
|
||||
|
||||
return new TextDecoder(encoding, { ignoreBOM: true, fatal: !loose }).decode(input) // TODO: cache decoder?
|
||||
}
|
||||
|
||||
const decode = isDeno ? decodeDecoder : decodeNode
|
||||
|
||||
export const utf16fromString = (str, format = 'uint16') => encode(str, false, format)
|
||||
export const utf16fromStringLoose = (str, format = 'uint16') => encode(str, true, format)
|
||||
export const utf16toString = (arr, format = 'uint16') => decode(arr, false, format)
|
||||
export const utf16toStringLoose = (arr, format = 'uint16') => decode(arr, true, format)
|
||||
96
node_modules/@exodus/bytes/utf8.d.ts
generated
vendored
Normal file
96
node_modules/@exodus/bytes/utf8.d.ts
generated
vendored
Normal file
@@ -0,0 +1,96 @@
|
||||
/**
|
||||
* UTF-8 encoding/decoding
|
||||
*
|
||||
* ```js
|
||||
* import { utf8fromString, utf8toString } from '@exodus/bytes/utf8.js'
|
||||
*
|
||||
* // loose
|
||||
* import { utf8fromStringLoose, utf8toStringLoose } from '@exodus/bytes/utf8.js'
|
||||
* ```
|
||||
*
|
||||
* _These methods by design encode/decode BOM (codepoint `U+FEFF` Byte Order Mark) as-is._\
|
||||
* _If you need BOM handling or detection, use `@exodus/bytes/encoding.js`_
|
||||
*
|
||||
* @module @exodus/bytes/utf8.js
|
||||
*/
|
||||
|
||||
/// <reference types="node" />
|
||||
|
||||
import type { OutputFormat, Uint8ArrayBuffer } from './array.js';
|
||||
|
||||
/**
|
||||
* Encode a string to UTF-8 bytes (strict mode)
|
||||
*
|
||||
* Throws on invalid Unicode (unpaired surrogates)
|
||||
*
|
||||
* This is similar to the following snippet (but works on all engines):
|
||||
* ```js
|
||||
* // Strict encode, requiring Unicode codepoints to be valid
|
||||
* if (typeof string !== 'string' || !string.isWellFormed()) throw new TypeError()
|
||||
* return new TextEncoder().encode(string)
|
||||
* ```
|
||||
*
|
||||
* @param string - The string to encode
|
||||
* @param format - Output format (default: 'uint8')
|
||||
* @returns The encoded bytes
|
||||
*/
|
||||
export function utf8fromString(string: string, format?: 'uint8'): Uint8ArrayBuffer;
|
||||
export function utf8fromString(string: string, format: 'buffer'): Buffer;
|
||||
export function utf8fromString(string: string, format?: OutputFormat): Uint8ArrayBuffer | Buffer;
|
||||
|
||||
/**
|
||||
* Encode a string to UTF-8 bytes (loose mode)
|
||||
*
|
||||
* Replaces invalid Unicode (unpaired surrogates) with replacement codepoints `U+FFFD`
|
||||
* per [WHATWG Encoding](https://encoding.spec.whatwg.org/) specification.
|
||||
*
|
||||
* _Such replacement is a non-injective function, is irreversable and causes collisions.\
|
||||
* Prefer using strict throwing methods for cryptography applications._
|
||||
*
|
||||
* This is similar to the following snippet (but works on all engines):
|
||||
* ```js
|
||||
* // Loose encode, replacing invalid Unicode codepoints with U+FFFD
|
||||
* if (typeof string !== 'string') throw new TypeError()
|
||||
* return new TextEncoder().encode(string)
|
||||
* ```
|
||||
*
|
||||
* @param string - The string to encode
|
||||
* @param format - Output format (default: 'uint8')
|
||||
* @returns The encoded bytes
|
||||
*/
|
||||
export function utf8fromStringLoose(string: string, format?: 'uint8'): Uint8ArrayBuffer;
|
||||
export function utf8fromStringLoose(string: string, format: 'buffer'): Buffer;
|
||||
export function utf8fromStringLoose(
|
||||
string: string,
|
||||
format?: OutputFormat
|
||||
): Uint8ArrayBuffer | Buffer;
|
||||
|
||||
/**
|
||||
* Decode UTF-8 bytes to a string (strict mode)
|
||||
*
|
||||
* Throws on invalid UTF-8 byte sequences
|
||||
*
|
||||
* This is similar to `new TextDecoder('utf-8', { fatal: true, ignoreBOM: true }).decode(arr)`,
|
||||
* but works on all engines.
|
||||
*
|
||||
* @param arr - The bytes to decode
|
||||
* @returns The decoded string
|
||||
*/
|
||||
export function utf8toString(arr: Uint8Array): string;
|
||||
|
||||
/**
|
||||
* Decode UTF-8 bytes to a string (loose mode)
|
||||
*
|
||||
* Replaces invalid UTF-8 byte sequences with replacement codepoints `U+FFFD`
|
||||
* per [WHATWG Encoding](https://encoding.spec.whatwg.org/) specification.
|
||||
*
|
||||
* _Such replacement is a non-injective function, is irreversable and causes collisions.\
|
||||
* Prefer using strict throwing methods for cryptography applications._
|
||||
*
|
||||
* This is similar to `new TextDecoder('utf-8', { ignoreBOM: true }).decode(arr)`,
|
||||
* but works on all engines.
|
||||
*
|
||||
* @param arr - The bytes to decode
|
||||
* @returns The decoded string
|
||||
*/
|
||||
export function utf8toStringLoose(arr: Uint8Array): string;
|
||||
85
node_modules/@exodus/bytes/utf8.js
generated
vendored
Normal file
85
node_modules/@exodus/bytes/utf8.js
generated
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
import { assertUint8 } from './assert.js'
|
||||
import { typedView } from './array.js'
|
||||
import { isHermes, nativeDecoder, nativeEncoder, E_STRING } from './fallback/_utils.js'
|
||||
import { asciiPrefix, decodeLatin1 } from './fallback/latin1.js'
|
||||
import * as js from './fallback/utf8.js'
|
||||
|
||||
const { TextDecoder, decodeURIComponent, escape } = globalThis // Buffer is optional
// ignoreBOM: true means that BOM will be left as-is, i.e. will be present in the output
// We don't want to strip anything unexpectedly
// Loose decoding reuses the shared native decoder from fallback/_utils.js
// (assumed to be constructed with ignoreBOM: true — see note above; TODO confirm in _utils)
const decoderLoose = nativeDecoder
// Strict decoding needs its own fatal decoder; null when there is no native TextDecoder at all
const decoderFatal = nativeDecoder
  ? new TextDecoder('utf-8', { ignoreBOM: true, fatal: true })
  : null
// Unbound String.prototype.isWellFormed (ES2024); undefined on older engines, so callers feature-check it
const { isWellFormed } = String.prototype

// Error messages shared with the pure-JS fallback implementation
const { E_STRICT, E_STRICT_UNICODE } = js

const shouldUseEscapePath = isHermes // faster only on Hermes, js path beats it on normal engines
|
||||
|
||||
// Post-check that turns a loose native encode into a strict one.
// Native TextEncoder.encode() never throws — it substitutes U+FFFD for lone surrogates
// (whose UTF-8 bytes are EF BF BD, which is what we scan for below). In strict mode
// (loose = false) such substitution must become a TypeError instead.
//   str: the original input string
//   res: the bytes produced by the native encoder
// Returns res unchanged when the encode was faithful.
function deLoose(str, loose, res) {
  if (loose || str.length === res.length) return res // length is equal only for ascii, which is automatically fine
  if (isWellFormed) {
    // We have a fast native method
    if (isWellFormed.call(str)) return res
    throw new TypeError(E_STRICT_UNICODE)
  }

  // Recheck if the string was encoded correctly
  let start = 0
  const last = res.length - 3
  // Search for EFBFBD (3-byte sequence)
  while (start <= last) {
    const pos = res.indexOf(0xef, start)
    if (pos === -1 || pos > last) break
    start = pos + 1
    if (res[pos + 1] === 0xbf && res[pos + 2] === 0xbd) {
      // Found a replacement char in output, need to recheck if we encoded the input correctly
      if (!nativeDecoder && str.length < 1e7) {
        // This is ~2x faster than decode in Hermes
        // encodeURI throws URIError on lone surrogates, so returning at all means str is well-formed
        try {
          if (encodeURI(str) !== null) return res // guard against optimizing out
        } catch {}
      } else if (str === decode(res)) return res
      throw new TypeError(E_STRICT_UNICODE)
    }
  }

  return res
}
|
||||
|
||||
/**
 * Encode a string to UTF-8 bytes.
 *
 * @param {string} str - Input string; a TypeError is thrown for non-strings.
 * @param {boolean} [loose=false] - When false, lone surrogates raise instead of
 *   silently becoming U+FFFD replacement bytes.
 * @returns {Uint8Array} The UTF-8 encoded bytes.
 */
function encode(str, loose = false) {
  if (typeof str !== 'string') throw new TypeError(E_STRING)
  if (str.length === 0) return new Uint8Array() // faster than Uint8Array.of
  if (!nativeEncoder) {
    // No reason to use unescape + encodeURIComponent: it's slower than JS on normal engines, and modern Hermes already has TextEncoder
    return js.encode(str, loose)
  }

  // Native encoder is always loose; deLoose re-enforces strictness when requested
  const bytes = nativeEncoder.encode(str)
  return deLoose(str, loose, bytes)
}
|
||||
|
||||
/**
 * Decode UTF-8 bytes to a string.
 *
 * @param {Uint8Array} arr - Bytes to decode.
 * @param {boolean} [loose=false] - When false, throw TypeError on invalid UTF-8;
 *   when true, substitute U+FFFD per the WHATWG Encoding spec.
 * @returns {string} The decoded string.
 */
function decode(arr, loose = false) {
  assertUint8(arr)
  if (arr.byteLength === 0) return ''
  if (nativeDecoder) return loose ? decoderLoose.decode(arr) : decoderFatal.decode(arr) // Node.js and browsers

  // Fast path for ASCII prefix, this is faster than all alternatives below
  const prefix = decodeLatin1(arr, 0, asciiPrefix(arr)) // No native decoder to use, so decodeAscii is useless here
  if (prefix.length === arr.length) return prefix

  // This codepath gives a ~3x perf boost on Hermes
  // latin1 -> escape -> decodeURIComponent reinterprets the raw bytes as percent-encoded UTF-8
  if (shouldUseEscapePath && escape && decodeURIComponent) {
    const o = escape(decodeLatin1(arr, prefix.length, arr.length))
    try {
      return prefix + decodeURIComponent(o) // Latin1 to utf8
    } catch {
      // decodeURIComponent throws URIError on malformed sequences — exactly the strict failure case
      if (!loose) throw new TypeError(E_STRICT)
      // Ok, we have to use manual implementation for loose decoder
    }
  }

  return prefix + js.decode(arr, loose, prefix.length)
}
|
||||
|
||||
/** Encode a string to UTF-8 (strict): throws on lone surrogates. `format` selects the returned typed view. */
export function utf8fromString(str, format = 'uint8') {
  return typedView(encode(str, false), format)
}

/** Encode a string to UTF-8 (loose): lone surrogates become U+FFFD replacement bytes. */
export function utf8fromStringLoose(str, format = 'uint8') {
  return typedView(encode(str, true), format)
}

/** Decode UTF-8 bytes to a string (strict): throws on invalid sequences. */
export function utf8toString(arr) {
  return decode(arr, false)
}

/** Decode UTF-8 bytes to a string (loose): invalid sequences become U+FFFD. */
export function utf8toStringLoose(arr) {
  return decode(arr, true)
}
|
||||
68
node_modules/@exodus/bytes/utf8.node.js
generated
vendored
Normal file
68
node_modules/@exodus/bytes/utf8.node.js
generated
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
import { assertUint8 } from './assert.js'
|
||||
import { typedView } from './array.js'
|
||||
import { E_STRING } from './fallback/_utils.js'
|
||||
import { E_STRICT, E_STRICT_UNICODE } from './fallback/utf8.js'
|
||||
import { isAscii } from 'node:buffer'
|
||||
|
||||
// Refuse to run against a userland Buffer polyfill (e.g. the browser `buffer` package,
// which sets TYPED_ARRAY_SUPPORT) — this module relies on real Node.js Buffer internals
// such as latin1Write / utf8Write / latin1Slice
if (Buffer.TYPED_ARRAY_SUPPORT) throw new Error('Unexpected Buffer polyfill')

let decoderFatal
// ignoreBOM: true keeps a leading BOM in the output instead of silently stripping it
const decoderLoose = new TextDecoder('utf-8', { ignoreBOM: true })
// Unbound String.prototype.isWellFormed (ES2024), used for strict encode validation
const { isWellFormed } = String.prototype
const isDeno = !!globalThis.Deno

try {
  decoderFatal = new TextDecoder('utf-8', { ignoreBOM: true, fatal: true })
} catch {
  // Without ICU, Node.js doesn't support fatal option for utf-8
}
|
||||
|
||||
/**
 * Encode a string to UTF-8 using Node.js Buffer fast paths.
 *
 * @param {string} str - Input string; TypeError for non-strings.
 * @param {boolean} [loose=false] - When false, throw on lone surrogates.
 * @returns {Uint8Array} UTF-8 bytes (a Buffer, which is a Uint8Array subclass).
 */
function encode(str, loose = false) {
  if (typeof str !== 'string') throw new TypeError(E_STRING)
  const strLength = str.length
  if (strLength === 0) return new Uint8Array() // faster than Uint8Array.of
  let res
  if (strLength > 0x4_00 && !isDeno) {
    // Faster for large strings
    const byteLength = Buffer.byteLength(str)
    res = Buffer.allocUnsafe(byteLength)
    // byteLength === strLength implies pure ASCII (any non-ASCII char needs more than 1 byte)
    const ascii = byteLength === strLength
    const written = ascii ? res.latin1Write(str) : res.utf8Write(str)
    if (written !== byteLength) throw new Error('Failed to write all bytes') // safeguard just in case
    if (ascii || loose) return res // no further checks needed
  } else {
    res = Buffer.from(str)
    // Equal lengths means pure ASCII, which cannot contain lone surrogates
    if (res.length === strLength || loose) return res
  }

  // Strict mode: Buffer encoding silently replaced lone surrogates, so validate the input itself
  if (!isWellFormed.call(str)) throw new TypeError(E_STRICT_UNICODE)
  return res
}
|
||||
|
||||
/**
 * Decode UTF-8 bytes to a string using Node.js fast paths.
 *
 * @param {Uint8Array} arr - Bytes to decode.
 * @param {boolean} [loose=false] - When false, throw TypeError on invalid UTF-8.
 * @returns {string} The decoded string.
 */
function decode(arr, loose = false) {
  assertUint8(arr)
  const byteLength = arr.byteLength
  if (byteLength === 0) return ''
  if (byteLength > 0x6_00 && !(isDeno && loose) && isAscii(arr)) {
    // On non-ascii strings, this loses ~10% * [relative position of the first non-ascii byte] (up to 10% total)
    // On ascii strings, this wins 1.5x on loose = false and 1.3x on loose = true
    // Only makes sense for large enough strings
    // Zero-copy view over the same memory; latin1 decodes bytes < 0x80 identically to ASCII
    const buf = Buffer.from(arr.buffer, arr.byteOffset, arr.byteLength)
    if (isDeno) return buf.toString() // Deno suffers from .latin1Slice
    return buf.latin1Slice(0, arr.byteLength) // .latin1Slice is faster than .asciiSlice
  }

  if (loose) return decoderLoose.decode(arr)
  if (decoderFatal) return decoderFatal.decode(arr)

  // We are in an env without native fatal decoder support (non-fixed Node.js without ICU)
  // Well, just recheck against encode if it contains replacement then, this is still faster than js impl
  const str = decoderLoose.decode(arr)
  if (str.includes('\uFFFD') && !Buffer.from(str).equals(arr)) throw new TypeError(E_STRICT)
  return str
}
|
||||
|
||||
/** Encode a string to UTF-8 (strict): throws on lone surrogates. `format` selects the returned typed view. */
export function utf8fromString(str, format = 'uint8') {
  return typedView(encode(str, false), format)
}

/** Encode a string to UTF-8 (loose): lone surrogates become U+FFFD replacement bytes. */
export function utf8fromStringLoose(str, format = 'uint8') {
  return typedView(encode(str, true), format)
}

/** Decode UTF-8 bytes to a string (strict): throws on invalid sequences. */
export function utf8toString(arr) {
  return decode(arr, false)
}

/** Decode UTF-8 bytes to a string (loose): invalid sequences become U+FFFD. */
export function utf8toStringLoose(arr) {
  return decode(arr, true)
}
|
||||
48
node_modules/@exodus/bytes/whatwg.d.ts
generated
vendored
Normal file
48
node_modules/@exodus/bytes/whatwg.d.ts
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
/**
|
||||
* WHATWG helpers
|
||||
*
|
||||
* ```js
|
||||
* import '@exodus/bytes/encoding.js' // For full legacy multi-byte encodings support
|
||||
* import { percentEncodeAfterEncoding } from '@exodus/bytes/whatwg.js'
|
||||
* ```
|
||||
*
|
||||
* @module @exodus/bytes/whatwg.js
|
||||
*/
|
||||
|
||||
/**
 * Implements [percent-encode after encoding](https://url.spec.whatwg.org/#string-percent-encode-after-encoding)
 * per WHATWG URL specification.
 *
 * > [!IMPORTANT]
 * > You must import `@exodus/bytes/encoding.js` for this API to accept legacy multi-byte encodings.
 *
 * Encodings `utf-16le`, `utf-16be`, and `replacement` are not accepted.
 *
 * [C0 control percent-encode set](https://url.spec.whatwg.org/#c0-control-percent-encode-set) is
 * always percent-encoded.
 *
 * `percentEncodeSet` is an addition to that, and must be a string of unique increasing codepoints
 * in range 0x20 - 0x7e, e.g. `' "#<>'`.
 *
 * This method accepts [DOMStrings](https://webidl.spec.whatwg.org/#idl-DOMString) and converts them
 * to [USVStrings](https://webidl.spec.whatwg.org/#idl-USVString).
 * This is different from e.g. `encodeURI` and `encodeURIComponent` which throw on surrogates:
 * ```js
 * > percentEncodeAfterEncoding('utf8', '\ud800', ' "#$%&+,/:;<=>?@[\\]^`{|}') // component
 * '%EF%BF%BD'
 * > encodeURIComponent('\ud800')
 * Uncaught URIError: URI malformed
 * ```
 *
 * @param encoding - The encoding label per WHATWG Encoding spec
 * @param input - Input scalar-value string to encode
 * @param percentEncodeSet - A string of ASCII chars to escape in addition to C0 control percent-encode set
 * @param spaceAsPlus - Whether to encode space as `'+'` instead of `'%20'` or `' '` (default: false)
 * @returns The percent-encoded string
 */
export function percentEncodeAfterEncoding(
  encoding: string,
  input: string,
  percentEncodeSet: string,
  spaceAsPlus?: boolean
): string;
|
||||
76
node_modules/@exodus/bytes/whatwg.js
generated
vendored
Normal file
76
node_modules/@exodus/bytes/whatwg.js
generated
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
import { utf8fromStringLoose } from '@exodus/bytes/utf8.js'
|
||||
import { createSinglebyteEncoder } from '@exodus/bytes/single-byte.js'
|
||||
import { isMultibyte, getMultibyteEncoder } from './fallback/encoding.js'
|
||||
import { normalizeEncoding, E_ENCODING } from './fallback/encoding.api.js'
|
||||
import { percentEncoder } from './fallback/percent.js'
|
||||
import { encodeMap } from './fallback/single-byte.js'
|
||||
import { E_STRING } from './fallback/_utils.js'
|
||||
|
||||
// https://url.spec.whatwg.org/#string-percent-encode-after-encoding
|
||||
// Codepoints below 0x20, 0x7F specifically, and above 0x7F (non-ASCII) are always encoded
|
||||
// > A C0 control is a code point in the range U+0000 NULL to U+001F INFORMATION SEPARATOR ONE, inclusive.
|
||||
// > The C0 control percent-encode set are the C0 controls and all code points greater than U+007E (~).
|
||||
// https://url.spec.whatwg.org/#string-percent-encode-after-encoding
// Codepoints below 0x20, 0x7F specifically, and above 0x7F (non-ASCII) are always encoded
// > A C0 control is a code point in the range U+0000 NULL to U+001F INFORMATION SEPARATOR ONE, inclusive.
// > The C0 control percent-encode set are the C0 controls and all code points greater than U+007E (~).
/**
 * Percent-encode `input` after first encoding it with the given WHATWG encoding.
 *
 * Characters the target encoding cannot represent are emitted as percent-encoded
 * HTML numeric character references (`&#NNN;` -> `%26%23NNN%3B`), matching the URL spec.
 *
 * @param {string} encoding - Encoding label per WHATWG Encoding spec.
 * @param {string} input - Input string (DOMString; lone surrogates become U+FFFD).
 * @param {string} percentEncodeSet - Extra ASCII chars to escape on top of the C0 control set.
 * @param {boolean} [spaceAsPlus=false] - Encode space as '+' instead of '%20'.
 * @returns {string} The percent-encoded string.
 * @throws {RangeError} For unknown/disallowed encodings.
 */
export function percentEncodeAfterEncoding(encoding, input, percentEncodeSet, spaceAsPlus = false) {
  const enc = normalizeEncoding(encoding)
  // Ref: https://encoding.spec.whatwg.org/#get-an-encoder
  if (!enc || enc === 'replacement' || enc === 'utf-16le' || enc === 'utf-16be') {
    throw new RangeError(E_ENCODING)
  }

  const percent = percentEncoder(percentEncodeSet, spaceAsPlus)
  if (enc === 'utf-8') return percent(utf8fromStringLoose(input))

  const multi = isMultibyte(enc)
  const encoder = multi ? getMultibyteEncoder() : createSinglebyteEncoder
  const fatal = encoder(enc)
  // Happy path: the whole input is representable in the target encoding
  try {
    return percent(fatal(input))
  } catch {}

  // Slow path: some codepoints are unrepresentable and must be emitted as &#cp; references
  let res = ''
  let last = 0
  if (multi) {
    const rep = enc === 'gb18030' ? percent(fatal('\uFFFD')) : `%26%23${0xff_fd}%3B` // only gb18030 can encode it
    // The callback fires for each unencodable codepoint: flush the bytes before it,
    // append the escaped reference, and tell the encoder no bytes were emitted
    const escaping = encoder(enc, (cp, u, i) => {
      res += percent(u, last, i)
      res += cp >= 0xd8_00 && cp < 0xe0_00 ? rep : `%26%23${cp}%3B` // &#cp;
      last = i
      return 0 // no bytes emitted
    })

    const u = escaping(input) // has side effects on res
    res += percent(u, last)
  } else {
    if (typeof input !== 'string') throw new TypeError(E_STRING) // all other paths have their own validation
    const m = encodeMap(enc)
    const len = input.length
    const u = new Uint8Array(len)
    for (let i = 0; i < len; i++) {
      const x = input.charCodeAt(i)
      const b = m[x]
      // b is falsy when the char has no single-byte mapping; charcode 0 (NUL) is
      // assumed to legitimately map to byte 0x00, hence the extra `x` check — TODO confirm encodeMap contract
      if (!b && x) {
        let cp = x
        const i0 = i
        if (x >= 0xd8_00 && x < 0xe0_00) {
          // Surrogate: default to U+FFFD, but combine a valid high+low pair into one codepoint
          cp = 0xff_fd
          if (x < 0xdc_00 && i + 1 < len) {
            const x1 = input.charCodeAt(i + 1)
            if (x1 >= 0xdc_00 && x1 < 0xe0_00) {
              cp = 0x1_00_00 + ((x1 & 0x3_ff) | ((x & 0x3_ff) << 10))
              i++
            }
          }
        }

        res += `${percent(u, last, i0)}%26%23${cp}%3B` // &#cp;
        last = i + 1 // skip current
      } else {
        u[i] = b
      }
    }

    res += percent(u, last)
  }

  return res
}
|
||||
76
node_modules/@exodus/bytes/wif.d.ts
generated
vendored
Normal file
76
node_modules/@exodus/bytes/wif.d.ts
generated
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
/**
|
||||
* Wallet Import Format (WIF) encoding and decoding.
|
||||
*
|
||||
* ```js
|
||||
* import { fromWifString, toWifString } from '@exodus/bytes/wif.js'
|
||||
* import { fromWifStringSync, toWifStringSync } from '@exodus/bytes/wif.js'
|
||||
* ```
|
||||
*
|
||||
* On non-Node.js, requires peer dependency [@noble/hashes](https://www.npmjs.com/package/@noble/hashes) to be installed.
|
||||
*
|
||||
* @module @exodus/bytes/wif.js
|
||||
*/
|
||||
|
||||
/// <reference types="node" />
|
||||
|
||||
import type { Uint8ArrayBuffer } from './array.js';
|
||||
|
||||
/**
 * WIF (Wallet Import Format) data structure
 */
export interface Wif {
  /** Network version byte (an integer in range 0-255) */
  version: number;
  /** 32-byte private key */
  privateKey: Uint8ArrayBuffer;
  /** Whether the key is compressed (encoded form carries a trailing 0x01 flag byte) */
  compressed: boolean;
}

/**
 * Decode a WIF string to WIF data
 *
 * Returns a promise that resolves to an object with `{ version, privateKey, compressed }`.
 *
 * The optional `version` parameter validates the version byte.
 *
 * Throws if the WIF string is invalid or version doesn't match.
 *
 * @param string - The WIF encoded string
 * @param version - Optional expected version byte to validate against
 * @returns The decoded WIF data
 * @throws Error if the WIF string is invalid or version doesn't match
 */
export function fromWifString(string: string, version?: number): Promise<Wif>;

/**
 * Decode a WIF string to WIF data (synchronous)
 *
 * Returns an object with `{ version, privateKey, compressed }`.
 *
 * The optional `version` parameter validates the version byte.
 *
 * Throws if the WIF string is invalid or version doesn't match.
 *
 * @param string - The WIF encoded string
 * @param version - Optional expected version byte to validate against
 * @returns The decoded WIF data
 * @throws Error if the WIF string is invalid or version doesn't match
 */
export function fromWifStringSync(string: string, version?: number): Wif;

/**
 * Encode WIF data to a WIF string
 *
 * @param wif - The WIF data to encode
 * @returns The WIF encoded string
 * @throws Error if `version` is not an integer in 0-255 or `privateKey` is not 32 bytes
 */
export function toWifString(wif: Wif): Promise<string>;

/**
 * Encode WIF data to a WIF string (synchronous)
 *
 * @param wif - The WIF data to encode
 * @returns The WIF encoded string
 * @throws Error if `version` is not an integer in 0-255 or `privateKey` is not 32 bytes
 */
export function toWifStringSync(wif: Wif): string;
|
||||
41
node_modules/@exodus/bytes/wif.js
generated
vendored
Normal file
41
node_modules/@exodus/bytes/wif.js
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
import { toBase58checkSync, fromBase58checkSync } from '@exodus/bytes/base58check.js'
|
||||
import { assertUint8 } from './assert.js'
|
||||
|
||||
// Mostly matches npmjs.com/wif, but with extra checks + using our base58check
|
||||
// Also no inconsistent behavior on Buffer/Uint8Array input
|
||||
|
||||
/**
 * Parse decoded WIF payload bytes into `{ version, privateKey, compressed }`.
 *
 * @param {Uint8Array} arr - Payload: version byte + 32-byte key (+ optional 0x01 flag).
 * @param {number} [expectedVersion] - When given, the version byte must match it.
 * @returns {{ version: number, privateKey: Uint8Array, compressed: boolean }}
 * @throws {Error} On bad length, version mismatch, or a bad compression flag.
 */
function from(arr, expectedVersion) {
  assertUint8(arr)
  const { length } = arr
  if (!(length === 33 || length === 34)) throw new Error('Invalid WIF length')
  const version = arr[0]
  if (expectedVersion !== undefined && expectedVersion !== version) {
    throw new Error('Invalid network version')
  }

  // Copy the key bytes so the result never aliases the input, and is a plain
  // Uint8Array even when a Buffer was passed in (unlike .slice)
  const privateKey = Uint8Array.from(arr.subarray(1, 33))
  const compressed = length === 34
  if (compressed && arr[33] !== 1) throw new Error('Invalid compression flag')
  return { version, privateKey, compressed }
}
|
||||
|
||||
/**
 * Serialize WIF data into payload bytes: version byte + 32-byte key (+ 0x01 when compressed).
 *
 * @param {{ version: number, privateKey: Uint8Array, compressed: boolean }} wif
 * @returns {Uint8Array} The 33- or 34-byte payload.
 * @throws {Error} When version is not an integer byte value or privateKey is not 32 bytes.
 */
function to({ version, privateKey, compressed }) {
  const isByteValue = Number.isSafeInteger(version) && version >= 0 && version <= 0xff
  if (!isByteValue) throw new Error('Missing or invalid version')
  assertUint8(privateKey, { length: 32, name: 'privateKey' })
  const size = compressed ? 34 : 33
  const out = new Uint8Array(size)
  out[0] = version
  out.set(privateKey, 1)
  if (compressed) out[33] = 1
  return out
}
|
||||
|
||||
// Async performance is worse here, so expose the same internal methods as sync for now
// ./base58check is sync internally anyway for now, so doesn't matter until that is changed

/** Decode a base58check WIF string; optionally validate the network version byte. */
export const fromWifStringSync = (string, version) => from(fromBase58checkSync(string), version)
// export const fromWifString = async (string, version) => from(await fromBase58check(string), version)
/** Async variant of fromWifStringSync — currently computes synchronously, see note above. */
export const fromWifString = async (string, version) => from(fromBase58checkSync(string), version)

/** Encode WIF data ({ version, privateKey, compressed }) to a base58check string. */
export const toWifStringSync = (wif) => toBase58checkSync(to(wif))
// export const toWifString = async (wif) => toBase58check(to(wif))
/** Async variant of toWifStringSync — currently computes synchronously, see note above. */
export const toWifString = async (wif) => toBase58checkSync(to(wif))
|
||||
Reference in New Issue
Block a user